<?xml version="1.0" encoding="UTF-8"?><xml><records><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Attila Tanacs</style></author><author><style face="normal" font="default" size="100%">András Majdik</style></author><author><style face="normal" font="default" size="100%">Levente Hajder</style></author><author><style face="normal" font="default" size="100%">Jozsef Molnar</style></author><author><style face="normal" font="default" size="100%">Zsolt Santa</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Chu-Song Chen</style></author><author><style face="normal" font="default" size="100%">Mohan Kankanhalli</style></author><author><style face="normal" font="default" size="100%">Shang-Hong Lai</style></author><author><style face="normal" font="default" size="100%">Joo Hwee Lim</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Collaborative Mobile 3D Reconstruction of Urban Scenes</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the ACCV Workshop on Intelligent Mobile and Egocentric Vision (ACCV-IMEV), Lecture Notes in Computer Science</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2015</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Nov 2014</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">Springer</style></publisher><pub-location><style face="normal" font="default" size="100%">Singapore</style></pub-location><pages><style face="normal" font="default" size="100%">1-16</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><work-type><style 
face="normal" font="default" size="100%">Conference paper</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Attila Tanacs</style></author><author><style face="normal" font="default" size="100%">Joakim Lindblad</style></author><author><style face="normal" font="default" size="100%">Nataša Sladoje</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Estimation of linear deformations of 2D and 3D fuzzy objects</style></title><secondary-title><style face="normal" font="default" size="100%">PATTERN RECOGNITION</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2015</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Apr 2015</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">Elsevier</style></publisher><volume><style face="normal" font="default" size="100%">48</style></volume><pages><style face="normal" font="default" size="100%">1387-1399</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p id=&quot;sp0080&quot;&gt;Registration is a fundamental task in image processing, it is used to determine geometric correspondences between images taken at different times and/or from different viewpoints. Here we propose a general framework in &lt;em&gt;n&lt;/em&gt;-dimensions to solve binary shape/object matching problems without the need of establishing additional point or other type of correspondences. The approach is based on generating and solving polynomial systems of equations. 
We also propose an extension which, provided that a suitable segmentation method can produce a fuzzy border representation, further increases the registration precision. Via numerous synthetic and real test we examine the different solution techniques of the polynomial systems of equations. We take into account a direct analytical, an iterative least-squares, and a combined method. Iterative and combined approaches produce the most precise results. Comparison is made against competing methods for rigid-body problems. Our method is orders of magnitude faster and is able to recover alignment regardless of the magnitude of the deformation compared to the narrow capture range of others. The applicability of the proposed methods is demonstrated on real X-ray images of hip replacement implants and 3D CT volumes of the pelvic area. Since the images must be parsed through only once, our approach is especially suitable for solving registration problems of large images.&lt;/p&gt;</style></abstract><issue><style face="normal" font="default" size="100%">4</style></issue><work-type><style face="normal" font="default" size="100%">Journal Article</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Csaba Domokos</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Realigning 2D and 3D Object Fragments without Correspondences</style></title><secondary-title><style face="normal" font="default" size="100%">Pattern Analysis and Machine Intelligence, IEEE Transactions on</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2015</style></year><pub-dates><date><style  face="normal" font="default" size="100%">June 
2015</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><volume><style face="normal" font="default" size="100%">pp</style></volume><pages><style face="normal" font="default" size="100%">1</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;div class=&quot;article&quot;&gt;&lt;p&gt;This paper addresses the problem of simultaneous estimation of different linear deformations, resulting in a global non-linear transformation, between an original object and its broken fragments. A general framework is proposed without using correspondences, where the solution of a polynomial system of equations directly provides the parameters of the alignment. We quantitatively evaluate the proposed algorithm on a large synthetic dataset containing 2D and 3D images, where linear (rigid-body and affine) transformations are considered. We also conduct an exhaustive analysis of the robustness against segmentation errors and the numerical stability of the proposed method. 
Moreover, we present experiments on 2D real images as well as on volumetric medical images.&lt;/p&gt;&lt;/div&gt;&lt;p&gt;&amp;nbsp;&lt;/p&gt;</style></abstract><issue><style face="normal" font="default" size="100%">99</style></issue><work-type><style face="normal" font="default" size="100%">Journal article</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Jozsef Molnar</style></author><author><style face="normal" font="default" size="100%">Robert Frohlich</style></author><author><style face="normal" font="default" size="100%">Dmitrij Chetverikov</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Abdesselam Bouzerdoum</style></author><author><style face="normal" font="default" size="100%">Lei Wang</style></author><author><style face="normal" font="default" size="100%">Philip Ogunbona</style></author><author><style face="normal" font="default" size="100%">Wanqing Li</style></author><author><style face="normal" font="default" size="100%">Son Lam Phung</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">3D Reconstruction of Planar Patches Seen by Omnidirectional Cameras</style></title><secondary-title><style face="normal" font="default" size="100%">International Conference on Digital Image Computing: Techniques and Applications (DICTA)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2014</style></year></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Wollongong, Australia</style></pub-location><pages><style face="normal" font="default" 
size="100%">1-8</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Jozsef Molnar</style></author><author><style face="normal" font="default" size="100%">Rui Huang</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Jian Zhang</style></author><author><style face="normal" font="default" size="100%">Mohammed Bennamoun</style></author><author><style face="normal" font="default" size="100%">Fatih Porikli</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">3D Reconstruction of Planar Surface Patches: A Direct Solution</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the ACCV Workshop on Big Data in 3D Computer Vision (ACCV-BigData3DCV)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2014</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Nov 2014</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">Springer</style></publisher><pub-location><style face="normal" font="default" size="100%">Singapore, Szingapúr</style></pub-location><pages><style face="normal" font="default" size="100%">1-8.</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type></record><record><source-app name="Biblio" 
version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zsolt Santa</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Michael Felsberg</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Affine Alignment of Occluded Shapes</style></title><secondary-title><style face="normal" font="default" size="100%">International Conference on Pattern Recognition (ICPR)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2014</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Aug 2014</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Stockholm, Svédország</style></pub-location><pages><style face="normal" font="default" size="100%">2155-2160</style></pages><isbn><style face="normal" font="default" size="100%">978-4-9906441-0-9</style></isbn><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Attila Tanacs</style></author><author><style face="normal" font="default" size="100%">András Majdik</style></author><author><style face="normal" font="default" size="100%">Jozsef Molnar</style></author><author><style face="normal" font="default" size="100%">Atul Rai</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Abdesselam 
Bouzerdoum</style></author><author><style face="normal" font="default" size="100%">Lei Wang</style></author><author><style face="normal" font="default" size="100%">Philip Ogunbona</style></author><author><style face="normal" font="default" size="100%">Wanqing Li</style></author><author><style face="normal" font="default" size="100%">Son Lam Phung</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Establishing Correspondences between Planar Image Patches</style></title><secondary-title><style face="normal" font="default" size="100%">International Conference on Digital Image Computing: Techniques and Applications (DICTA)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2014</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2014</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Wollongong, Australia</style></pub-location><pages><style face="normal" font="default" size="100%">1-7</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Péter Balázs</style></author><author><style face="normal" font="default" size="100%">Endre Katona</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Antal Nagy</style></author><author><style face="normal" font="default" size="100%">Gábor Németh</style></author><author><style face="normal" font="default" size="100%">László Gábor 
Nyúl</style></author><author><style face="normal" font="default" size="100%">Kálmán Palágyi</style></author><author><style face="normal" font="default" size="100%">Attila Tanacs</style></author><author><style face="normal" font="default" size="100%">László Gábor Varga</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Roland Kunkli</style></author><author><style face="normal" font="default" size="100%">Ildikó Papp</style></author><author><style face="normal" font="default" size="100%">Edéné Rutkovszky</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Képfeldolgozás a szegedi informatikus-képzésben</style></title><secondary-title><style face="normal" font="default" size="100%">Informatika a felsőoktatásban 2014</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2014</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2014</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">University of Debrecen</style></publisher><pub-location><style face="normal" font="default" size="100%">Debrecen, Hungary</style></pub-location><pages><style face="normal" font="default" size="100%">667-675</style></pages><language><style face="normal" font="default" size="100%">hun</style></language><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Attila Tanacs</style></author><author><style face="normal" font="default" size="100%">Joakim Lindblad</style></author><author><style face="normal" font="default" size="100%">Nataša Sladoje</style></author><author><style face="normal" font="default" size="100%">Zoltan 
Kato</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">László Czúni</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">2D és 3D bináris objektumok lineáris deformáció-becslésének numerikus megoldási lehetőségei</style></title><secondary-title><style face="normal" font="default" size="100%">A Képfeldolgozók és Alakfelismerők Társaságának konferenciája - KÉPAF 2013</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2013</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Jan 2013</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">NJSZT-KÉPAF</style></publisher><pub-location><style face="normal" font="default" size="100%">Veszprém</style></pub-location><pages><style face="normal" font="default" size="100%">526 - 541</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zsolt Santa</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Correspondence-less non-rigid registration of triangular surface meshes</style></title><secondary-title><style face="normal" font="default" size="100%">IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2013</style></year><pub-dates><date><style  face="normal" font="default" size="100%">June 2013</style></date></pub-dates></dates><publisher><style 
face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Portland, OR, USA</style></pub-location><pages><style face="normal" font="default" size="100%">2275 - 2282</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;A novel correspondence-less approach is proposed to find a thin plate spline map between a pair of deformable 3D objects represented by triangular surface meshes. The proposed method works without landmark extraction and feature correspondences. The aligning transformation is found simply by solving a system of nonlinear equations. Each equation is generated by integrating a nonlinear function over the object's domains. We derive recursive formulas for the efficient computation of these integrals. Based on a series of comparative tests on a large synthetic dataset, our triangular mesh-based algorithm outperforms state of the art methods both in terms of computing time and accuracy. The applicability of the proposed approach has been demonstrated on the registration of 3D lung CT volumes. 
© 2013 IEEE.&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type><notes><style face="normal" font="default" size="100%">ScopusID: 84887348013; doi: 10.1109/CVPR.2013.295</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zsolt Santa</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Geoff West</style></author><author><style face="normal" font="default" size="100%">Peter Kovesi</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Elastic Registration of 3D Deformable Objects</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of International Conference on Digital Image Computing: Techniques and Applications (DICTA)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2013</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Nov 2013</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.inf.u-szeged.hu/~kato/papers/dicta2012.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">New York</style></pub-location><pages><style face="normal" font="default" size="100%">1 - 7</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;A novel correspondence-less approach is proposed to find a non-linear aligning transformation between a pair of deformable 3D objects. 
Herein, we consider a polynomial deformation model, but our framework can be easily adapted to other common deformations. The basic idea of the proposed method is to set up a system of nonlinear equations whose solution directly provides the parameters of the aligning transformation. Each equation is generated by integrating a nonlinear function over the object's domains. Thus the number of equations is determined by the number of adopted nonlinear functions yielding a flexible mechanism to generate sufficiently many equations. While classical approaches would establish correspondences between the shapes, our method works without landmarks. The efficiency of the proposed approach has been demonstrated on a large synthetic dataset as well as in the context of medical image registration.&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type><notes><style face="normal" font="default" size="100%">UT: 000316318400010doi: 10.1109/DICTA.2012.6411674</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Endre Juhász</style></author><author><style face="normal" font="default" size="100%">Attila Tanacs</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Giovanni Ramponi</style></author><author><style face="normal" font="default" size="100%">Sven Lončarić</style></author><author><style face="normal" font="default" size="100%">Alberto Carini</style></author><author><style face="normal" font="default" size="100%">Karen Egiazarian</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Evaluation of Point Matching Methods for Wide-baseline Stereo Correspondence on Mobile 
Platforms</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the International Symposium on Image and Signal Processing and Analysis (ISPA)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2013</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Sep 2013</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Trieste</style></pub-location><pages><style face="normal" font="default" size="100%">806 - 811</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;div class=&quot;article&quot;&gt;&lt;p&gt;Wide-baseline stereo matching is a common problem of computer vision. By the explosion of smartphones equipped with camera modules, many classical computer vision solutions have been adapted to such platforms. Considering the widespread use of various networking options for mobile phones, one can consider a set of smart phones as an ad-hoc camera network, where each camera is equipped with a more and more powerful computing engine in addition to a limited bandwidth communication with other devices. Therefore the performance of classical vision algorithms in a collaborative mobile environment is of particular interest. In such a scenario we expect that the images are taken almost simultaneously but from different viewpoints, implying that the camera poses are significantly different but lighting conditions are the same. In this work, we provide quantitative comparison of the most important keypoint detectors and descriptors in the context of wide baseline stereo matching. 
We found that for resolution of 2 megapixels images the current mobile hardware is capable of providing results efficiently.&lt;/p&gt;&lt;/div&gt;&lt;p&gt;&amp;nbsp;&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Paul Richard</style></author><author><style face="normal" font="default" size="100%">Gabriela Csurka</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Linear and nonlinear shape alignment without correspondences</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of International Joint Conference on Computer Vision, Imaging and Computer Graphics - Theory and Applications (Revised Selected Papers)</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Communications in Computer and Information Science</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2013</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Feb 2013</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.inf.u-szeged.hu/~kato/papers/visapp2012.pdf</style></url></web-urls></urls><number><style face="normal" font="default" size="100%">359</style></number><publisher><style face="normal" font="default" size="100%">Springer Verlag</style></publisher><pub-location><style face="normal" font="default" size="100%">Berlin; Heidelberg; New York; London; Paris; Tokyo</style></pub-location><pages><style face="normal" font="default" size="100%">3 - 
17</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;div class=&quot;abstract-content formatted&quot; itemprop=&quot;description&quot;&gt;&lt;p class=&quot;a-plus-plus&quot;&gt;We consider the estimation of diffeomorphic deformations aligning a known binary shape and its distorted observation. The classical solution consists in extracting landmarks, establishing correspondences and then the aligning transformation is obtained via a complex optimization procedure. Herein we present an alternative solution which works without landmark correspondences, is independent of the magnitude of transformation, easy to implement, and has a linear time complexity. The proposed universal framework is capable of recovering linear as well as nonlinear deformations.&lt;/p&gt;&lt;/div&gt;&lt;p&gt;&amp;nbsp;&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zsolt Santa</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Paulo de Souza</style></author><author><style face="normal" font="default" size="100%">Ulrich Engelke</style></author><author><style face="normal" font="default" size="100%">Ashfaqur Rahman</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Pose Estimation of Ad-hoc Mobile Camera Networks</style></title><secondary-title><style face="normal" font="default" size="100%">International Conference on Digital Image Computing: Techniques and Applications (DICTA)</style></secondary-title></titles><dates><year><style  
face="normal" font="default" size="100%">2013</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2013</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Hobart, TAS </style></pub-location><pages><style face="normal" font="default" size="100%">88 - 95</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;div class=&quot;article&quot;&gt;&lt;p&gt;An algorithm is proposed for the pose estimation of ad-hoc mobile camera networks with overlapping views. The main challenge is to estimate camera parameters with respect to the 3D scene without any specific calibration pattern, hence allowing for a consistent, camera-independent world coordinate system. The only assumption about the scene is that it contains a planar surface patch of a low-rank texture, which is visible in at least two cameras. Such low-rank patterns are quite common in urban environments. The proposed algorithm consists of three main steps: relative pose estimation of the cameras within the network, followed by the localization of the network within the 3D scene using a low-rank surface patch, and finally the estimation of a consistent scale for the whole system. The algorithm follows a distributed architecture, hence the computing power of the participating mobile devices are efficiently used. The performance and robustness of the proposed algorithm have been analyzed on both synthetic and real data. 
Experimental results confirmed the relevance and applicability of the method.&lt;/p&gt;&lt;/div&gt;&lt;p&gt;&amp;nbsp;&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type><accession-num><style face="normal" font="default" size="100%">14000303 </style></accession-num></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Tamás Levente</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Jian Zhang</style></author><author><style face="normal" font="default" size="100%">Mohammed Bennamoun</style></author><author><style face="normal" font="default" size="100%">Dan Schonfeld</style></author><author><style face="normal" font="default" size="100%">Zhengyou Zhang</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Targetless Calibration of a Lidar - Perspective Camera Pair</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of ICCV Workshop on Big Data in 3D Computer Vision</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2013</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Dec 2013</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Sydney, NSW </style></pub-location><pages><style face="normal" font="default" size="100%">668 - 675</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;div class=&quot;article&quot;&gt;&lt;p&gt;A novel method is 
proposed for the &lt;span class=&quot;snippet&quot;&gt;calibration&lt;/span&gt; of a &lt;span class=&quot;snippet&quot;&gt;camera&lt;/span&gt; - 3D &lt;span class=&quot;snippet&quot;&gt;lidar&lt;/span&gt; &lt;span class=&quot;snippet&quot;&gt;pair&lt;/span&gt; without the use of any special &lt;span class=&quot;snippet&quot;&gt;calibration&lt;/span&gt; pattern or point correspondences. The proposed method has no specific assumption about the data source: plain depth information is expected from the &lt;span class=&quot;snippet&quot;&gt;lidar&lt;/span&gt; scan and a simple &lt;span class=&quot;snippet&quot;&gt;perspective&lt;/span&gt; &lt;span class=&quot;snippet&quot;&gt;camera&lt;/span&gt; is used for the 2D images. The &lt;span class=&quot;snippet&quot;&gt;calibration&lt;/span&gt; is solved as a 2D-3D registration problem using a minimum of one (for extrinsic) or two (for intrinsic-extrinsic) planar regions visible in both cameras. The registration is then traced back to the solution of a non-linear system of equations which directly provides the &lt;span class=&quot;snippet&quot;&gt;calibration&lt;/span&gt; parameters between the bases of the two sensors. 
The method has been tested on a large set of synthetic &lt;span class=&quot;snippet&quot;&gt;lidar&lt;/span&gt;-&lt;span class=&quot;snippet&quot;&gt;camera&lt;/span&gt; image &lt;span class=&quot;snippet&quot;&gt;pairs&lt;/span&gt; as well as on real data acquired in outdoor environment.&lt;/p&gt;&lt;/div&gt;&lt;p&gt;&amp;nbsp;&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type><accession-num><style face="normal" font="default" size="100%">14147882 </style></accession-num><notes><style face="normal" font="default" size="100%">doi: 10.1109/ICCVW.2013.92</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A unifying framework for correspondence-less shape alignment and its medical applications</style></title><secondary-title><style face="normal" font="default" size="100%"> Intelligent Interactive Technologies and Multimedia </style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Communications in Computer and Information Science</style></tertiary-title><short-title><style face="normal" font="default" size="100%">COMMUN COMPUT INFORM SCI</style></short-title></titles><dates><year><style  face="normal" font="default" size="100%">2013</style></year><pub-dates><date><style  face="normal" font="default" size="100%">March 2013</style></date></pub-dates></dates><number><style face="normal" font="default" size="100%">276</style></number><publisher><style face="normal" font="default" size="100%">Springer</style></publisher><pub-location><style face="normal" font="default" size="100%">Allahabad, India</style></pub-location><volume><style face="normal" font="default" size="100%">276 
CCIS</style></volume><pages><style face="normal" font="default" size="100%">40 - 52</style></pages><isbn><style face="normal" font="default" size="100%">1865-0929</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;We give an overview of our general framework for registering 2D and 3D objects without correspondences. Classical solutions consist in extracting landmarks, establishing correspondences and then the aligning transformation is obtained via a complex optimization procedure. In contrast, our framework works without landmark correspondences, is independent of the magnitude of transformation, easy to implement, and has a linear time complexity. The efficiency and robustness of the method has been demonstrated using various deformation models. Herein, we will focus on medical applications. © 2013 Springer-Verlag.&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type><notes><style face="normal" font="default" size="100%">ScopusID: 84875170012; doi: 10.1007/978-3-642-37463-0_4; T3: 2nd International Conference on Intelligent Interactive Technologies and Multimedia, IITM 2013; Y2: 9 March 2013 through 11 March 2013
CY: Allahabad
</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>9</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Viktor Varjas</style></author><author><style face="normal" font="default" size="100%">Attila Tanacs</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Affine Registration of 3D Objects</style></title></titles><dates><year><style  face="normal" font="default" size="100%">2012</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2012</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.inf.u-szeged.hu/~kato/software/affbin3dregdemo.html</style></url></web-urls></urls><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;This is the sample implementation and benchmark dataset of the binary image registration algorithm described in the following papers: Attila Tanacs and Zoltan Kato. Fast Linear Registration of 3D Objects Segmented from Medical Images. In Proceedings of International Conference on BioMedical Engineering and Informatics, Shanghai, China, pages 299--303, October 2011. IEEE. Attila Tanacs, Joakim Lindblad, Natasa Sladoje and Zoltan Kato. Estimation of Linear Deformations of 3D Objects. In Proceedings of International Conference on Image Processing, Hong Kong, China, pp. 153-156, September 2010. 
IEEE.&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Software</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>6</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Markov random fields in image segmentation</style></title></titles><dates><year><style  face="normal" font="default" size="100%">2012</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2012</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">Now Publishers</style></publisher><pub-location><style face="normal" font="default" size="100%">Hanover, NH</style></pub-location><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;Markov Random Fields in Image Segmentation introduces the fundamentals of Markovian modeling in image segmentation as well as providing a brief overview of recent advances in the field.&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Book</style></work-type><notes><style face="normal" font="default" size="100%">doi: 10.1561/2000000035</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Csaba Molnar</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Ian Jermyn</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Jan-Olof 
Eklundh</style></author><author><style face="normal" font="default" size="100%">Yuichi Ohta</style></author><author><style face="normal" font="default" size="100%">Steven Tanimoto</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">A Multi-Layer Phase Field Model for Extracting Multiple Near-Circular Objects</style></title><secondary-title><style face="normal" font="default" size="100%">International Conference on Pattern Recognition (ICPR)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2012</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Nov 2012</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Tsukuba, Japan</style></pub-location><pages><style face="normal" font="default" size="100%">1427 - 1430</style></pages><isbn><style face="normal" font="default" size="100%">978-1-4673-2216-4 </style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;div class=&quot;article&quot;&gt;&lt;p&gt;This paper proposes a functional that assigns low `energy' to sets of subsets of the image domain consisting of a number of possibly overlapping &lt;span class=&quot;snippet&quot;&gt;near&lt;/span&gt;-&lt;span class=&quot;snippet&quot;&gt;circular&lt;/span&gt; regions of approximately a given radius: a `gas of circles'. The &lt;span class=&quot;snippet&quot;&gt;model&lt;/span&gt; can be used as a prior for &lt;span class=&quot;snippet&quot;&gt;object&lt;/span&gt; extraction whenever the &lt;span class=&quot;snippet&quot;&gt;objects&lt;/span&gt; conform to the `gas of circles' geometry, e.g. cells in biological images. 
Configurations are represented by a &lt;span class=&quot;snippet&quot;&gt;multi&lt;/span&gt;-&lt;span class=&quot;snippet&quot;&gt;layer&lt;/span&gt; &lt;span class=&quot;snippet&quot;&gt;phase&lt;/span&gt; &lt;span class=&quot;snippet&quot;&gt;field&lt;/span&gt;. Each &lt;span class=&quot;snippet&quot;&gt;layer&lt;/span&gt; has an associated function, regions being defined by thresholding. Intra-&lt;span class=&quot;snippet&quot;&gt;layer&lt;/span&gt; interactions assign low energy to configurations consisting of non-overlapping &lt;span class=&quot;snippet&quot;&gt;near&lt;/span&gt;-&lt;span class=&quot;snippet&quot;&gt;circular&lt;/span&gt; regions, while overlapping regions are represented in separate layers. Inter-&lt;span class=&quot;snippet&quot;&gt;layer&lt;/span&gt; interactions penalize overlaps. Here we present a theoretical and experimental analysis of the &lt;span class=&quot;snippet&quot;&gt;model&lt;/span&gt;.&lt;/p&gt;&lt;/div&gt;&lt;p&gt;&amp;nbsp;&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type><accession-num><style face="normal" font="default" size="100%">13324819</style></accession-num></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Csaba Domokos</style></author><author><style face="normal" font="default" size="100%">Jozsef Nemeth</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Nonlinear Shape Registration without Correspondences</style></title><secondary-title><style face="normal" font="default" size="100%">IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE</style></secondary-title><short-title><style face="normal" font="default" size="100%">IEEE T PATTERN 
ANAL</style></short-title></titles><dates><year><style  face="normal" font="default" size="100%">2012</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2012</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.inf.u-szeged.hu/~kato/papers/TPAMI-2010-03-0146.R2_Kato.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><volume><style face="normal" font="default" size="100%">34</style></volume><pages><style face="normal" font="default" size="100%">943 - 958</style></pages><isbn><style face="normal" font="default" size="100%">0162-8828</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;div class=&quot;article&quot;&gt;&lt;p&gt;In this paper, we propose a novel framework to estimate the parameters of a diffeomorphism that aligns a known &lt;span class=&quot;snippet&quot;&gt;shape&lt;/span&gt; and its distorted observation. Classical &lt;span class=&quot;snippet&quot;&gt;registration&lt;/span&gt; methods first establish &lt;span class=&quot;snippet&quot;&gt;correspondences&lt;/span&gt; between the &lt;span class=&quot;snippet&quot;&gt;shapes&lt;/span&gt; and then compute the transformation parameters from these landmarks. Herein, we trace back the problem to the solution of a system of &lt;span class=&quot;snippet&quot;&gt;nonlinear&lt;/span&gt; equations which directly gives the parameters of the aligning transformation. The proposed method provides a generic framework to recover any diffeomorphic deformation &lt;span class=&quot;snippet&quot;&gt;without&lt;/span&gt; established &lt;span class=&quot;snippet&quot;&gt;correspondences&lt;/span&gt;. It is easy to implement, not sensitive to the strength of the deformation, and robust against segmentation errors. 
The method has been applied to several commonly used transformation models. The performance of the proposed framework has been demonstrated on large synthetic data sets as well as in the context of various applications.&lt;/p&gt;&lt;/div&gt;&lt;p&gt;&amp;nbsp;&lt;/p&gt;</style></abstract><issue><style face="normal" font="default" size="100%">5</style></issue><work-type><style face="normal" font="default" size="100%">Journal article</style></work-type><accession-num><style face="normal" font="default" size="100%">12617610 </style></accession-num><notes><style face="normal" font="default" size="100%">UT: 000301747400009doi: 10.1109/TPAMI.2011.200</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Imtnan-Ul-Haque Qazi</style></author><author><style face="normal" font="default" size="100%">Oliver Alata</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Christine Fernandez-Maloigne</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Parametric Stochastic Modeling for Color Image Segmentation and Texture Characterization</style></title><secondary-title><style face="normal" font="default" size="100%">Advanced color image processing and analysis</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2012</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2012</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">Springer</style></publisher><pub-location><style face="normal" font="default" size="100%">Berlin; Heidelberg; New York; London; Paris; Tokyo</style></pub-location><pages><style face="normal" font="default" 
size="100%">279 - 325</style></pages><isbn><style face="normal" font="default" size="100%">978-1-4419-6189-1</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;div class=&quot;abstract-content formatted&quot; itemprop=&quot;description&quot;&gt;&lt;p class=&quot;a-plus-plus&quot;&gt;&lt;em class=&quot;a-plus-plus&quot;&gt;Black should be made a color of light&lt;/em&gt; Clemence Boulouque&lt;/p&gt;&lt;p class=&quot;a-plus-plus&quot;&gt;Parametric stochastic models offer the definition of color and/or texture features based on model parameters, which is of interest for color texture classification, segmentation and synthesis.&lt;/p&gt;&lt;p class=&quot;a-plus-plus&quot;&gt;In this chapter, distribution of colors in the images through various parametric approximations including multivariate Gaussian distribution, multivariate Gaussian mixture models (MGMM) and Wishart distribution, is discussed. In the context of Bayesian color image segmentation, various aspects of sampling from the posterior distributions to estimate the color distribution from MGMM and the label field, using different move types are also discussed. These include reversible jump mechanism from MCMC methodology. Experimental results on color images are presented and discussed.&lt;/p&gt;&lt;p class=&quot;a-plus-plus&quot;&gt;Then, we give some materials for the description of color spatial structure using Markov Random Fields (MRF), and more particularly multichannel GMRF, and multichannel linear prediction models. In this last approach, two dimensional complex multichannel versions of both causal and non-causal models are discussed to perform the simultaneous parametric power spectrum estimation of the luminance and the chrominance channels of the color image. 
Application of these models to the classification and segmentation of color texture images is also illustrated.&lt;/p&gt;&lt;/div&gt;&lt;p&gt;&amp;nbsp;&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Book chapter</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Csaba Domokos</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Jan-Olof Eklundh</style></author><author><style face="normal" font="default" size="100%">Yuichi Ohta</style></author><author><style face="normal" font="default" size="100%">Steven Tanimoto</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Simultaneous Affine Registration of Multiple Shapes</style></title><secondary-title><style face="normal" font="default" size="100%">International Conference on Pattern Recognition (ICPR)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2012</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Nov 2012</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Tsukuba, Japan</style></pub-location><pages><style face="normal" font="default" size="100%">9 - 12</style></pages><isbn><style face="normal" font="default" size="100%">978-1-4673-2216-4 </style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;div class=&quot;article&quot;&gt;&lt;p&gt;The problem of simultaneously estimating &lt;span class=&quot;snippet&quot;&gt;affine&lt;/span&gt; 
deformations between &lt;span class=&quot;snippet&quot;&gt;multiple&lt;/span&gt; objects occur in many applications. Herein, a direct method is proposed which provides the result as a solution of a linear system of equations without establishing correspondences between the objects. The key idea is to construct enough linearly independent equations using covariant functions, and then finding the solution simultaneously for all &lt;span class=&quot;snippet&quot;&gt;affine&lt;/span&gt; transformations. Quantitative evaluation confirms the performance of the method.&lt;/p&gt;&lt;/div&gt;&lt;p&gt;&amp;nbsp;&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type><accession-num><style face="normal" font="default" size="100%">13324478 </style></accession-num></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Jhimli Mitra</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Soumya Ghose</style></author><author><style face="normal" font="default" size="100%">Desire Sidibe</style></author><author><style face="normal" font="default" size="100%">Robert Martí</style></author><author><style face="normal" font="default" size="100%">Xavier Lladó</style></author><author><style face="normal" font="default" size="100%">Oliver Arnau</style></author><author><style face="normal" font="default" size="100%">Joan C Vilanova</style></author><author><style face="normal" font="default" size="100%">Fabrice Meriaudeau</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Jan-Olof Eklundh</style></author><author><style face="normal" font="default" size="100%">Yuichi Ohta</style></author><author><style face="normal" font="default" size="100%">Steven 
Tanimoto</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Spectral clustering to model deformations for fast multimodal prostate registration</style></title><secondary-title><style face="normal" font="default" size="100%">International Conference on Pattern Recognition (ICPR)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2012</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Nov 2012</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://hal.archives-ouvertes.fr/docs/00/71/09/43/PDF/ICPR_Jhimli.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Tsukuba, Japan</style></pub-location><pages><style face="normal" font="default" size="100%">2622 - 2625</style></pages><isbn><style face="normal" font="default" size="100%">978-1-4673-2216-4 </style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;div class=&quot;article&quot;&gt;&lt;p&gt;This paper proposes a method &lt;span class=&quot;snippet&quot;&gt;to&lt;/span&gt; learn &lt;span class=&quot;snippet&quot;&gt;deformation&lt;/span&gt; parameters off-line for &lt;span class=&quot;snippet&quot;&gt;fast&lt;/span&gt; &lt;span class=&quot;snippet&quot;&gt;multimodal&lt;/span&gt; &lt;span class=&quot;snippet&quot;&gt;registration&lt;/span&gt; of ultrasound and magnetic resonance &lt;span class=&quot;snippet&quot;&gt;prostate&lt;/span&gt; images during ultrasound guided needle biopsy. 
The &lt;span class=&quot;snippet&quot;&gt;registration&lt;/span&gt; method involves &lt;span class=&quot;snippet&quot;&gt;spectral&lt;/span&gt; &lt;span class=&quot;snippet&quot;&gt;clustering&lt;/span&gt; of the &lt;span class=&quot;snippet&quot;&gt;deformation&lt;/span&gt; parameters obtained from a spline-based nonlinear diffeomorphism between training magnetic resonance and ultrasound &lt;span class=&quot;snippet&quot;&gt;prostate&lt;/span&gt; images. The &lt;span class=&quot;snippet&quot;&gt;deformation&lt;/span&gt; &lt;span class=&quot;snippet&quot;&gt;models&lt;/span&gt; built from the principal eigen-modes of the &lt;span class=&quot;snippet&quot;&gt;clusters&lt;/span&gt; are then applied on a test magnetic resonance image &lt;span class=&quot;snippet&quot;&gt;to&lt;/span&gt; register with the test ultrasound &lt;span class=&quot;snippet&quot;&gt;prostate&lt;/span&gt; image. The &lt;span class=&quot;snippet&quot;&gt;deformation&lt;/span&gt; &lt;span class=&quot;snippet&quot;&gt;model&lt;/span&gt; with the least &lt;span class=&quot;snippet&quot;&gt;registration&lt;/span&gt; error is finally chosen as the optimal &lt;span class=&quot;snippet&quot;&gt;model&lt;/span&gt; for deformable &lt;span class=&quot;snippet&quot;&gt;registration&lt;/span&gt;. The rationale behind &lt;span class=&quot;snippet&quot;&gt;modeling&lt;/span&gt; &lt;span class=&quot;snippet&quot;&gt;deformations&lt;/span&gt; is &lt;span class=&quot;snippet&quot;&gt;to&lt;/span&gt; achieve &lt;span class=&quot;snippet&quot;&gt;fast&lt;/span&gt; &lt;span class=&quot;snippet&quot;&gt;multimodal&lt;/span&gt; &lt;span class=&quot;snippet&quot;&gt;registration&lt;/span&gt; of &lt;span class=&quot;snippet&quot;&gt;prostate&lt;/span&gt; images while maintaining &lt;span class=&quot;snippet&quot;&gt;registration&lt;/span&gt; accuracies which is otherwise computationally expensive. 
The method is validated for 25 patients each with a pair of corresponding magnetic resonance and ultrasound images in a leave-one-out validation framework. The average &lt;span class=&quot;snippet&quot;&gt;registration&lt;/span&gt; accuracies i.e. Dice similarity coefficient of 0.927 ± 0.025, 95% Hausdorff distance of 5.14 ± 3.67 mm and target &lt;span class=&quot;snippet&quot;&gt;registration&lt;/span&gt; error of 2.44 ± 1.17 mm are obtained by our method with a speed-up in computation time by 98% when compared &lt;span class=&quot;snippet&quot;&gt;to&lt;/span&gt; Mitra et al. [7].&lt;/p&gt;&lt;/div&gt;&lt;p&gt;&amp;nbsp;&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type><accession-num><style face="normal" font="default" size="100%">13325059</style></accession-num></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Jhimli Mitra</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Robert Martí</style></author><author><style face="normal" font="default" size="100%">Oliver Arnau</style></author><author><style face="normal" font="default" size="100%">Xavier Lladó</style></author><author><style face="normal" font="default" size="100%">Desire Sidibe</style></author><author><style face="normal" font="default" size="100%">Soumya Ghose</style></author><author><style face="normal" font="default" size="100%">Joan C Vilanova</style></author><author><style face="normal" font="default" size="100%">Josep Comet</style></author><author><style face="normal" font="default" size="100%">Fabrice Meriaudeau</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A spline-based non-linear diffeomorphism for multimodal prostate 
registration.</style></title><secondary-title><style face="normal" font="default" size="100%">MEDICAL IMAGE ANALYSIS</style></secondary-title><short-title><style face="normal" font="default" size="100%">MED IMAGE ANAL</style></short-title></titles><dates><year><style  face="normal" font="default" size="100%">2012</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Aug 2012</style></date></pub-dates></dates><volume><style face="normal" font="default" size="100%">16</style></volume><pages><style face="normal" font="default" size="100%">1259 - 1279</style></pages><isbn><style face="normal" font="default" size="100%">1361-8415</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;This paper presents a novel method for non-rigid registration of transrectal ultrasound and magnetic resonance prostate images based on a non-linear regularized framework of point correspondences obtained from a statistical measure of shape-contexts. The segmented prostate shapes are represented by shape-contexts and the Bhattacharyya distance between the shape representations is used to find the point correspondences between the 2D fixed and moving images. The registration method involves parametric estimation of the non-linear diffeomorphism between the multimodal images and has its basis in solving a set of non-linear equations of thin-plate splines. The solution is obtained as the least-squares solution of an over-determined system of non-linear equations constructed by integrating a set of non-linear functions over the fixed and moving images. However, this may not result in clinically acceptable transformations of the anatomical targets. Therefore, the regularized bending energy of the thin-plate splines along with the localization error of established correspondences should be included in the system of equations. 
The registration accuracies of the proposed method are evaluated in 20 pairs of prostate mid-gland ultrasound and magnetic resonance images. The results obtained in terms of Dice similarity coefficient show an average of 0.980+/-0.004, average 95% Hausdorff distance of 1.63+/-0.48mm and mean target registration and target localization errors of 1.60+/-1.17mm and 0.15+/-0.12mm respectively.&lt;/p&gt;</style></abstract><issue><style face="normal" font="default" size="100%">6</style></issue><work-type><style face="normal" font="default" size="100%">Journal article</style></work-type><notes><style face="normal" font="default" size="100%">UT: 000309694100015ScopusID: 84866118888doi: 10.1016/j.media.2012.04.006</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Aurélio Campilho</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">A Unifying Framework for Correspondence-less Linear Shape Alignment</style></title><secondary-title><style face="normal" font="default" size="100%">International Conference on Image Analysis and Recognition (ICIAR)</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lecture Notes in Computer Science</style></tertiary-title><short-title><style face="normal" font="default" size="100%">LNCS</style></short-title></titles><dates><year><style  face="normal" font="default" size="100%">2012</style></year><pub-dates><date><style  face="normal" font="default" size="100%">June 2012</style></date></pub-dates></dates><number><style face="normal" font="default" size="100%">7324</style></number><publisher><style face="normal" font="default" size="100%">Springer Verlag</style></publisher><pub-location><style 
face="normal" font="default" size="100%">Aveiro, Portugal</style></pub-location><pages><style face="normal" font="default" size="100%">277 - 284</style></pages><isbn><style face="normal" font="default" size="100%">978-3-642-31294-6</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;div class=&quot;abstract-content formatted&quot; itemprop=&quot;description&quot;&gt;&lt;p class=&quot;a-plus-plus&quot;&gt;We consider the estimation of linear transformations aligning a known binary shape and its distorted observation. The classical way to solve this registration problem is to find correspondences between the two images and then compute the transformation parameters from these landmarks. Here we propose a unified framework where the exact transformation is obtained as the solution of either a polynomial or a linear system of equations without establishing correspondences. The advantages of the proposed solutions are that they are fast, easy to implement, have linear time complexity, work without landmark correspondences and are independent of the magnitude of transformation.&lt;/p&gt;&lt;/div&gt;&lt;p&gt;&amp;nbsp;&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type><notes><style face="normal" font="default" size="100%">UT: 000323558000033</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zsolt Santa</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A Unifying Framework for Non-linear Registration of 3D Objects</style></title><secondary-title><style face="normal" font="default" size="100%">IEEE International 
Conference on Cognitive Infocommunications (CogInfoCom)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2012</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Dec 2012</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.inf.u-szeged.hu/~kato/papers/coginfocomm2012.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Kosice, Slovakia </style></pub-location><pages><style face="normal" font="default" size="100%">547 - 552</style></pages><isbn><style face="normal" font="default" size="100%">978-1-4673-5187-4 </style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;div class=&quot;article&quot;&gt;&lt;p&gt;An extension of our earlier work is proposed to find a &lt;span class=&quot;snippet&quot;&gt;non&lt;/span&gt;-&lt;span class=&quot;snippet&quot;&gt;linear&lt;/span&gt; aligning transformation between a pair of deformable &lt;span class=&quot;snippet&quot;&gt;3D&lt;/span&gt; &lt;span class=&quot;snippet&quot;&gt;objects&lt;/span&gt;. The basic idea is to set up a system of nonlinear equations whose solution directly provides the parameters of the aligning transformation. Each equation is generated by integrating a nonlinear function over the &lt;span class=&quot;snippet&quot;&gt;object&lt;/span&gt;'s domains. Thus the number of equations is determined by the number of adopted nonlinear functions yielding a flexible mechanism to generate sufficiently many equations. While classical approaches would establish correspondences between the shapes, our method works without landmarks. 
Experiments with &lt;span class=&quot;snippet&quot;&gt;3D&lt;/span&gt; polynomial and thin plate spline deformations confirm the performance of the &lt;span class=&quot;snippet&quot;&gt;framework&lt;/span&gt;.&lt;/p&gt;&lt;/div&gt;&lt;p&gt;&amp;nbsp;&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type><notes><style face="normal" font="default" size="100%">UT: 000320454200086</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Attila Tanacs</style></author><author><style face="normal" font="default" size="100%">Joakim Lindblad</style></author><author><style face="normal" font="default" size="100%">Nataša Sladoje</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Kálmán Palágyi</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">3D objektumok lineáris deformációinak becslése</style></title><secondary-title><style face="normal" font="default" size="100%">A Képfeldolgozók és Alakfelismerők Társaságának konferenciája - KÉPAF 2011</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2011</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Jan 2011</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">NJSZT</style></publisher><pub-location><style face="normal" font="default" size="100%">Szeged</style></pub-location><pages><style face="normal" font="default" size="100%">471 - 480</style></pages><language><style face="normal" font="default" 
size="100%">eng</style></language><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Csaba Domokos</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Kálmán Palágyi</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Affin Puzzle: Deformált objektumdarabok helyreállítása megfeleltetések nélkül</style></title><secondary-title><style face="normal" font="default" size="100%">A Képfeldolgozók és Alakfelismerők Társaságának konferenciája - KÉPAF 2011</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2011</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Jan 2011</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.inf.u-szeged.hu/kepaf2011/pdfs/S05_03.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">NJSZT</style></publisher><pub-location><style face="normal" font="default" size="100%">Szeged</style></pub-location><pages><style face="normal" font="default" size="100%">206 - 220</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type><notes><style face="normal" font="default" size="100%">Kuba Attila Díjas cikk.</style></notes></record><record><source-app name="Biblio" 
version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Mihály Gara</style></author><author><style face="normal" font="default" size="100%">Péter Balázs</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Kálmán Palágyi</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Bináris tomográfiai rekonstrukció objektum alapú evolúciós algoritmussal</style></title><secondary-title><style face="normal" font="default" size="100%">A Képfeldolgozók és Alakfelismerők Társaságának konferenciája - KÉPAF 2011</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2011</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Jan 2011</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">NJSZT</style></publisher><pub-location><style face="normal" font="default" size="100%">Szeged</style></pub-location><pages><style face="normal" font="default" size="100%">117 - 127</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Milan Lesko</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Antal Nagy</style></author><author><style face="normal" font="default" size="100%">Imre Gombos</style></author><author><style face="normal" font="default" size="100%">Zsolt Török</style></author><author><style 
face="normal" font="default" size="100%">László Vígh</style></author><author><style face="normal" font="default" size="100%">László Vígh</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Kálmán Palágyi</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Élősejt szegmentálása gráfvágás segítségével fluoreszcenciás mikroszkóp képeken</style></title><secondary-title><style face="normal" font="default" size="100%">A Képfeldolgozók és Alakfelismerők Társaságának konferenciája - KÉPAF 2011</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2011</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Jan 2011</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.inf.u-szeged.hu/kepaf2011/pdfs/S08_02.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">NJSZT</style></publisher><pub-location><style face="normal" font="default" size="100%">Szeged</style></pub-location><pages><style face="normal" font="default" size="100%">319 - 328</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Attila Tanacs</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Yongsheng Ding</style></author><author><style face="normal" font="default" size="100%">Yonghong 
Peng</style></author><author><style face="normal" font="default" size="100%">Riyi Shi</style></author><author><style face="normal" font="default" size="100%">Kuangrong Hao</style></author><author><style face="normal" font="default" size="100%">Lipo Wang</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Fast linear registration of 3D objects segmented from medical images</style></title><secondary-title><style face="normal" font="default" size="100%">Biomedical Engineering and Informatics (BMEI)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2011</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Oct 2011</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Shanghai</style></pub-location><pages><style face="normal" font="default" size="100%">294 - 298</style></pages><isbn><style face="normal" font="default" size="100%">978-1-4244-9351-7 </style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;In this paper a linear registration framework is used for medical image registration using segmented binary objects. The method is best suited for problems where the segmentation is available, but we also propose a general bone segmentation approach for CT images. We focus on the case when the objects to be registered differ considerably because of segmentation errors. We check the applicability of the method to bone segmentation of pelvic and thoracic CT images. Comparison is also made against a classical mutual information-based registration method. 
© 2011 IEEE.&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type><accession-num><style face="normal" font="default" size="100%">12436502 </style></accession-num><notes><style face="normal" font="default" size="100%">ScopusID: 84855764850doi: 10.1109/BMEI.2011.6098290</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Péter Kardos</style></author><author><style face="normal" font="default" size="100%">Gábor Németh</style></author><author><style face="normal" font="default" size="100%">Kálmán Palágyi</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Kálmán Palágyi</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Iterációnkénti simítással kombinált vékonyítás</style></title><secondary-title><style face="normal" font="default" size="100%">A Képfeldolgozók és Alakfelismerők Társaságának konferenciája - KÉPAF 2011</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2011</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Jan 2011</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.inf.u-szeged.hu/kepaf2011/pdfs/S05_01.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">NJSZT</style></publisher><pub-location><style face="normal" font="default" size="100%">Szeged</style></pub-location><pages><style face="normal" font="default" size="100%">174 - 189</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><work-type><style face="normal" font="default" 
size="100%">Conference paper</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Norbert Hantos</style></author><author><style face="normal" font="default" size="100%">Péter Balázs</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Kálmán Palágyi</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Mediánszűrés alkalmazása algebrai rekonstrukciós módszerekben</style></title><secondary-title><style face="normal" font="default" size="100%">A Képfeldolgozók és Alakfelismerők Társaságának konferenciája - KÉPAF 2011</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2011</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Jan 2011</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">NJSZT</style></publisher><pub-location><style face="normal" font="default" size="100%">Szeged</style></pub-location><pages><style face="normal" font="default" size="100%">106 - 116</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Jozsef Nemeth</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Ian Jermyn</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Jacques 
Blanc-Talon</style></author><author><style face="normal" font="default" size="100%">Wilfried Philips</style></author><author><style face="normal" font="default" size="100%">Dan Popescu</style></author><author><style face="normal" font="default" size="100%">Paul Scheunders</style></author><author><style face="normal" font="default" size="100%">Richard Kleihorst</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">A Multi-Layer 'Gas of Circles' Markov Random Field Model for the Extraction of Overlapping Near-Circular Objects</style></title><secondary-title><style face="normal" font="default" size="100%">Advanced Concepts for Intelligent Vision Systems (ACIVS)</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lecture Notes in Computer Science</style></tertiary-title><short-title><style face="normal" font="default" size="100%">LNCS</style></short-title></titles><dates><year><style  face="normal" font="default" size="100%">2011</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Aug 2011</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.inf.u-szeged.hu/ipcg/publications/Year/2011.complete.xml#Nemeth-etal2011</style></url></web-urls></urls><number><style face="normal" font="default" size="100%">6915</style></number><publisher><style face="normal" font="default" size="100%">Springer-Verlag</style></publisher><pub-location><style face="normal" font="default" size="100%">Ghent, Belgium</style></pub-location><pages><style face="normal" font="default" size="100%">171 - 182</style></pages><isbn><style face="normal" font="default" size="100%">978-3-642-23686-0</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;We propose a multi-layer binary Markov random field (MRF) model that 
assigns high probability to object configurations in the image domain consisting of an unknown number of possibly touching or overlapping near-circular objects of approximately a given size. Each layer has an associated binary field that specifies a region corresponding to objects. Overlapping objects are represented by regions in different layers. Within each layer, long-range interactions favor connected components of approximately circular shape, while regions in different layers that overlap are penalized. Used as a prior coupled with a suitable data likelihood, the model can be used for object extraction from images, e.g. cells in biological images or densely-packed tree crowns in remote sensing images. We present a theoretical and experimental analysis of the model, and demonstrate its performance on various synthetic and biomedical images.&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type><notes><style face="normal" font="default" size="100%">UT: 000306962700016</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Jhimli Mitra</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Robert Martí</style></author><author><style face="normal" font="default" size="100%">Oliver Arnau</style></author><author><style face="normal" font="default" size="100%">Xavier Lladó</style></author><author><style face="normal" font="default" size="100%">Soumya Ghose</style></author><author><style face="normal" font="default" size="100%">Joan C Vilanova</style></author><author><style face="normal" font="default" size="100%">Fabrice Meriaudeau</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A non-linear 
diffeomorphic framework for prostate multimodal registration</style></title><secondary-title><style face="normal" font="default" size="100%">International Conference on Digital Image Computing: Techniques and Applications (DICTA)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2011</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Dec 2011</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Noosa, QLD </style></pub-location><pages><style face="normal" font="default" size="100%">31 - 36</style></pages><isbn><style face="normal" font="default" size="100%">978-1-4577-2006-2 </style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;This paper presents a novel method for non-rigid registration of prostate multimodal images based on a nonlinear framework. The parametric estimation of the non-linear diffeomorphism between the 2D fixed and moving images has its basis in solving a set of non-linear equations of thin-plate splines. The regularized bending energy of the thin-plate splines along with the localization error of established correspondences is jointly minimized with the fixed and transformed image difference, where, the transformed image is represented by the set of non-linear equations defined over the moving image. The traditional thin-plate splines with established correspondences may provide good registration of the anatomical targets inside the prostate but may fail to provide improved contour registration. 
On the contrary, the proposed framework maintains the accuracy of registration in terms of overlap due to the non-linear thin-plate spline functions while also producing smooth deformations of the anatomical structures inside the prostate as a result of established correspondences. The registration accuracies of the proposed method are evaluated in 20 pairs of prostate mid-gland ultrasound and magnetic resonance images in terms of Dice similarity coefficient with an average of 0.982 ± 0.004, average 95% Hausdorff distance of 1.54 ± 0.46 mm and mean target registration and target localization errors of 1.90±1.27 mm and 0.15 ± 0.12 mm respectively. © 2011 IEEE.&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type><accession-num><style face="normal" font="default" size="100%">12476651 </style></accession-num><notes><style face="normal" font="default" size="100%">ScopusID: 84856980939doi: 10.1109/DICTA.2011.14</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>9</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltán Kornél Török</style></author><author><style face="normal" font="default" size="100%">Csaba Domokos</style></author><author><style face="normal" font="default" size="100%">Jozsef Nemeth</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Nonlinear Shape Registration without Correspondences</style></title></titles><dates><year><style  face="normal" font="default" size="100%">2011</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2011///</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" 
size="100%">http://www.inf.u-szeged.hu/~kato/software/planarhombinregdemo.html</style></url></web-urls></urls><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;This is the sample implementation and benchmark dataset of the nonlinear registration of 2D shapes described in the following papers: Csaba Domokos, Jozsef Nemeth, and Zoltan Kato. Nonlinear Shape Registration without Correspondences. IEEE Transactions on Pattern Analysis and Machine Intelligence, 34(5):943--958, May 2012. Note that the current demo program implements only planar homography deformations. Other deformations can be easily implemented based on the demo code.&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Software</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>6</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">László Czúni</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Számítógépes látás</style></title></titles><dates><year><style  face="normal" font="default" size="100%">2011</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2011</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">Typotex Kiadó</style></publisher><pub-location><style face="normal" font="default" size="100%">Budapest</style></pub-location><language><style face="normal" font="default" size="100%">hun</style></language><work-type><style face="normal" font="default" size="100%">Book</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" 
size="100%">Gábor Németh</style></author><author><style face="normal" font="default" size="100%">Péter Kardos</style></author><author><style face="normal" font="default" size="100%">Kálmán Palágyi</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Kálmán Palágyi</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">A topológia-megőrzés elegendő feltételein alapuló 3D párhuzamos vékonyító algoritmusok</style></title><secondary-title><style face="normal" font="default" size="100%">A Képfeldolgozók és Alakfelismerők Társaságának konferenciája - KÉPAF 2011</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2011</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Jan 2011</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.inf.u-szeged.hu/kepaf2011/pdfs/S05_02.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">NJSZT</style></publisher><pub-location><style face="normal" font="default" size="100%">Szeged</style></pub-location><pages><style face="normal" font="default" size="100%">190 - 205</style></pages><language><style face="normal" font="default" size="100%">hun</style></language><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">László Gábor Varga</style></author><author><style face="normal" font="default" size="100%">Péter Balázs</style></author><author><style face="normal" font="default" size="100%">Antal Nagy</style></author></authors><secondary-authors><author><style 
face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Kálmán Palágyi</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Vetületi irányfüggőség a bináris tomográfiában</style></title><secondary-title><style face="normal" font="default" size="100%">A Képfeldolgozók és Alakfelismerők Társaságának konferenciája - KÉPAF 2011</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2011</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Jan 2011</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">NJSZT</style></publisher><pub-location><style face="normal" font="default" size="100%">Szeged</style></pub-location><pages><style face="normal" font="default" size="100%">92 - 105</style></pages><language><style face="normal" font="default" size="100%">hun</style></language><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Csaba Domokos</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Kostas Daniilidis</style></author><author><style face="normal" font="default" size="100%">Petros Maragos</style></author><author><style face="normal" font="default" size="100%">Nikos Paragios</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Affine puzzle: Realigning deformed object fragments without correspondences</style></title><secondary-title><style face="normal" font="default" size="100%">European Conference on Computer Vision 
(ECCV)</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lecture Notes in Computer Science</style></tertiary-title><short-title><style face="normal" font="default" size="100%">LNCS</style></short-title></titles><dates><year><style  face="normal" font="default" size="100%">2010</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Sep 2010</style></date></pub-dates></dates><number><style face="normal" font="default" size="100%">6312</style></number><publisher><style face="normal" font="default" size="100%">Springer</style></publisher><pub-location><style face="normal" font="default" size="100%">Crete, Greece</style></pub-location><pages><style face="normal" font="default" size="100%">777 - 790</style></pages><isbn><style face="normal" font="default" size="100%">978-3-642-15551-2</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;This paper is addressing the problem of realigning broken objects without correspondences. We consider linear transformations between the object fragments and present the method through 2D and 3D affine transformations. The basic idea is to construct and solve a polynomial system of equations which provides the unknown parameters of the alignment. We have quantitatively evaluated the proposed algorithm on a large synthetic dataset containing 2D and 3D images. The results show that the method performs well and robust against segmentation errors. We also present experiments on 2D real images as well as on volumetric medical images applied to surgical planning. 
© 2010 Springer-Verlag.&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type><notes><style face="normal" font="default" size="100%">UT: 000286164000056ScopusID: 78149337447doi: 10.1007/978-3-642-15552-9_56</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Attila Tanacs</style></author><author><style face="normal" font="default" size="100%">Joakim Lindblad</style></author><author><style face="normal" font="default" size="100%">Nataša Sladoje</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Estimation of linear deformations of 3D objects</style></title><secondary-title><style face="normal" font="default" size="100%">IEEE International Conference on Image Processing (ICIP)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2010</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Sep 2010</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Hong Kong, Hong Kong</style></pub-location><pages><style face="normal" font="default" size="100%">153 - 156</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;We propose a registration method to find affine transformations between 3D objects by constructing and solving an overdetermined system of polynomial equations. We utilize voxel coverage information for more precise object boundary description. An iterative solution enables us to easily adjust the method to recover e.g. 
rigid-body and similarity transformations. Synthetic tests show the advantage of the voxel coverage representation, and reveal the robustness properties of our method against different types of segmentation errors. The method is tested on a real medical CT volume. © 2010 IEEE.&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type><notes><style face="normal" font="default" size="100%">UT: 000287728000038ScopusID: 78651064516doi: 10.1109/ICIP.2010.5650932</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Milan Lesko</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Antal Nagy</style></author><author><style face="normal" font="default" size="100%">Imre Gombos</style></author><author><style face="normal" font="default" size="100%">Zsolt Török</style></author><author><style face="normal" font="default" size="100%">László Vígh</style></author><author><style face="normal" font="default" size="100%">László Vígh</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Aytul Ercil</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Live cell segmentation in fluorescence microscopy via graph cut</style></title><secondary-title><style face="normal" font="default" size="100%">20th international conference on pattern recognition (ICPR 2010)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2010</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Aug 2010</style></date></pub-dates></dates><publisher><style face="normal" font="default" 
size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Istanbul, Turkey</style></pub-location><pages><style face="normal" font="default" size="100%">1485 - 1488</style></pages><isbn><style face="normal" font="default" size="100%">978-1-4244-7542-1 </style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;We propose a novel Markovian segmentation model which takes into account edge information. By construction, the model uses only pairwise interactions and its energy is submodular. Thus the exact energy minima is obtained via a max-flow/min-cut algorithm. The method has been quantitatively evaluated on synthetic images as well as on fluorescence microscopic images of live cells. © 2010 IEEE.&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type><accession-num><style face="normal" font="default" size="100%">11593484 </style></accession-num><notes><style face="normal" font="default" size="100%">ScopusID: 78149486419doi: 10.1109/ICPR.2010.367Besorolás: Konferenciaközlemény</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Csaba Domokos</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Parametric estimation of affine deformations of planar shapes</style></title><secondary-title><style face="normal" font="default" size="100%">PATTERN RECOGNITION</style></secondary-title><short-title><style face="normal" font="default" size="100%">PATTERN RECOGN</style></short-title></titles><dates><year><style  face="normal" font="default" size="100%">2010</style></year><pub-dates><date><style 
 face="normal" font="default" size="100%">March 2010</style></date></pub-dates></dates><volume><style face="normal" font="default" size="100%">43</style></volume><pages><style face="normal" font="default" size="100%">569 - 578</style></pages><isbn><style face="normal" font="default" size="100%">0031-3203</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><issue><style face="normal" font="default" size="100%">3</style></issue><work-type><style face="normal" font="default" size="100%">Journal article</style></work-type><notes><style face="normal" font="default" size="100%">UT: 000273094100003doi: 10.1016/j.patcog.2009.08.013</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Albert Dipanda</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Albert Dipanda</style></author><author><style face="normal" font="default" size="100%">Richard Chbeir</style></author><author><style face="normal" font="default" size="100%">Kokou Yetongnon</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">SITIS 2010: Track SIT editorial message: Signal and Image Technologies</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the 6th International Conference on Signal Image Technology and Internet Based Systems, SITIS 2010</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2010</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2010</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE Computer Society Press</style></publisher><pub-location><style 
face="normal" font="default" size="100%">Kuala Lumpur</style></pub-location><pages><style face="normal" font="default" size="100%">XV</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><notes><style face="normal" font="default" size="100%">ScopusID: 79952549721</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Csaba Domokos</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Affine alignment of compound objects: A direct approach</style></title><secondary-title><style face="normal" font="default" size="100%">16th IEEE International Conference on Image Processing (ICIP), 2009</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Nov 2009</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Cairo, Egypt</style></pub-location><pages><style face="normal" font="default" size="100%">169 - 172</style></pages><isbn><style face="normal" font="default" size="100%">978-1-4244-5653-6 </style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;A direct approach for parametric estimation of 2D affine deformations between compound shapes is proposed. It provides the result as a least-square solution of a linear system of equations. The basic idea is to fit Gaussian densities over the objects yielding covariant functions, which preserves the effect of the unknown transformation. 
Based on these functions, linear equations are constructed by integrating nonlinear functions over appropriate domains. The main advantages are: linear complexity, easy implementation, works without any time consuming optimization or established correspondences. Comparative tests show that it outperforms state-of-the-art methods both in terms of precision, robustness and complexity. ©2009 IEEE.&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type><accession-num><style face="normal" font="default" size="100%">11150920</style></accession-num><notes><style face="normal" font="default" size="100%">UT: 000280464300043ScopusID: 77951939917doi: 10.1109/ICIP.2009.5414195</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>9</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zsolt Katona</style></author><author><style face="normal" font="default" size="100%">Csaba Domokos</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Affine Registration of Planar Shapes</style></title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2009///</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.inf.u-szeged.hu/~kato/software/affbinregdemo.html</style></url></web-urls></urls><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;This is the sample implementation and benchmark dataset of the binary image registration algorithm described in the following paper: Csaba Domokos and Zoltan Kato. 
Parametric Estimation of Affine Deformations of Planar Shapes. Pattern Recognition, 43(3):569--578, March 2010.&lt;/p&gt;</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Csaba Benedek</style></author><author><style face="normal" font="default" size="100%">Tamas Sziranyi</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Detection of Object Motion Regions in Aerial Image Pairs with a Multilayer Markovian Model</style></title><secondary-title><style face="normal" font="default" size="100%">IEEE TRANSACTIONS ON IMAGE PROCESSING</style></secondary-title><short-title><style face="normal" font="default" size="100%">IEEE T IMAGE PROCESS</style></short-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2009</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><volume><style face="normal" font="default" size="100%">18</style></volume><pages><style face="normal" font="default" size="100%">2303 - 2315</style></pages><isbn><style face="normal" font="default" size="100%">1057-7149</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;We propose a new Bayesian method for detecting the regions of object displacements in aerial image pairs. We use a robust but coarse 2-D image registration algorithm. Our main challenge is to eliminate the registration errors from the extracted change map. 
We introduce a three-layer Markov Random Field (L3MRF) model which integrates information from two different features, and ensures connected homogenous regions in the segmented images. Validation is given on real aerial photos.&lt;/p&gt;</style></abstract><issue><style face="normal" font="default" size="100%">10</style></issue><work-type><style face="normal" font="default" size="100%">Journal article</style></work-type><notes><style face="normal" font="default" size="100%">UT: 000269715500013ScopusID: 70349442338doi: 10.1109/TIP.2009.2025808</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Peter Horvath</style></author><author><style face="normal" font="default" size="100%">Ian Jermyn</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A higher-order active contour model of a 'gas of circles' and its application to tree crown extraction</style></title><secondary-title><style face="normal" font="default" size="100%">PATTERN RECOGNITION</style></secondary-title><short-title><style face="normal" font="default" size="100%">PATTERN RECOGN</style></short-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2009///</style></date></pub-dates></dates><volume><style face="normal" font="default" size="100%">42</style></volume><pages><style face="normal" font="default" size="100%">699 - 709</style></pages><isbn><style face="normal" font="default" size="100%">0031-3203</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><issue><style face="normal" font="default" 
size="100%">5</style></issue><notes><style face="normal" font="default" size="100%">UT: 000263431200011doi: 10.1016/j.patcog.2008.09.008</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Tamás Blaskovics</style></author><author><style face="normal" font="default" size="100%">Ian Jermyn</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Dmitrij Chetverikov</style></author><author><style face="normal" font="default" size="100%">Tamas Sziranyi</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Kör alakú objektumok szegmentálása Markov mező segítségével</style></title><secondary-title><style face="normal" font="default" size="100%">A Képfeldolgozók és Alakfelismerők Társaságának konferenciája - KÉPAF 2009</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Jan 2009</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://vision.sztaki.hu/~kepaf/kepaf2009_CD/files/116-4-MRFCircle08.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Akaprint</style></publisher><pub-location><style face="normal" font="default" size="100%">Budapest</style></pub-location><pages><style face="normal" font="default" size="100%">1 - 9</style></pages><language><style face="normal" font="default" size="100%">hun</style></language><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type><notes><style face="normal" font="default" size="100%">Received the Attila Kuba 
Prize</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Tamás Blaskovics</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Ian Jermyn</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A Markov random field model for extracting near-circular shapes</style></title><secondary-title><style face="normal" font="default" size="100%">16th IEEE International Conference on Image Processing (ICIP)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Nov 2009</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Cairo, Egypt</style></pub-location><pages><style face="normal" font="default" size="100%">1073 - 1076</style></pages><isbn><style face="normal" font="default" size="100%">978-1-4244-5653-6 </style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;We propose a binary Markov Random Field (MRF) model that assigns high probability to regions in the image domain consisting of an unknown number of circles of a given radius. We construct the model by discretizing the 'gas of circles' phase field model in a principled way, thereby creating an 'equivalent' MRF. The behaviour of the resulting MRF model is analyzed, and the performance of the new model is demonstrated on various synthetic images as well as on the problem of tree crown detection in aerial images. 
©2009 IEEE.&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type><notes><style face="normal" font="default" size="100%">UT: 000280464300268ScopusID: 77951945383doi: 10.1109/ICIP.2009.5413472</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Jozsef Nemeth</style></author><author><style face="normal" font="default" size="100%">Csaba Domokos</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Nonlinear registration of binary shapes</style></title><secondary-title><style face="normal" font="default" size="100%">16th IEEE International Conference on Image Processing (ICIP)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Nov 2009</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Cairo, Egypt</style></pub-location><pages><style face="normal" font="default" size="100%">1101 - 1104</style></pages><isbn><style face="normal" font="default" size="100%">978-1-4244-5653-6 </style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;A novel approach is proposed to estimate the parameters of a diffeomorphism that aligns two binary images. Classical approaches usually define a cost function based on a similarity metric and then find the solution via optimization. 
Herein, we trace back the problem to the solution of a system of non-linear equations which directly provides the parameters of the aligning transformation. The proposed method works without any time consuming optimization step or established correspondences. The advantage of our algorithm is that it is easy to implement, less sensitive to the strength of the deformation, and robust against segmentation errors. The efficiency of the proposed approach has been demonstrated on a large synthetic dataset as well as in the context of an industrial application. ©2009 IEEE.&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type><notes><style face="normal" font="default" size="100%">UT: 000280464300275ScopusID: 77951946286doi: 10.1109/ICIP.2009.5413468</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Attila Tanacs</style></author><author><style face="normal" font="default" size="100%">Csaba Domokos</style></author><author><style face="normal" font="default" size="100%">Nataša Sladoje</style></author><author><style face="normal" font="default" size="100%">Joakim Lindblad</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Arnt-Borre Salberg</style></author><author><style face="normal" font="default" size="100%">Jon Yngve Hardeberg</style></author><author><style face="normal" font="default" size="100%">Robert Jenssen</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Recovering affine deformations of fuzzy shapes</style></title><secondary-title><style face="normal" font="default" size="100%">Image Analysis</style></secondary-title><tertiary-title><style face="normal" 
font="default" size="100%">Lecture Notes in Computer Science</style></tertiary-title><short-title><style face="normal" font="default" size="100%">LNCS</style></short-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">June 2009</style></date></pub-dates></dates><number><style face="normal" font="default" size="100%">5575</style></number><publisher><style face="normal" font="default" size="100%">Springer-Verlag</style></publisher><pub-location><style face="normal" font="default" size="100%">Oslo, Norway</style></pub-location><pages><style face="normal" font="default" size="100%">735 - 744</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;Fuzzy sets and fuzzy techniques are attracting increasing attention nowadays in the field of image processing and analysis. It has been shown that the information preserved by using fuzzy representation based on area coverage may be successfully utilized to improve precision and accuracy of several shape descriptors; geometric moments of a shape are among them. We propose to extend an existing binary shape matching method to take advantage of fuzzy object representation. The result of a synthetic test show that fuzzy representation yields smaller registration errors in average. A segmentation method is also presented to generate fuzzy segmentations of real images. The applicability of the proposed methods is demonstrated on real X-ray images of hip replacement implants. 
© 2009 Springer Berlin Heidelberg.&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type><notes><style face="normal" font="default" size="100%">UT: 000268661000075ScopusID: 70350676212doi: 10.1007/978-3-642-02230-2_75</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Jozsef Nemeth</style></author><author><style face="normal" font="default" size="100%">Csaba Domokos</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">IEEE</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Recovering planar homographies between 2D shapes</style></title><secondary-title><style face="normal" font="default" size="100%">12th International Conference on Computer Vision, ICCV 2009</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2009///</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pages><style face="normal" font="default" size="100%">2170 - 2176</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;Images taken from different views of a planar object are related by planar homography. Recovering the parameters of such transformations is a fundamental problem in computer vision with various applications. This paper proposes a novel method to estimate the parameters of a homography that aligns two binary images. 
It is obtained by solving a system of nonlinear equations generated by integrating linearly independent functions over the domains determined by the shapes. The advantage of the proposed solution is that it is easy to implement, less sensitive to the strength of the deformation, works without established correspondences and robust against segmentation errors. The method has been tested on synthetic as well as on real images and its efficiency has been demonstrated in the context of two different applications: alignment of hip prosthesis X-ray images and matching of traffic signs. ©2009 IEEE.&lt;/p&gt;</style></abstract><notes><style face="normal" font="default" size="100%">UT: 000294955300280ScopusID: 77953177385doi: 10.1109/ICCV.2009.5459474</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Csaba Domokos</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Dmitrij Chetverikov</style></author><author><style face="normal" font="default" size="100%">Tamas Sziranyi</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Síkbeli alakzatok regisztrációja kovariáns függvények felhasználásával</style></title><secondary-title><style face="normal" font="default" size="100%">A Képfeldolgozók és Alakfelismerők Társaságának konferenciája - KÉPAF 2009</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Jan 2009</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">Akaprint</style></publisher><pub-location><style face="normal" font="default" 
size="100%">Budapest</style></pub-location><pages><style face="normal" font="default" size="100%">1 - 8</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Jozsef Nemeth</style></author><author><style face="normal" font="default" size="100%">Csaba Domokos</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Dmitrij Chetverikov</style></author><author><style face="normal" font="default" size="100%">Tamas Sziranyi</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Síkhomográfia paramétereinek becslése bináris képeken</style></title><secondary-title><style face="normal" font="default" size="100%">A Képfeldolgozók és Alakfelismerők Társaságának konferenciája - KÉPAF 2009</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Jan 2009</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">Akaprint</style></publisher><pub-location><style face="normal" font="default" size="100%">Budapest</style></pub-location><pages><style face="normal" font="default" size="100%">1 - 8</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>13</ref-type><contributors><authors><author><style 
face="normal" font="default" size="100%">Mihály Gara</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Supervised Color Image Segmentation in a Markovian Framework</style></title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2009///</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.inf.u-szeged.hu/~kato/software/colormrfdemo.html</style></url></web-urls></urls><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;This is the sample implementation of a Markov random field based color image segmentation algorithm described in the following paper: Zoltan Kato, Ting Chuen Pong, and John Chung Mong Lee. Color Image Segmentation and Parameter Estimation in a Markovian Framework. Pattern Recognition Letters, 22(3-4):309--321, March 2001. Note that the current demo program implements only a supervised version of the segmentation method described in the above paper (i.e. parameter values are learned interactively from representative regions selected by the user). Otherwise, the program implements exactly the color MRF model proposed in the paper. 
Images are automatically converted from RGB to the perceptually uniform CIE-L*u*v* color space before segmentation.&lt;/p&gt;</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Csaba Domokos</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Aurélio Campilho</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Binary image registration using covariant gaussian densities</style></title><secondary-title><style face="normal" font="default" size="100%">Image Analysis and Recognition</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lecture Notes in Computer Science</style></tertiary-title><short-title><style face="normal" font="default" size="100%">LNCS</style></short-title></titles><dates><year><style  face="normal" font="default" size="100%">2008</style></year><pub-dates><date><style  face="normal" font="default" size="100%">June 2008</style></date></pub-dates></dates><number><style face="normal" font="default" size="100%">5112</style></number><publisher><style face="normal" font="default" size="100%">Springer</style></publisher><pub-location><style face="normal" font="default" size="100%">Póvoa de Varzim, Portugal</style></pub-location><pages><style face="normal" font="default" size="100%">455 - 464</style></pages><isbn><style face="normal" font="default" size="100%">978-3-540-69811-1</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;We consider the estimation of 2D affine transformations aligning a known binary shape and its distorted observation. 
The classical way to solve this registration problem is to find correspondences between the two images and then compute the transformation parameters from these landmarks. In this paper, we propose a novel approach where the exact transformation is obtained as a least-squares solution of a linear system. The basic idea is to fit a Gaussian density to the shapes which preserves the effect of the unknown transformation. It can also be regarded as a consistent coloring of the shapes yielding two rich functions defined over the two shapes to be matched. The advantage of the proposed solution is that it is fast, easy to implement, works without established correspondences and provides a unique and exact solution regardless of the magnitude of transformation. © 2008 Springer-Verlag Berlin Heidelberg.&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type><notes><style face="normal" font="default" size="100%">UT: 000257302500045ScopusID: 47749098390doi: 10.1007/978-3-540-69812-8_45</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Péter Balázs</style></author><author><style face="normal" font="default" size="100%">Balázs Erdőhelyi</style></author><author><style face="normal" font="default" size="100%">Endre Katona</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Eörs Máté</style></author><author><style face="normal" font="default" size="100%">Antal Nagy</style></author><author><style face="normal" font="default" size="100%">László Gábor Nyúl</style></author><author><style face="normal" font="default" size="100%">Kálmán Palágyi</style></author><author><style face="normal" font="default" size="100%">Attila 
Tanacs</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Attila Pethő</style></author><author><style face="normal" font="default" size="100%">Miklós Herdon</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">A képfeldolgozás kutatása a Szegedi Tudományegyetemen</style></title><secondary-title><style face="normal" font="default" size="100%">Informatika a felsőoktatásban 2008</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2008</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2008///</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.agr.unideb.hu/if2008/kiadvany/papers/E62.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Debreceni Egyetem Informatikai Kar</style></publisher><pub-location><style face="normal" font="default" size="100%">Debrecen</style></pub-location><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">A digitális képfeldolgozás kutatásának a Szegedi Tudományegyetem Természettudományi és Informatikai Karán, az Informatikai 
Tanszékcsoport Képfeldolgozás és Számítógépes Grafika Tanszékén 
közel négy évtizedes hagyománya van.
A Tanszék valamennyi munkatársa nemzetközileg elismert 
kutatómunkát folytat, melyet már több száz rangos publikáció 
fémjelez. Számos, a képfeldolgozás kutatásában vezető egyetemmel 
és kutatóintézettel építettünk ki szoros kapcsolatot és 
folytattunk eredményes kutatómunkát, aktív résztvevői vagyunk a 
hazai és a nemzetközi tudományos közéletnek.
A legfontosabb, jelenleg is folyó kutatásaink: orvosi képek 
feldolgozása, diszkrét tomográfia, képszegmentálás, 
térinformatika, távérzékelés, képregisztráció, vázkijelölés, 
műtéti tervezés.
</style></abstract><notes><style face="normal" font="default" size="100%">Art. No.: E62</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Csaba Domokos</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Joseph M Francos</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Parametric estimation of affine deformations of binary images</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2008</style></year><pub-dates><date><style  face="normal" font="default" size="100%">March 2008</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Las Vegas, NV, USA</style></pub-location><pages><style face="normal" font="default" size="100%">889 - 892</style></pages><isbn><style face="normal" font="default" size="100%">978-1-4244-1483-3 </style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;We consider the problem of planar object registration on binary images where the aligning transformation is restricted to the group of affine transformations. Previous approaches usually require established correspondences or the solution of nonlinear optimization problems. Herein we show that it is possible to formulate the problem as the solution of a system of up to third order polynomial equations. 
These equations are constructed in a simple way using some basic geometric information of binary images. It does not need established correspondences nor the solution of complex optimization problems. The resulting algorithm is fast and provides a direct solution regardless of the magnitude of transformation. ©2008 IEEE.&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type><accession-num><style face="normal" font="default" size="100%">9973096 </style></accession-num><notes><style face="normal" font="default" size="100%">UT: 000257456700223ScopusID: 51449098982doi: 10.1109/ICASSP.2008.4517753</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Segmentation of color images via reversible jump MCMC sampling</style></title><secondary-title><style face="normal" font="default" size="100%">IMAGE AND VISION COMPUTING</style></secondary-title><short-title><style face="normal" font="default" size="100%">IMAGE VISION COMPUT</style></short-title></titles><dates><year><style  face="normal" font="default" size="100%">2008</style></year><pub-dates><date><style  face="normal" font="default" size="100%">March 2008</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">Elsevier</style></publisher><volume><style face="normal" font="default" size="100%">26</style></volume><pages><style face="normal" font="default" size="100%">361 - 371</style></pages><isbn><style face="normal" font="default" size="100%">0262-8856</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><issue><style face="normal" font="default" size="100%">3</style></issue><work-type><style face="normal" 
font="default" size="100%">Journal article</style></work-type><notes><style face="normal" font="default" size="100%">UT: 000252196500005doi: 10.1016/j.imavis.2006.12.004</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Peter Horvath</style></author><author><style face="normal" font="default" size="100%">Ian Jermyn</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Attila Fazekas</style></author><author><style face="normal" font="default" size="100%">András Hajdú</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Kör alakú objektumok szegmentálása magasabb rendű aktív kontúr modellek segítségével</style></title><secondary-title><style face="normal" font="default" size="100%">A Képfeldolgozók és Alakfelismerők Társaságának konferenciája - KÉPAF 2007</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2007</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Jan 2007</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">Képfeldolgozók és Alakfelismerők Társasága</style></publisher><pub-location><style face="normal" font="default" size="100%">Debrecen</style></pub-location><pages><style face="normal" font="default" size="100%">133 - 140</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type></record><record><source-app name="Biblio" 
version="7.x">Drupal-Biblio</source-app><ref-type>32</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Markovian Image Models and their Application in Image Segmentation</style></title></titles><dates><year><style  face="normal" font="default" size="100%">2007</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2007</style></date></pub-dates></dates><language><style face="normal" font="default" size="100%">eng</style></language><work-type><style face="normal" font="default" size="100%">PhD Thesis</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Csaba Domokos</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Joseph M Francos</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Attila Fazekas</style></author><author><style face="normal" font="default" size="100%">András Hajdú</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Parametric Estimation of Two-Dimensional Affine Transformations of Binary Images</style></title><secondary-title><style face="normal" font="default" size="100%">A Képfeldolgozók és Alakfelismerők Társaságának konferenciája - KÉPAF 2007</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2007</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Jan 2007</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">Képfeldolgozók és Alakfelismerők Társasága</style></publisher><pub-location><style 
face="normal" font="default" size="100%">Debrecen</style></pub-location><pages><style face="normal" font="default" size="100%">257 - 265</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>13</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Csaba Benedek</style></author><author><style face="normal" font="default" size="100%">Tamas Sziranyi</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A Three-layer MRF model for Object Motion Detection in Airborne Images</style></title></titles><dates><year><style  face="normal" font="default" size="100%">2007</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2007///</style></date></pub-dates></dates><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Peter Horvath</style></author><author><style face="normal" font="default" size="100%">Ian Jermyn</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">YY Tang</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">A higher-order active contour model for tree 
detection</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the 18th International Conference on Pattern Recognition, ICPR 2006</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2006</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2006///</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pages><style face="normal" font="default" size="100%">130 - 133</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;We present a model of a 'gas of circles', the ensemble of regions in the image domain consisting of an unknown number of circles with approximately fixed radius and short range repulsive interactions, and apply it to the extraction of tree crowns from aerial images. The method uses the recently introduced 'higher order active contours' (HOACs), which incorporate long-range interactions between contour points, and thereby include prior geometric information without using a template shape. This makes them ideal when looking for multiple instances of an entity in an image. We study an existing HOAC model for networks, and show via a stability calculation that circles stable to perturbations are possible for constrained parameter sets. Combining this prior energy with a data term, we show results on aerial imagery that demonstrate the effectiveness of the method and the need for prior geometric knowledge. The model has many other potential applications. 
© 2006 IEEE.&lt;/p&gt;</style></abstract><notes><style face="normal" font="default" size="100%">ScopusID: 34047219865doi: 10.1109/ICPR.2006.79</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Peter Horvath</style></author><author><style face="normal" font="default" size="100%">Ian Jermyn</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A Higher-Order Active Contour Model for Tree Detection</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the International Conference on Pattern Recognition (ICPR)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2006</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2006</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IAPR</style></publisher><pub-location><style face="normal" font="default" size="100%">Hong Kong, China</style></pub-location><volume><style face="normal" font="default" size="100%">2</style></volume><pages><style face="normal" font="default" size="100%">130–133</style></pages><abstract><style face="normal" font="default" size="100%">&lt;p&gt;We present a model of a 'gas of circles', the ensemble of regions in the image domain consisting of an unknown number of circles with approximately fixed radius and short range repulsive interactions, and apply it to the extraction of tree crowns from aerial images. 
The method uses the recently introduced 'higher order active contours' (HOACs), which incorporate long-range interactions between contour points, and thereby include prior geometric information without using a template shape. This makes them ideal when looking for multiple instances of an entity in an image. We study an existing HOAC model for networks, and show via a stability calculation that circles stable to perturbations are possible for constrained parameter sets. Combining this prior energy with a data term, we show results on aerial imagery that demonstrate the effectiveness of the method and the need for prior geometric knowledge. The model has many other potential applications. &lt;tt&gt; &lt;/tt&gt;&lt;/p&gt;</style></abstract><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>13</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Peter Horvath</style></author><author><style face="normal" font="default" size="100%">Ian Jermyn</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A Higher-Order Active Contour Model of a `Gas of Circles' and its Application to Tree Crown Extraction</style></title></titles><dates><year><style  face="normal" font="default" size="100%">2006</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2006///</style></date></pub-dates></dates><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Peter 
Horvath</style></author><author><style face="normal" font="default" size="100%">Ian Jermyn</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Prem Kalra</style></author><author><style face="normal" font="default" size="100%">Shmuel Peleg</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">An Improved `Gas of Circles' Higher-Order Active Contour Model and its Application to Tree Crown Extraction</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of Indian Conference on Computer Vision, Graphics and Image Processing (ICVGIP)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2006</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2006///</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">Springer Verlag</style></publisher><pub-location><style face="normal" font="default" size="100%">Berlin; Heidelberg; New York</style></pub-location><pages><style face="normal" font="default" size="100%">152 - 161</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><notes><style face="normal" font="default" size="100%">doi: 10.1007/11949619_14</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Ting Chuen Pong</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A Markov random field image segmentation model for color textured 
images</style></title><secondary-title><style face="normal" font="default" size="100%">IMAGE AND VISION COMPUTING</style></secondary-title><short-title><style face="normal" font="default" size="100%">IMAGE VISION COMPUT</style></short-title></titles><dates><year><style  face="normal" font="default" size="100%">2006</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2006///</style></date></pub-dates></dates><volume><style face="normal" font="default" size="100%">24</style></volume><pages><style face="normal" font="default" size="100%">1103 - 1114</style></pages><isbn><style face="normal" font="default" size="100%">0262-8856</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><issue><style face="normal" font="default" size="100%">10</style></issue><notes><style face="normal" font="default" size="100%">UT: 000241228300006doi: 10.1016/j.imavis.2006.03.005</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Csaba Benedek</style></author><author><style face="normal" font="default" size="100%">Tamas Sziranyi</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">IEEE</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">A multi-layer MRF model for object-motion detection in unregistered airborne image-pairs</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings - 14th International Conference on Image Processing, ICIP 2007</style></secondary-title></titles><dates><year><style  face="normal" font="default" 
size="100%">2006</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2006///</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.icip2007.org/Papers/AcceptedList.asp</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Piscataway</style></pub-location><pages><style face="normal" font="default" size="100%">VI-141 - VI-144</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Ting Chuen Pong</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">P J Narayanan</style></author><author><style face="normal" font="default" size="100%">S K Nayar</style></author><author><style face="normal" font="default" size="100%">H Y Shum</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">A multi-layer MRF model for video object segmentation</style></title><secondary-title><style face="normal" font="default" size="100%">COMPUTER VISION - ACCV 2006, PT II</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2006</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2006///</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">Springer Verlag</style></publisher><pages><style face="normal" font="default" size="100%">953 - 962</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><notes><style face="normal" font="default" 
size="100%">UT: 000235773200095doi: 10.1007/11612704_95</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Peter Horvath</style></author><author><style face="normal" font="default" size="100%">Avik Bhattacharya</style></author><author><style face="normal" font="default" size="100%">Ian Jermyn</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Dmitrij Chetverikov</style></author><author><style face="normal" font="default" size="100%">László Czúni</style></author><author><style face="normal" font="default" size="100%">Markus Vincze</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Shape Moments for Region Based Active Contours</style></title><secondary-title><style face="normal" font="default" size="100%">Joint Hungarian-Austrian conference on image processing and pattern recognition. 
5th conference of the Hungarian Association for Image Processing and Pattern Recognition (KÉPAF), 29th workshop of the Austrian Association for Pattern Recognition</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2005</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2005///</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">OCG</style></publisher><pub-location><style face="normal" font="default" size="100%">Vienna</style></pub-location><pages><style face="normal" font="default" size="100%">187 - 194</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>13</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Csaba Gradwohl</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Supervised Image Segmentation Using Markov Random Fields</style></title></titles><dates><year><style  face="normal" font="default" size="100%">2005</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2005///</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.inf.u-szeged.hu/~kato/software/mrfdemo.html</style></url></web-urls></urls><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">This is the sample implementation of a Markov random field based image segmentation algorithm described in the following papers: Mark Berthod, Zoltan Kato, Shan Yu, and Josiane Zerubia. Bayesian Image Classification Using Markov Random Fields. Image and Vision Computing, 14:285--295, 1996. 
Keyword(s): Bayesian image classification, Markov random fields, Optimisation.
 Zoltan Kato, Josiane Zerubia, and Mark Berthod. Satellite Image Classification Using a Modified Metropolis Dynamics. In Proceedings of International Conference on Acoustics, Speech and Signal Processing, volume 3, San-Francisco, California, USA, pages 573-576, March 1992. IEEE.
 Zoltan Kato. Modélisations markoviennes multirésolutions en vision par ordinateur. Application a` la segmentation d'images SPOT. PhD Thesis, INRIA, Sophia Antipolis, France, December 1994. Note: Available in French (follow the URL link) and English. Keyword(s): computer vision, early vision, Markovian model, multiscale model, hierarchical model, parallel combinatorial optimization algorithm, multi-temperature annealing, parameter estimation.
</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Ting Chuen Pong</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Dmitrij Chetverikov</style></author><author><style face="normal" font="default" size="100%">László Czúni</style></author><author><style face="normal" font="default" size="100%">Markus Vincze</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Video Object Segmentation Using a Multicue Markovian Model</style></title><secondary-title><style face="normal" font="default" size="100%">Joint Hungarian-Austrian conference on image processing and pattern recognition. 5th conference of the Hungarian Association for Image Processing and Pattern Recognition (KÉPAF), 29th workshop of the Austrian Association for Pattern Reco</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2005</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2005///</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">OCG</style></publisher><pub-location><style face="normal" font="default" size="100%">Vienna</style></pub-location><pages><style face="normal" font="default" size="100%">111 - 118</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Peter Horvath</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors></contributors><titles><title><style 
face="normal" font="default" size="100%">Color, Texture and Motion Segmentation Using Gradient Vector Flow</style></title><secondary-title><style face="normal" font="default" size="100%">A Képfeldolgozók és Alakfelismerők Társaságának konferenciája - KÉPAF 2004</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2004</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Jan 2004</style></date></pub-dates></dates><pub-location><style face="normal" font="default" size="100%">Miskolctapolca</style></pub-location><pages><style face="normal" font="default" size="100%">131 - 137</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Ting Chuen Pong</style></author><author><style face="normal" font="default" size="100%">Song Guo Qiang</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Color textured image segmentation using a multi-layer Markovian model</style></title><secondary-title><style face="normal" font="default" size="100%">A Képfeldolgozók és Alakfelismerők Társaságának konferenciája - KÉPAF 2004</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2004</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Jan 2004</style></date></pub-dates></dates><pub-location><style face="normal" font="default" size="100%">Miskolctapolca</style></pub-location><pages><style face="normal" font="default" size="100%">152 - 158</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" 
version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Peter Horvath</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Optical Flow Computation Using an Energy Minimization Approach</style></title><secondary-title><style face="normal" font="default" size="100%">A Képfeldolgozók és Alakfelismerők Társaságának konferenciája - KÉPAF 2004</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2004</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Jan 2004</style></date></pub-dates></dates><pub-location><style face="normal" font="default" size="100%">Miskolctapolca</style></pub-location><pages><style face="normal" font="default" size="100%">125 - 130</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Andreas Hoppe</style></author><author><style face="normal" font="default" size="100%">Sarah Barman</style></author><author><style face="normal" font="default" size="100%">Tim Ellis</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Reversible Jump Markov Chain Monte Carlo for Unsupervised MRF Color Image SegmentationProceedings of Brithish Machine Vision Conference (BMVC)</style></title></titles><dates><year><style  face="normal" font="default" size="100%">2004</style></year><pub-dates><date><style  face="normal" font="default" 
size="100%">2004.09</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.bmva.org/bmvc/2004/papers/paper_223.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">BMVA</style></publisher><pages><style face="normal" font="default" size="100%">37 - 46</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Reversible Jump Markov Chain Monte Carlo for Unsupervised MRF Color Image Segmentation</style></title><secondary-title><style face="normal" font="default" size="100%">A Képfeldolgozók és Alakfelismerők Társaságának konferenciája - KÉPAF 2004</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2004</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2004.01.28</style></date></pub-dates></dates><pub-location><style face="normal" font="default" size="100%">Miskolctapolca</style></pub-location><pages><style face="normal" font="default" size="100%">144 - 151</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Antal Nagy</style></author><author><style face="normal" font="default" size="100%">Emese Balogh</style></author><author><style face="normal" font="default" size="100%">Mariann Dudásné Nagy</style></author><author><style face="normal" font="default" size="100%">Attila Kuba</style></author><author><style face="normal" font="default" 
size="100%">Eörs Máté</style></author><author><style face="normal" font="default" size="100%">Kálmán Palágyi</style></author><author><style face="normal" font="default" size="100%">Endre Katona</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">László Gábor Nyúl</style></author><author><style face="normal" font="default" size="100%">Attila Tanacs</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Zoltán Gácsi</style></author><author><style face="normal" font="default" size="100%">Péter Barkóczy</style></author><author><style face="normal" font="default" size="100%">Gábor Sárközi</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Számítógépes képfeldolgozás oktatása a Szegedi Tudományegyetemen</style></title><secondary-title><style face="normal" font="default" size="100%">A Képfeldolgozók és Alakfelismerők Társaságának konferenciája - KÉPAF 2004</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2004</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Jan 2004</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">Neumann János Számítógép-tudományi Társaság</style></publisher><pub-location><style face="normal" font="default" size="100%">Miskolc</style></pub-location><pages><style face="normal" font="default" size="100%">191 - 196</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;Az SZTE Informatikai Tanszékcsoportja által gondozott szakoktanterveiben 1993 óta szerepel a képfeldolgozás és alkalmazásainak oktatása. A kreditrendszer bevezetésével a Képfeldolgozás I. 
tárgy kötelező az ötéves képzésben részt vevő informatikus hallgatóknak. Ezen felül a választható szakirányok között szintén szerepel a Képfeldolgozás szakirány. A szakirányon belül különböző képfeldolgozási területeket tárgyaló kurzusok épülnek egymásra. Az elméleti megalapozás mellett a képfeldolgozás alkalmazásaira is nagy hangsúlyt fektetünk. A kutatások illetve az orvosi alkalmazások fejlesztése során szerzett eredményeket a kötelező jellegű tárgyak mellett speciálkollégiumok keretében építjük be az oktatási anyagba. Számos hallgatónk választ a képfeldolgozás területéről témát a diplomamunkájához, dolgozataikkal rendszeresen és sikerrel szerepelnek az OTDK-n. Hallgatóink évente több hónapot tölthetnek külföldi partneregyetemeinken, ahol a kutató- és fejlesztőmunka mellett nálunk is elfogadott kurzusokat teljesíthetnek. A képfeldolgozás témakörön belül &quot;ipari&quot; projekt munkákban is egyre több hallgató vesz részt. A doktori programon belül is meghirdetünk képfeldolgozáshoz kapcsolódó kutatási irányokat. Az évente megrendezésre kerülő, 11-éves múltra visszatekintő Képfeldolgozó Nyári Iskolának (SSIP) eddig hatszor adott otthont Szeged. 
A rendezvénysorozat kiemelkedő fontosságú nemzetközi fórum hallgatóink és oktatóink számára is.&lt;/p&gt;</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Xiaowen Ji</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Zhiyong Huang</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Jon Rokne</style></author><author><style face="normal" font="default" size="100%">Reinhard Klein</style></author><author><style face="normal" font="default" size="100%">Wenping Wang</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Non-Photorealistic Rendering and Content-Based Image Retrieval</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of 11th Pacific Conference on Computer Graphics and Applications (PG)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2003</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2003///</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE Computer Soc. 
Pr.</style></publisher><pub-location><style face="normal" font="default" size="100%">New York</style></pub-location><pages><style face="normal" font="default" size="100%">153 - 162</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><notes><style face="normal" font="default" size="100%">doi: 10.1109/PCCGA.2003.1238257</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Ting Chuen Pong</style></author><author><style face="normal" font="default" size="100%">Song Guo Qiang</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">IEEE</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Unsupervised segmentation of color textured images using a multi-layer MRF model</style></title><secondary-title><style face="normal" font="default" size="100%">ICIP 2003: IEEE International Conference on Image Processing</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2003</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2003///</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pages><style face="normal" font="default" size="100%">961 - 964</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;Herein, we propose a novel multi-layer Markov random field (MRF) image segmentation model which aims at combining color and texture features: Each feature is associated to a so called feature layer, where an MRF model is defined using only the corresponding feature. 
A special layer is assigned to the combined MRF model. This layer interacts with each feature layer and provides the segmentation based on the combination of different features. The model is quite generic and isn't restricted to a particular texture feature. Herein we will test the algorithm using Gabor and MRSAR texture features. Furthermore, the algorithm automatically estimates the number of classes at each layer (there can be different classes at different layers) and the associated model parameters.&lt;/p&gt;</style></abstract><notes><style face="normal" font="default" size="100%">ScopusID: 0344666539doi: 10.1109/ICIP.2003.1247124</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Xiaowen Ji</style></author><author><style face="normal" font="default" size="100%">Tamas Sziranyi</style></author><author><style face="normal" font="default" size="100%">Zoltán Tóth</style></author><author><style face="normal" font="default" size="100%">László Czúni</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">&amp;</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Content-based image retrieval using stochastic paintbrush transformation</style></title><secondary-title><style face="normal" font="default" size="100%">IEEE - International Conference on Image Processing: ICIP</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2002</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Sep 2002</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE Computer Society Press</style></publisher><pub-location><style face="normal" 
font="default" size="100%">Aix-en-Provence</style></pub-location><pages><style face="normal" font="default" size="100%">944 - 947</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><notes><style face="normal" font="default" size="100%">UT: 000185208200237</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author><author><style face="normal" font="default" size="100%">André Jalobeanu</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Markov random fields in image processing application to remote sensing and astrophysics</style></title><secondary-title><style face="normal" font="default" size="100%">JOURNAL DE PHYSIQUE IV</style></secondary-title><short-title><style face="normal" font="default" size="100%">J PHYS IV</style></short-title></titles><dates><year><style  face="normal" font="default" size="100%">2002</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2002///</style></date></pub-dates></dates><volume><style face="normal" font="default" size="100%">12</style></volume><pages><style face="normal" font="default" size="100%">117 - 136</style></pages><isbn><style face="normal" font="default" size="100%">1155-4339</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><issue><style face="normal" font="default" size="100%">1</style></issue><notes><style face="normal" font="default" size="100%">UT: 000175261200006doi: 10.1051/jp42002005</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan 
Kato</style></author><author><style face="normal" font="default" size="100%">Ting Chuen Pong</style></author><author><style face="normal" font="default" size="100%">Song Guo Qiang</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Ranga Katsuri</style></author><author><style face="normal" font="default" size="100%">D Laurendeau</style></author><author><style face="normal" font="default" size="100%">Ching Y Suen</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Multicue MRF image segmentation: Combining texture and color features</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings 16th International Conference on Pattern Recognition (ICPR 2002)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2002</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2002///</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE Computer Society</style></publisher><pages><style face="normal" font="default" size="100%">660 - 663</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;Herein, we propose a new Markov random field (MRF) image segmentation model which aims at combining color and texture features. The model has a multi-layer structure: Each feature has its own layer, called feature layer, where an MRF model is defined using only the corresponding feature. A special layer is assigned to the combined MRF model. This layer interacts with each feature layer and provides the segmentation based on the combination of different features. The uniqueness of our algorithm is that it provides both color only and texture only segmentations as well as a segmentation based on combined color and texture features. 
The number of classes on feature layers is given by the user but it is estimated on the combined layer. © 2002 IEEE.&lt;/p&gt;</style></abstract><notes><style face="normal" font="default" size="100%">ScopusID: 33751583776</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Ting Chuen Pong</style></author><author><style face="normal" font="default" size="100%">Chung-Mong J Lee</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Color image segmentation and parameter estimation in a markovian framework</style></title><secondary-title><style face="normal" font="default" size="100%">PATTERN RECOGNITION LETTERS</style></secondary-title><short-title><style face="normal" font="default" size="100%">PATTERN RECOGN LETT</style></short-title></titles><dates><year><style  face="normal" font="default" size="100%">2001</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2001///</style></date></pub-dates></dates><volume><style face="normal" font="default" size="100%">22</style></volume><pages><style face="normal" font="default" size="100%">309 - 321</style></pages><isbn><style face="normal" font="default" size="100%">0167-8655</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;An unsupervised color image segmentation algorithm is presented, using a Markov random field (MRF) pixel classification model. We propose a new method to estimate initial mean vectors effectively even if the histogram does not have clearly distinguishable peaks. The only parameter supplied by the user is the number of classes. © 2001 Elsevier Science B.V. 
All rights reserved.&lt;/p&gt;</style></abstract><issue><style face="normal" font="default" size="100%">3-4</style></issue><notes><style face="normal" font="default" size="100%">UT: 000167983900005ScopusID: 0035272740doi: 10.1016/S0167-8655(00)00106-9</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Ting Chuen Pong</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Wladyslaw Skarbek</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">A Markov Random Field Image Segmentation Model Using Combined Color and Texture Features</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of International Conference on Computer Analysis of Images and Patterns (CAIP)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2001</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2001///</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">Springer Verlag</style></publisher><pub-location><style face="normal" font="default" size="100%">Berlin; Heidelberg</style></pub-location><pages><style face="normal" font="default" size="100%">547 - 554</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><notes><style face="normal" font="default" size="100%">doi: 10.1007/3-540-44692-3_66</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Tamas Sziranyi</style></author><author><style face="normal" font="default" 
size="100%">Josiane Zerubia</style></author><author><style face="normal" font="default" size="100%">László Czúni</style></author><author><style face="normal" font="default" size="100%">David Geldreich</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Image segmentation using Markov random field model in fully parallel cellular network architectures</style></title><secondary-title><style face="normal" font="default" size="100%">REAL-TIME IMAGING</style></secondary-title><short-title><style face="normal" font="default" size="100%">REAL-TIME IMAGING</style></short-title></titles><dates><year><style  face="normal" font="default" size="100%">2000</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2000///</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.sztaki.hu/~sziranyi/Papers/Sziranyi_MRF.pdf</style></url></web-urls></urls><volume><style face="normal" font="default" size="100%">6</style></volume><pages><style face="normal" font="default" size="100%">195 - 211</style></pages><isbn><style face="normal" font="default" size="100%">1077-2014</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><issue><style face="normal" font="default" size="100%">3</style></issue><notes><style face="normal" font="default" size="100%">UT: 000088331700003ScopusID: 0034204755</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>13</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Bayesian Color Image Segmentation Using Reversible Jump Markov Chain Monte Carlo</style></title></titles><dates><year><style  face="normal" 
font="default" size="100%">1999</style></year><pub-dates><date><style  face="normal" font="default" size="100%">1999///</style></date></pub-dates></dates><language><style face="normal" font="default" size="100%">eng</style></language><notes><style face="normal" font="default" size="100%">http://dl.acm.org/citation.cfm?id=869110http://dl.acm.org/citation.cfm?id=869110</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>27</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Bayesian Color Image Segmentation Using Reversible Jump Markov Chain Monte Carlo</style></title></titles><dates><year><style  face="normal" font="default" size="100%">1999</style></year><pub-dates><date><style  face="normal" font="default" size="100%">January 1999</style></date></pub-dates></dates><number><style face="normal" font="default" size="100%">01/99-R055</style></number><publisher><style face="normal" font="default" size="100%">ERCIM/CWI</style></publisher><pub-location><style face="normal" font="default" size="100%">Amsterdam, The Netherlands</style></pub-location><language><style face="normal" font="default" size="100%">eng</style></language><work-type><style face="normal" font="default" size="100%">Research Report</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author><author><style face="normal" font="default" size="100%">Mark Berthod</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Unsupervised parallel image classification using Markovian 
models</style></title><secondary-title><style face="normal" font="default" size="100%">PATTERN RECOGNITION</style></secondary-title><short-title><style face="normal" font="default" size="100%">PATTERN RECOGN</style></short-title></titles><dates><year><style  face="normal" font="default" size="100%">1999</style></year><pub-dates><date><style  face="normal" font="default" size="100%">1999///</style></date></pub-dates></dates><volume><style face="normal" font="default" size="100%">32</style></volume><pages><style face="normal" font="default" size="100%">591 - 604</style></pages><isbn><style face="normal" font="default" size="100%">0031-3203</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;This paper deals with the problem of unsupervised classification of images modeled by Markov random fields (MRF). If the model parameters are known then we have various methods to solve the segmentation problem (simulated annealing (SA), iterated conditional modes (ICM), etc). However, when the parameters are unknown, the problem becomes more difficult. One has to estimate the hidden label field parameters only from the observed image. Herein, we are interested in parameter estimation methods related to monogrid and hierarchical MRF models. The basic idea is similar to the expectation-maximization (EM) algorithm: we recursively look at the maximum a posteriori (MAP) estimate of the label field given the estimated parameters, then we look at the maximum likelihood (ML) estimate of the parameters given a tentative labeling obtained at the previous step. The only parameter supposed to be known is the number of classes, all the other parameters are estimated. The proposed algorithms have been implemented on a Connection Machine CM200. Comparative experiments have been performed on both noisy synthetic data and real images. © 1999 Pattern Recognition Society. 
Published by Elsevier Science Ltd. All rights reserved.&lt;/p&gt;</style></abstract><issue><style face="normal" font="default" size="100%">4</style></issue><notes><style face="normal" font="default" size="100%">UT: 000079145300005ScopusID: 0033116536doi: 10.1016/S0031-3203(98)00104-6</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Ting Chuen Pong</style></author><author><style face="normal" font="default" size="100%">John Chung Mong Lee</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Roland Chin</style></author><author><style face="normal" font="default" size="100%">Ting Chuen Pong</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Motion Compensated Color Video Classification Using Markov Random Fields</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of Asian Conference on Computer Vision (ACCV)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">1998</style></year><pub-dates><date><style  face="normal" font="default" size="100%">1998///</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">Springer Verlag</style></publisher><pub-location><style face="normal" font="default" size="100%">Berlin; Heidelberg</style></pub-location><pages><style face="normal" font="default" size="100%">738 - 745</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><notes><style face="normal" font="default" size="100%">doi: 10.1007/3-540-63930-6_189</style></notes></record><record><source-app name="Biblio" 
version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Ting Chuen Pong</style></author><author><style face="normal" font="default" size="100%">John Chung Mong Lee</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Hung Tat Tsui</style></author><author><style face="normal" font="default" size="100%">Chi Kit Ronald Chung</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Color Image Classification and Parameter Estimation in a Markovian FrameworkProceedings of Workshop on 3D Computer Vision</style></title></titles><dates><year><style  face="normal" font="default" size="100%">1997</style></year><pub-dates><date><style  face="normal" font="default" size="100%">1997.05</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.128.1560</style></url></web-urls></urls><pages><style face="normal" font="default" size="100%">75 - 79</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>13</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Tamas Sziranyi</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author><author><style face="normal" font="default" size="100%">László Czúni</style></author><author><style face="normal" font="default" size="100%">David Geldreich</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Image segmentation using Markov 
random field model in fully parallel cellular network architectures.</style></title></titles><dates><year><style  face="normal" font="default" size="100%">1997</style></year><pub-dates><date><style  face="normal" font="default" size="100%">1997///</style></date></pub-dates></dates><pages><style face="normal" font="default" size="100%"> - 17</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>13</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Tamas Sziranyi</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author><author><style face="normal" font="default" size="100%">László Czúni</style></author><author><style face="normal" font="default" size="100%">David Geldreich</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Markov Random Field Image Segmentation using Cellular Neural Network</style></title></titles><dates><year><style  face="normal" font="default" size="100%">1997</style></year><pub-dates><date><style  face="normal" font="default" size="100%">1997///</style></date></pub-dates></dates><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>13</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Ting Chuen Pong</style></author><author><style face="normal" font="default" size="100%">John Chung Mong Lee</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Motion Compensated Color Image Classification and Parameter 
Estimation in a Markovian Framework</style></title></titles><dates><year><style  face="normal" font="default" size="100%">1997</style></year><pub-dates><date><style  face="normal" font="default" size="100%">1997///</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://biblioteca.universia.net/html_bura/ficha/params/title/motion-compensated-color-image-classification-and-parameter-estimation-in-markovian/id/5664082.html</style></url></web-urls></urls><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Tamas Sziranyi</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author><author><style face="normal" font="default" size="100%">David Geldreich</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">László Czúni</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Tamas Sziranyi</style></author><author><style face="normal" font="default" size="100%">József Berke</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">MRF based image segmentation with fully parallel cellular nonlinear networks</style></title><secondary-title><style face="normal" font="default" size="100%">A Képfeldolgozók és Alakfelismerők Társaságának konferenciája - KÉPAF 1997</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">1997</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Oct 1997</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">Pannon Agrártudományi Egyetem 
Georgikon Mezőgazdaságtudományi Kar</style></publisher><pub-location><style face="normal" font="default" size="100%">Keszthely</style></pub-location><pages><style face="normal" font="default" size="100%">43 - 50</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Mark Berthod</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Shan Yu</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Bayesian image classification using Markov random fields</style></title><secondary-title><style face="normal" font="default" size="100%">IMAGE AND VISION COMPUTING</style></secondary-title><short-title><style face="normal" font="default" size="100%">IMAGE VISION COMPUT</style></short-title></titles><dates><year><style  face="normal" font="default" size="100%">1996</style></year><pub-dates><date><style  face="normal" font="default" size="100%">1996///</style></date></pub-dates></dates><volume><style face="normal" font="default" size="100%">14</style></volume><pages><style face="normal" font="default" size="100%">285 - 295</style></pages><isbn><style face="normal" font="default" size="100%">0262-8856</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;In this paper, we present three optimisation techniques, Deterministic Pseudo-Annealing (DPA), Game Strategy Approach (GSA), and Modified Metropolis Dynamics (MMD), in order to carry out image classification using a Markov random field model. 
For the first approach (DPA), the a posteriori probability of a tentative labelling is generalised to a continuous labelling. The merit function thus defined has the same maxima under constraints yielding probability vectors. Changing these constraints convexifies the merit function. The algorithm solves this unambiguous maximisation problem, and then tracks down the solution while the original constraints are restored yielding a good, even if suboptimal, solution to the original labelling assignment problem. In the second method (GSA), the maximisation problem of the a posteriori probability of the labelling is solved by an optimisation algorithm based on game theory. A non-cooperative n-person game with pure strategies is designed such that the set of Nash equilibrium points of the game is identical to the set of local maxima of the a posteriori probability of the labelling. The algorithm converges to a Nash equilibrium. The third method (MMD) is a modified version of the Metropolis algorithm: at each iteration the new state is chosen randomly, but the decision to accept it is purely deterministic. This is also a suboptimal technique but it is much faster than stochastic relaxation. These three methods have been implemented on a Connection Machine CM2. 
Experimental results are compared to those obtained by the Metropolis algorithm, the Gibbs sampler and ICM (Iterated Conditional Mode).&lt;/p&gt;</style></abstract><issue><style face="normal" font="default" size="100%">4</style></issue><notes><style face="normal" font="default" size="100%">UT: A1996UT58100004ScopusID: 0030148684doi: 10.1016/0262-8856(95)01072-6</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Tamas Sziranyi</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author><author><style face="normal" font="default" size="100%">David Geldreich</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">*IEEE Circuits &amp; *Society</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Cellular Neural Network in Markov Random Field Image Segmentation</style></title><secondary-title><style face="normal" font="default" size="100%">1996 FOURTH IEEE INTERNATIONAL WORKSHOP ON CELLULAR NEURAL NETWORKS AND THEIR APPLICATIONS, PROCEEDINGS (CNNA-96)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">1996</style></year><pub-dates><date><style  face="normal" font="default" size="100%">1996///</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">Wiley - IEEE Press</style></publisher><pub-location><style face="normal" font="default" size="100%">New York</style></pub-location><pages><style face="normal" font="default" size="100%">139 - 144</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><notes><style face="normal" font="default" size="100%">UT: A1996BH11L00025ScopusID: 0030409916Besorolás: 
Konferenciaközlemény</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Mark Berthod</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A Hierarchical Markov Random Field Model and Multitemperature Annealing for Parallel Image Classification</style></title><secondary-title><style face="normal" font="default" size="100%">GRAPHICAL MODELS AND IMAGE PROCESSING</style></secondary-title><short-title><style face="normal" font="default" size="100%">GRAPH MODEL IM PROC</style></short-title></titles><dates><year><style  face="normal" font="default" size="100%">1996</style></year><pub-dates><date><style  face="normal" font="default" size="100%">1996///</style></date></pub-dates></dates><volume><style face="normal" font="default" size="100%">58</style></volume><pages><style face="normal" font="default" size="100%">18 - 37</style></pages><isbn><style face="normal" font="default" size="100%">1077-3169</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;In this paper, we are interested in massively parallel multiscale relaxation algorithms applied to image classification. It is well known that multigrid methods can improve significantly the convergence rate and the quality of the final results of iterative relaxation techniques. First, we present a classical multiscale model which consists of a label pyramid and a whole observation field. The potential functions of coarser grids are derived by simple computations. 
The optimization problem is first solved at the higher scale by a parallel relaxation algorithm; then the next lower scale is initialized by a projection of the result. Second, we propose a hierarchical Markov random field model based on this classical model. We introduce new interactions between neighbor levels in the pyramid. It can also be seen as a way to incorporate cliques with far apart sites for a reasonable price. This model results in a relaxation algorithm with a new annealing scheme: the multitemperature annealing (MTA) scheme, which consists of associating higher temperatures to higher levels, in order to be less sensitive to local minima at coarser grids. The convergence to the global optimum is proved by a generalization of the annealing theorem of S. Geman and D. Geman (IEEE Trans. Pattern Anal. Mach. Intell. 6, 1984, 721-741). © 1996 Academic Press, Inc.&lt;/p&gt;</style></abstract><issue><style face="normal" font="default" size="100%">1</style></issue><notes><style face="normal" font="default" size="100%">UT: A1996TZ03400002ScopusID: 0029732459doi: 10.1006/gmip.1996.0002</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Marc Berthod</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">DPA: a deterministic approach to the MAP problem</style></title><secondary-title><style face="normal" font="default" size="100%">IEEE TRANSACTIONS ON IMAGE PROCESSING</style></secondary-title><short-title><style face="normal" font="default" size="100%">IEEE T IMAGE PROCESS</style></short-title></titles><dates><year><style  face="normal" font="default" 
size="100%">1995</style></year><pub-dates><date><style  face="normal" font="default" size="100%">1995///</style></date></pub-dates></dates><volume><style face="normal" font="default" size="100%">4</style></volume><pages><style face="normal" font="default" size="100%">1312 - 1314</style></pages><isbn><style face="normal" font="default" size="100%">1057-7149</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Deterministic pseudo-annealing (DPA) is a new deterministic optimization method for finding the maximum a posteriori (MAP) labeling in a Markov random field, in which the probability of a tentative labeling is extended to a merit function on continuous labelings. This function is made convex by changing its definition domain. This unambiguous maximization problem is solved, and the solution is followed down to the original domain, yielding a good, if suboptimal, solution to the original labeling assignment problem. 
The performance of DPA is analyzed on randomly weighted graphs.</style></abstract><issue><style face="normal" font="default" size="100%">9</style></issue><notes><style face="normal" font="default" size="100%">UT: A1995RT35400011ScopusID: 0029375669doi: 10.1109/83.413175</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author><author><style face="normal" font="default" size="100%">Marc Berthod</style></author><author><style face="normal" font="default" size="100%">Wojciech Pieczynski</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">*IEEE Signal Pro *Society</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Unsupervised adaptive image segmentation</style></title><secondary-title><style face="normal" font="default" size="100%">ICASSP-95</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">1995</style></year><pub-dates><date><style  face="normal" font="default" size="100%">1995///</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Piscataway</style></pub-location><pages><style face="normal" font="default" size="100%">2399 - 2402</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">This paper deals with the problem of unsupervised Bayesian segmentation of images modeled by Markov Random Fields (MRF). If the model parameters are known then we have various methods to solve the segmentation problem (Simulated Annealing, ICM, etc...). 
However, when they are not known, the problem becomes more difficult. One has to estimate the hidden label field parameters from the available image only. Our approach consists of a recent iterative method of estimation, called Iterative Conditional Estimation (ICE), applied to a monogrid Markovian image segmentation model. The method has been tested on synthetic and real satellite images.</style></abstract><notes><style face="normal" font="default" size="100%">ScopusID: 0028996751doi: 10.1109/ICASSP.1995.479976</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author><author><style face="normal" font="default" size="100%">Marc Berthod</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">IEEE Computer *Society</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Unsupervised parallel image classification using a hierarchical Markovian model</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the 5th International Conference on Computer Vision</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">1995</style></year><pub-dates><date><style  face="normal" font="default" size="100%">1995///</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Piscataway</style></pub-location><pages><style face="normal" font="default" size="100%">169 - 174</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">This paper 
deals with the problem of unsupervised classification of images modeled by Markov Random Fields (MRF). If the model parameters are known then we have various methods to solve the segmentation problem (simulated annealing, ICM, etc...). However, when they are not known, the problem becomes more difficult. One has to estimate the hidden label field parameters from the only observable image. Our approach consists of extending a recent iterative method of estimation, called Iterative Conditional Estimation (ICE) to a hierarchical markovian model. The idea resembles the Estimation-Maximization (EM) algorithm as we recursively look at the Maximum a Posteriori (MAP) estimate of the label field given the estimated parameters then we look at the Maximum Likelihood (ML) estimate of the parameters given a tentative labeling obtained at the previous step. We propose unsupervised image classification algorithms using a hierarchical model. The only parameter supposed to be known is the number of regions, all the other parameters are estimated. The presented algorithms have been implemented on a Connection Machine CM200. Comparative tests have been done on noisy synthetic and real images (remote sensing).</style></abstract><notes><style face="normal" font="default" size="100%">ScopusID: 0029214757doi: 10.1109/ICCV.1995.466790</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>32</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Multi-scale Markovian Modelisation in Computer Vision with Applications to SPOT Image Segmentation : Modélisations markoviennes multirésolutions en vision par ordinateur. 
Application à la segmentation d'images SPOT</style></title></titles><dates><year><style  face="normal" font="default" size="100%">1994</style></year><pub-dates><date><style  face="normal" font="default" size="100%">1994</style></date></pub-dates></dates><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Mark Berthod</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">IEEE Computer *Society</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Multi-Temperature Annealing: A New Approach for the Energy-Minimization of Hierarchical Markov Random Field Models</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the 12th IAPR International Conference on Pattern Recognition</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">1994</style></year><pub-dates><date><style  face="normal" font="default" size="100%">1994///</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Los Alamitos</style></pub-location><pages><style face="normal" font="default" size="100%">520 - 522</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><notes><style face="normal" font="default" size="100%">doi: 10.1109/ICPR.1994.576342</style></notes></record><record><source-app name="Biblio" 
version="7.x">Drupal-Biblio</source-app><ref-type>13</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author><author><style face="normal" font="default" size="100%">Mark Berthod</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Segmentation hiérarchique d'images sur CM200 (Hierarchical Image Segmentation on the CM200)</style></title></titles><dates><year><style  face="normal" font="default" size="100%">1994</style></year><pub-dates><date><style  face="normal" font="default" size="100%">1994///</style></date></pub-dates></dates><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>13</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author><author><style face="normal" font="default" size="100%">Mark Berthod</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Segmentation multirésolution d'images sur SUN version 1 du 26.05.1994 (Multiresolution Image Segmentation on SUN version 1 of 26.05.1994)</style></title></titles><dates><year><style  face="normal" font="default" size="100%">1994</style></year><pub-dates><date><style  face="normal" font="default" size="100%">1994///</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.app.asso.fr/en/</style></url></web-urls></urls><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style 
face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author><author><style face="normal" font="default" size="100%">Mark Berthod</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Ali Mohammad-Djafari</style></author><author><style face="normal" font="default" size="100%">Guy Demoment</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Bayesian Image Classification Using Markov Random Fields</style></title><secondary-title><style face="normal" font="default" size="100%">Maximum Entropy and Bayesian Methods</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">1993</style></year><pub-dates><date><style  face="normal" font="default" size="100%">1993///</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">Kluwer Academic Publishers</style></publisher><pub-location><style face="normal" font="default" size="100%">Dordrecht; Boston; London</style></pub-location><pages><style face="normal" font="default" size="100%">375 - 382</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>13</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Mark Berthod</style></author><author><style face="normal" font="default" size="100%">Gerard Giraudon</style></author><author><style face="normal" font="default" size="100%">Shan Liu</style></author><author><style face="normal" font="default" size="100%">Frank Mangin</style></author><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Sabine Urago</style></author><author><style face="normal" font="default" 
size="100%">Josiane Zerubia</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Extraction d'information dans les images SPOT</style></title></titles><dates><year><style  face="normal" font="default" size="100%">1993</style></year><pub-dates><date><style  face="normal" font="default" size="100%">1993///</style></date></pub-dates></dates><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>13</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Mark Berthod</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A Hierarchical Markov Random Field Model and Multi-Temperature Annealing for Parallel Image Classification</style></title></titles><dates><year><style  face="normal" font="default" size="100%">1993</style></year><pub-dates><date><style  face="normal" font="default" size="100%">1993///</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://hal.inria.fr/inria-00074736/</style></url></web-urls></urls><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Mark Berthod</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A Hierarchical Markov Random Field Model for 
Image Classification</style></title><secondary-title><style face="normal" font="default" size="100%">International Workshop on Image and Multidimensional Digital Signal Processing (IMDSP)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">1993</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Sep 1993</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE Computer Soc. Pr.</style></publisher><language><style face="normal" font="default" size="100%">eng</style></language><notes><style face="normal" font="default" size="100%">Art. No.: imdsp.ps</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Marc Berthod</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">*IEEE Computer S *Analysis</style></author><author><style face="normal" font="default" size="100%">*Machine *Intelligence</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Multiscale Markov random field models for parallel image classification</style></title><secondary-title><style face="normal" font="default" size="100%">Fourth International Conference on Computer Vision, ICCV 1993, Berlin, Germany, 11-14 May, 1993, Proceedings</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">1993</style></year><pub-dates><date><style  face="normal" font="default" size="100%">1993///</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" 
font="default" size="100%">Los Alamitos</style></pub-location><pages><style face="normal" font="default" size="100%">253 - 257</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this paper, we are interested in multiscale Markov Random Field (MRF) models. It is well known that multigrid methods can improve significantly the convergence rate and the quality of the final results of iterative relaxation techniques. Herein, we propose a new hierarchical model, which consists of a label pyramid and a whole observation field. The parameters of the coarse grid can be derived by simple computation from the finest grid. In the label pyramid, we have introduced a new local interaction between two neighbor grids. This model gives a relaxation algorithm which can be run in parallel on the entire pyramid. On the other hand, the new model allows to propagate local interactions more efficiently giving estimates closer to the global optimum for deterministic as well as for stochastic relaxation schemes. 
It can also be seen as a way to incorporate cliques with far apart sites for a reasonable price.</style></abstract><notes><style face="normal" font="default" size="100%">ScopusID: 0027224261</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Marc Berthod</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">*IEEE Signal Pro *Society</style></author><author><style face="normal" font="default" size="100%">*Institute of Electri *Engineers</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Parallel image classification using multiscale Markov random fields</style></title><secondary-title><style face="normal" font="default" size="100%">ICASSP-93</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">1993</style></year><pub-dates><date><style  face="normal" font="default" size="100%">1993///</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">New York</style></pub-location><pages><style face="normal" font="default" size="100%">137 - 140</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this paper, we are interested in massively parallel multiscale relaxation algorithms applied to image classification. First, we present a classical multiscale model applied to supervised image classification. The model consists of a label pyramid and a whole observation field. 
The potential functions of the coarse grid are derived by simple computations. Then, we propose another scheme introducing a local interaction between two neighbor grids in the label pyramid. This is a way to incorporate cliques with far apart sites for a reasonable price. Finally we present the results on noisy synthetic data and on a SPOT image obtained by different relaxation methods using these models.</style></abstract><notes><style face="normal" font="default" size="100%">ScopusID: 0027266514doi: 10.1109/ICASSP.1993.319766</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>13</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author><author><style face="normal" font="default" size="100%">Mark Berthod</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Image Classification Using Markov Random Fields with Two New Relaxation Methods</style></title></titles><dates><year><style  face="normal" font="default" size="100%">1992</style></year><pub-dates><date><style  face="normal" font="default" size="100%">1992///</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://hal.inria.fr/docs/00/07/49/54/PDF/RR-1606.pdf</style></url></web-urls></urls><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zoltan Kato</style></author><author><style face="normal" font="default" size="100%">Josiane Zerubia</style></author><author><style face="normal" font="default" size="100%">Mark Berthod</style></author></authors></contributors><titles><title><style face="normal" 
font="default" size="100%">Satellite Image Classification Using a Modified Metropolis Dynamics</style></title><secondary-title><style face="normal" font="default" size="100%">International Conference on Acoustics, Speech and Signal Processing (ICASSP)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">1992</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Mar 1992</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE Computer Soc. Pr.</style></publisher><pages><style face="normal" font="default" size="100%">573 - 576</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><notes><style face="normal" font="default" size="100%">doi: 10.1109/ICASSP.1992.226148</style></notes></record></records></xml>