<?xml version="1.0" encoding="UTF-8"?><xml><records><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Jozsef Nemeth</style></author><author><style face="normal" font="default" size="100%">András Bánhalmi</style></author><author><style face="normal" font="default" size="100%">László G Nyúl</style></author><author><style face="normal" font="default" size="100%">Márta Fidrich</style></author><author><style face="normal" font="default" size="100%">Zsolt Szkiva</style></author><author><style face="normal" font="default" size="100%">Péter Franczia</style></author><author><style face="normal" font="default" size="100%">Csaba Berezki</style></author><author><style face="normal" font="default" size="100%">Vilmos Bilicki</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Újszülöttek monitorozása képfolyam elemzéssel</style></title><secondary-title><style face="normal" font="default" size="100%"> A XXVIII. 
Neumann Kollokvium konferencia-kiadványa</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2015</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Nov 2015</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">Neumann János Számítógép-tudományi Társaság</style></publisher><pub-location><style face="normal" font="default" size="100%">Veszprém, Hungary</style></pub-location><pages><style face="normal" font="default" size="100%">32-37</style></pages><isbn><style face="normal" font="default" size="100%">978-615-5036-10-1</style></isbn><work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Tobias Heimann</style></author><author><style face="normal" font="default" size="100%">Brahm Van Ginneken</style></author><author><style face="normal" font="default" size="100%">Martin A Styner</style></author><author><style face="normal" font="default" size="100%">Yulia Arzhaeva</style></author><author><style face="normal" font="default" size="100%">Volker Aurich</style></author><author><style face="normal" font="default" size="100%">Christian Bauer</style></author><author><style face="normal" font="default" size="100%">Andreas Beck</style></author><author><style face="normal" font="default" size="100%">Christoph Becker</style></author><author><style face="normal" font="default" size="100%">Reinhardt Beichel</style></author><author><style face="normal" font="default" size="100%">György Bekes</style></author><author><style face="normal" font="default" size="100%">Fernando Bello</style></author><author><style face="normal" font="default" size="100%">Gerd Binnig</style></author><author><style face="normal" font="default" size="100%">Horst 
Bischof</style></author><author><style face="normal" font="default" size="100%">Alexander Bornik</style></author><author><style face="normal" font="default" size="100%">Peter MM Cashman</style></author><author><style face="normal" font="default" size="100%">Ying Chi</style></author><author><style face="normal" font="default" size="100%">Andres Córdova</style></author><author><style face="normal" font="default" size="100%">Benoit M Dawant</style></author><author><style face="normal" font="default" size="100%">Márta Fidrich</style></author><author><style face="normal" font="default" size="100%">Jacob D Furst</style></author><author><style face="normal" font="default" size="100%">Daisuke Furukawa</style></author><author><style face="normal" font="default" size="100%">Lars Grenacher</style></author><author><style face="normal" font="default" size="100%">Joachim Hornegger</style></author><author><style face="normal" font="default" size="100%">Dagmar Kainmüller</style></author><author><style face="normal" font="default" size="100%">Richard I Kitney</style></author><author><style face="normal" font="default" size="100%">Hidefumi Kobatake</style></author><author><style face="normal" font="default" size="100%">Hans Lamecker</style></author><author><style face="normal" font="default" size="100%">Thomas Lange</style></author><author><style face="normal" font="default" size="100%">Jeongjin Lee</style></author><author><style face="normal" font="default" size="100%">Brian Lennon</style></author><author><style face="normal" font="default" size="100%">Rui Li</style></author><author><style face="normal" font="default" size="100%">Senhu Li</style></author><author><style face="normal" font="default" size="100%">Hans-Peter Meinzer</style></author><author><style face="normal" font="default" size="100%">Gábor Németh</style></author><author><style face="normal" font="default" size="100%">Daniela S Raicu</style></author><author><style face="normal" font="default" size="100%">Anne-Mareike 
Rau</style></author><author><style face="normal" font="default" size="100%">Eva M Van Rikxoort</style></author><author><style face="normal" font="default" size="100%">Mikael Rousson</style></author><author><style face="normal" font="default" size="100%">László Ruskó</style></author><author><style face="normal" font="default" size="100%">Kinda A Saddi</style></author><author><style face="normal" font="default" size="100%">Günter Schmidt</style></author><author><style face="normal" font="default" size="100%">Dieter Seghers</style></author><author><style face="normal" font="default" size="100%">Akinobi Shimizu</style></author><author><style face="normal" font="default" size="100%">Pieter Slagmolen</style></author><author><style face="normal" font="default" size="100%">Erich Sorantin</style></author><author><style face="normal" font="default" size="100%">Grzegorz Soza</style></author><author><style face="normal" font="default" size="100%">Ruchaneewan Susomboon</style></author><author><style face="normal" font="default" size="100%">Jonathan M Waite</style></author><author><style face="normal" font="default" size="100%">Andreas Wimmer</style></author><author><style face="normal" font="default" size="100%">Ivo Wolf</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Comparison and evaluation of methods for liver segmentation from CT datasets</style></title><secondary-title><style face="normal" font="default" size="100%">IEEE TRANSACTIONS ON MEDICAL IMAGING</style></secondary-title><short-title><style face="normal" font="default" size="100%">IEEE T MED IMAGING</style></short-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Aug 2009</style></date></pub-dates></dates><pub-location><style face="normal" font="default" size="100%">Price, K., Anything you can do, I can do better (no you can't) (1986) Comput. Vis. 
Graph. Image Process, 36 (2-3), pp. 387-391;S. G. Armato, G. McLennan, M. F. McNitt-Gray, C. R. Meyer, D. Yankelevitz, D. R. Aberle, C. I. Henschke, E. A. Hoffman, E. A. Ka</style></pub-location><volume><style face="normal" font="default" size="100%">28</style></volume><pages><style face="normal" font="default" size="100%">1251 - 1265</style></pages><isbn><style face="normal" font="default" size="100%">0278-0062</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;This paper presents a comparison study between 10 automatic and six interactive methods for liver segmentation from contrast-enhanced CT images. It is based on results from the &quot;MICCAI 2007 Grand Challenge&quot; workshop, where 16 teams evaluated their algorithms on a common database. A collection of 20 clinical images with reference segmentations was provided to train and tune algorithms in advance. Participants were also allowed to use additional proprietary training data for that purpose. All teams then had to apply their methods to 10 test datasets and submit the obtained results. Employed algorithms include statistical shape models, atlas registration, level-sets, graph-cuts and rule-based systems. All results were compared to reference segmentations five error measures that highlight different aspects of segmentation accuracy. All measures were combined according to a specific scoring system relating the obtained values to human expert variability. In general, interactive methods reached higher average scores than automatic approaches and featured a better consistency of segmentation quality. However, the best automatic methods (mainly based on statistical shape models with some additional free deformation) could compete well on the majority of test images. 
The study provides an insight in performance of different segmentation approaches under real-world conditions and highlights achievements and limitations of current image analysis techniques. © 2009 IEEE.&lt;/p&gt;</style></abstract><issue><style face="normal" font="default" size="100%">8</style></issue><work-type><style face="normal" font="default" size="100%">Journal article</style></work-type><notes><style face="normal" font="default" size="100%">ScopusID: 68249121543doi: 10.1109/TMI.2009.2013851</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>25</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Márta Fidrich</style></author><author><style face="normal" font="default" size="100%">Eörs Máté</style></author><author><style face="normal" font="default" size="100%">László Gábor Nyúl</style></author><author><style face="normal" font="default" size="100%">Attila Kuba</style></author><author><style face="normal" font="default" size="100%">Bence Kiss</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Method and system for automatically segmenting organs from three dimensional computed tomography images</style></title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2009</style></date></pub-dates></dates><pub-location><style face="normal" font="default" size="100%">Amerikai Egyesült Államok</style></pub-location><volume><style face="normal" font="default" size="100%">US20050907690</style></volume><language><style face="normal" font="default" size="100%">eng</style></language><issue><style face="normal" font="default" size="100%">US7545979</style></issue></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" 
font="default" size="100%">György Bekes</style></author><author><style face="normal" font="default" size="100%">Eörs Máté</style></author><author><style face="normal" font="default" size="100%">László Gábor Nyúl</style></author><author><style face="normal" font="default" size="100%">Attila Kuba</style></author><author><style face="normal" font="default" size="100%">Márta Fidrich</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Geometrical model-based segmentation of the organs of sight on CT images</style></title><secondary-title><style face="normal" font="default" size="100%">MEDICAL PHYSICS</style></secondary-title><short-title><style face="normal" font="default" size="100%">MED PHYS</style></short-title></titles><dates><year><style  face="normal" font="default" size="100%">2008</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Feb 2008</style></date></pub-dates></dates><volume><style face="normal" font="default" size="100%">35</style></volume><pages><style face="normal" font="default" size="100%">735 - 743</style></pages><isbn><style face="normal" font="default" size="100%">0094-2405</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;Segmentation of organs of sight such as the eyeballs, lenses,and optic nerves is a time consuming task for clinicians. The small size of the organs and the similar density of the surrounding tissues make the segmentation difficult. We developed a new algorithm to segment these organs with minimal user interaction. The algorithm needs only three seed points to fit an initial geometrical model to start an effective segmentation. 
The clinical evaluation shows that the output of our method is useful in clinical practice.&lt;/p&gt;</style></abstract><issue><style face="normal" font="default" size="100%">2</style></issue><work-type><style face="normal" font="default" size="100%">Journal article</style></work-type><notes><style face="normal" font="default" size="100%">UT: 000253318400036ScopusID: 38849194643doi: 10.1118/1.2826557</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>25</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Márta Fidrich</style></author><author><style face="normal" font="default" size="100%">Géza Makay</style></author><author><style face="normal" font="default" size="100%">Eörs Máté</style></author><author><style face="normal" font="default" size="100%">Emese Balogh</style></author><author><style face="normal" font="default" size="100%">Attila Kuba</style></author><author><style face="normal" font="default" size="100%">László Gábor Nyúl</style></author><author><style face="normal" font="default" size="100%">Judit Kanyó</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Systems and methods for segmenting an organ in a plurality of images</style></title></titles><dates><year><style  face="normal" font="default" size="100%">2008</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2008</style></date></pub-dates></dates><pub-location><style face="normal" font="default" size="100%">Amerikai Egyesült Államok</style></pub-location><volume><style face="normal" font="default" size="100%">US20040858241</style></volume><language><style face="normal" font="default" size="100%">eng</style></language><issue><style face="normal" font="default" size="100%">US7388973</style></issue></record><record><source-app name="Biblio" 
version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">György Bekes</style></author><author><style face="normal" font="default" size="100%">László Gábor Nyúl</style></author><author><style face="normal" font="default" size="100%">Eörs Máté</style></author><author><style face="normal" font="default" size="100%">Attila Kuba</style></author><author><style face="normal" font="default" size="100%">Márta Fidrich</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">3D segmentation of liver, kidneys and spleen from CT images</style></title><secondary-title><style face="normal" font="default" size="100%">INTERNATIONAL JOURNAL OF COMPUTER ASSISTED RADIOLOGY AND SURGERY</style></secondary-title><short-title><style face="normal" font="default" size="100%">INT J COMPUT ASSIST RADIOL SURG</style></short-title></titles><dates><year><style  face="normal" font="default" size="100%">2007</style></year><pub-dates><date><style  face="normal" font="default" size="100%">June 2007</style></date></pub-dates></dates><volume><style face="normal" font="default" size="100%">2</style></volume><pages><style face="normal" font="default" size="100%">S45 - S47</style></pages><isbn><style face="normal" font="default" size="100%">1861-6410</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;The clinicians often need to segment the abdominal organs forradiotherapy planning. Manual segmentation of these organs is very time-consuming, therefore automated methods are desired. We developed a semi-automatic segmentation method to outline liver, spleen and kidneys. It works on CT images without contrast intake that are acquired with a routine clinical protocol. 
From an initial surface around a user defined seed point, the segmentation of the organ is obtained by an active surface algorithm. Pre- and post-processing steps are used to adapt the general method for specific organs. The evaluation results show that the accuracy of our method is about 90%, which can be further improved with little manual editing, and that the precision is slightly higher than that of manual contouring. Our method is accurate, precise and fast enough to use in the clinical practice.&lt;/p&gt;</style></abstract><issue><style face="normal" font="default" size="100%">1 SUPPL.</style></issue><work-type><style face="normal" font="default" size="100%">Journal article</style></work-type><notes><style face="normal" font="default" size="100%">ScopusID: 34250685687doi: 10.1007/s11548-007-0083-7</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors></contributors><titles><title><style face="normal" font="default" size="100%">Method for Automatically Segmenting the Spinal Cord and Canal from 3D CT Images</style></title><secondary-title><style face="normal" font="default" size="100%">Joint Hungarian-Austrian conference on image processing and pattern recognition. 
5th conference of the Hungarian Association for Image Processing and Pattern Recognition (KÉPAF), 29th workshop of the Austrian Association for Pattern Reco</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2005</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2005///</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">OCG</style></publisher><pub-location><style face="normal" font="default" size="100%">Vienna</style></pub-location><pages><style face="normal" font="default" size="100%">311 - 318</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We present two approaches for automatically segmenting thespinal cord/canal from native CT images of the thorax region 
containing the spine. Different strategies are included to 
handle images where only part of the spinal column is visible. 
The algorithms require one seed point given on a slice located 
in the middle region of the spine, and the rest is automatic. 
The spatial extent of the spinal cord/canal is determined 
automatically using anatomical information for segmenting the 
spinal canal while active contours are applied if the spinal 
cord is to be segmented. Both methods work in 2D and use 
propagated information from neighboring slices. They are also 
very rapid in execution, which makes for an efficient, user-friendly 
workflow. The methods were evaluated by radiologists and were 
found to be useful and met the accuracy and repeatability 
requirements for the particular task.
</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors></contributors><titles><title><style face="normal" font="default" size="100%">Method for automatically segmenting the spinal cord and canal from 3D CT images</style></title><secondary-title><style face="normal" font="default" size="100%">Computer Analysis of Images and Patterns</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2005</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2005</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">Springer-Verlag</style></publisher><pub-location><style face="normal" font="default" size="100%">Berlin; Heidelberg</style></pub-location><pages><style face="normal" font="default" size="100%">456 - 463</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><notes><style face="normal" font="default" size="100%">UT: 000232301200056</style></notes></record></records></xml>