<?xml version="1.0" encoding="UTF-8"?>
<!-- Bibliographic record (EndNote-flavored XML) exported by Drupal Biblio 7.x.
     Text-bearing <style> leaves are kept on single lines so pretty-printing adds
     no whitespace to field values; the abstract's embedded newline is preserved. -->
<xml>
  <records>
    <record>
      <source-app name="Biblio" version="7.x">Drupal-Biblio</source-app>
      <ref-type>5</ref-type>
      <contributors>
        <authors>
          <!-- NOTE(review): normalized from Hungarian family-first "Tamás Levente" to
               given-name-first to match every other contributor in this record. -->
          <author><style face="normal" font="default" size="100%">Levente Tamás</style></author>
          <author><style face="normal" font="default" size="100%">Zoltan Kato</style></author>
        </authors>
        <secondary-authors>
          <author><style face="normal" font="default" size="100%">Jian Zhang</style></author>
          <author><style face="normal" font="default" size="100%">Mohammed Bennamoun</style></author>
          <author><style face="normal" font="default" size="100%">Dan Schonfeld</style></author>
          <author><style face="normal" font="default" size="100%">Zhengyou Zhang</style></author>
        </secondary-authors>
      </contributors>
      <titles>
        <title><style face="normal" font="default" size="100%">Targetless Calibration of a Lidar - Perspective Camera Pair</style></title>
        <secondary-title><style face="normal" font="default" size="100%">Proceedings of ICCV Workshop on Big Data in 3D Computer Vision</style></secondary-title>
      </titles>
      <dates>
        <year><style face="normal" font="default" size="100%">2013</style></year>
        <pub-dates>
          <date><style face="normal" font="default" size="100%">Dec 2013</style></date>
        </pub-dates>
      </dates>
      <publisher><style face="normal" font="default" size="100%">IEEE</style></publisher>
      <pub-location><style face="normal" font="default" size="100%">Sydney, NSW</style></pub-location>
      <pages><style face="normal" font="default" size="100%">668 - 675</style></pages>
      <language><style face="normal" font="default" size="100%">eng</style></language>
      <abstract><style face="normal" font="default" size="100%">&lt;div class=&quot;article&quot;&gt;&lt;p&gt;A novel method is proposed for the &lt;span class=&quot;snippet&quot;&gt;calibration&lt;/span&gt; of a &lt;span class=&quot;snippet&quot;&gt;camera&lt;/span&gt; - 3D &lt;span class=&quot;snippet&quot;&gt;lidar&lt;/span&gt; &lt;span class=&quot;snippet&quot;&gt;pair&lt;/span&gt; without the use of any special 
&lt;span class=&quot;snippet&quot;&gt;calibration&lt;/span&gt; pattern or point correspondences. The proposed method has no specific assumption about the data source: plain depth information is expected from the &lt;span class=&quot;snippet&quot;&gt;lidar&lt;/span&gt; scan and a simple &lt;span class=&quot;snippet&quot;&gt;perspective&lt;/span&gt; &lt;span class=&quot;snippet&quot;&gt;camera&lt;/span&gt; is used for the 2D images. The &lt;span class=&quot;snippet&quot;&gt;calibration&lt;/span&gt; is solved as a 2D-3D registration problem using a minimum of one (for extrinsic) or two (for intrinsic-extrinsic) planar regions visible in both cameras. The registration is then traced back to the solution of a non-linear system of equations which directly provides the &lt;span class=&quot;snippet&quot;&gt;calibration&lt;/span&gt; parameters between the bases of the two sensors. The method has been tested on a large set of synthetic &lt;span class=&quot;snippet&quot;&gt;lidar&lt;/span&gt;-&lt;span class=&quot;snippet&quot;&gt;camera&lt;/span&gt; image &lt;span class=&quot;snippet&quot;&gt;pairs&lt;/span&gt; as well as on real data acquired in outdoor environment.&lt;/p&gt;&lt;/div&gt;&lt;p&gt;&amp;nbsp;&lt;/p&gt;</style></abstract>
      <work-type><style face="normal" font="default" size="100%">Conference paper</style></work-type>
      <accession-num><style face="normal" font="default" size="100%">14147882</style></accession-num>
      <notes><style face="normal" font="default" size="100%">doi: 10.1109/ICCVW.2013.92</style></notes>
    </record>
  </records>
</xml>