Publications

2024

  • M. Zeller, D. Casado Herraez, B. Ayan, J. Behley, M. Heidingsfeld, and C. Stachniss, “SemRaFiner: Panoptic Segmentation in Sparse and Noisy Radar Point Clouds,” IEEE Robotics and Automation Letters (RA-L), 2024.
    [BibTeX]
    @article{zeller2024ral,
    author = {M. Zeller and Casado Herraez, D. and B. Ayan and J. Behley and M. Heidingsfeld and C. Stachniss},
    title = {{SemRaFiner: Panoptic Segmentation in Sparse and Noisy Radar Point
    Clouds}},
    journal = ral,
    year = {2024},
    note = {Accepted},
    }

  • L. Wiesmann, T. Läbe, L. Nunes, J. Behley, and C. Stachniss, “Joint Intrinsic and Extrinsic Calibration of Perception Systems Utilizing a Calibration Environment,” IEEE Robotics and Automation Letters (RA-L), vol. 9, iss. 10, pp. 9103-9110, 2024. doi:10.1109/LRA.2024.3457385
    [BibTeX] [PDF]
    @article{wiesmann2024ral,
    author = {L. Wiesmann and T. L\"abe and L. Nunes and J. Behley and C. Stachniss},
    title = {{Joint Intrinsic and Extrinsic Calibration of Perception Systems Utilizing a Calibration Environment}},
    journal = ral,
    year = {2024},
    volume = {9},
    number = {10},
    pages = {9103--9110},
    issn = {2377-3766},
    doi = {10.1109/LRA.2024.3457385},
    }

  • F. Magistri, T. Läbe, E. Marks, S. Nagulavancha, Y. Pan, C. Smitt, L. Klingbeil, M. Halstead, H. Kuhlmann, C. McCool, J. Behley, and C. Stachniss, “A Dataset and Benchmark for Shape Completion of Fruits for Agricultural Robotics,” arXiv Preprint, 2024.
    [BibTeX] [PDF]
    @article{magistri2024arxiv,
    title={{A Dataset and Benchmark for Shape Completion of Fruits for Agricultural Robotics}},
    author={F. Magistri and T. L\"abe and E. Marks and S. Nagulavancha and Y. Pan and C. Smitt and L. Klingbeil and M. Halstead and H. Kuhlmann and C. McCool and J. Behley and C. Stachniss},
    journal = arxiv,
    year=2024,
    eprint = {2407.13304},
    archiveprefix = {arXiv},
    }

  • P. M. Blok, F. Magistri, C. Stachniss, H. Wang, J. Burridge, and W. Guo, “High-Throughput 3D Shape Completion of Potato Tubers on a Harvester,” arXiv Preprint, vol. arXiv:2407.21341, 2024.
    [BibTeX] [PDF]
    @article{blok2024arxiv,
    author = {P.M. Blok and F. Magistri and C. Stachniss and H. Wang and J. Burridge and W. Guo},
    title = {{High-Throughput 3D Shape Completion of Potato Tubers on a Harvester}},
    journal = arxiv,
    year = 2024,
    eprint = {2407.21341},
    archiveprefix = {arXiv},
    url = {http://arxiv.org/pdf/2407.21341v1},
    }

  • Y. Pan, X. Zhong, L. Wiesmann, T. Posewsky, J. Behley, and C. Stachniss, “PIN-SLAM: LiDAR SLAM Using a Point-Based Implicit Neural Representation for Achieving Global Map Consistency,” IEEE Transactions on Robotics (TRO), vol. 40, pp. 4045-4064, 2024. doi:10.1109/TRO.2024.3422055
    [BibTeX] [PDF] [Code]
    @article{pan2024tro,
    author = {Y. Pan and X. Zhong and L. Wiesmann and T. Posewsky and J. Behley and C. Stachniss},
    title = {{PIN-SLAM: LiDAR SLAM Using a Point-Based Implicit Neural Representation for Achieving Global Map Consistency}},
    journal = tro,
    year = {2024},
    pages = {4045--4064},
    volume = {40},
    doi = {10.1109/TRO.2024.3422055},
    codeurl = {https://github.com/PRBonn/PIN_SLAM},
    }

  • J. Weyler, F. Magistri, E. Marks, Y. L. Chong, M. Sodano, G. Roggiolani, N. Chebrolu, C. Stachniss, and J. Behley, “PhenoBench: A Large Dataset and Benchmarks for Semantic Image Interpretation in the Agricultural Domain,” IEEE Trans. on Pattern Analysis and Machine Intelligence (TPAMI), 2024. doi:10.1109/TPAMI.2024.3419548
    [BibTeX] [PDF] [Code]
    @article{weyler2024tpami,
    author = {J. Weyler and F. Magistri and E. Marks and Y.L. Chong and M. Sodano and G. Roggiolani and N. Chebrolu and C. Stachniss and J. Behley},
    title = {{PhenoBench: A Large Dataset and Benchmarks for Semantic Image Interpretation in the Agricultural Domain}},
    journal = tpami,
    year = {2024},
    doi = {10.1109/TPAMI.2024.3419548},
    codeurl = {https://github.com/PRBonn/phenobench},
    }

  • D. Casado Herraez, L. Chang, M. Zeller, L. Wiesmann, J. Behley, M. Heidingsfeld, and C. Stachniss, “SPR: Single-Scan Radar Place Recognition,” IEEE Robotics and Automation Letters (RA-L), vol. 9, iss. 10, pp. 9079-9086, 2024.
    [BibTeX] [PDF]
    @article{casado-herraez2024ral,
    author = {Casado Herraez, D. and L. Chang and M. Zeller and L. Wiesmann and J. Behley and M. Heidingsfeld and C. Stachniss},
    title = {{SPR: Single-Scan Radar Place Recognition}},
    journal = ral,
    year = {2024},
    volume = {9},
    number = {10},
    pages = {9079--9086},
    }

  • A. Vashisth, J. Rückin, F. Magistri, C. Stachniss, and M. Popović, “Deep Reinforcement Learning with Dynamic Graphs for Adaptive Informative Path Planning,” IEEE Robotics and Automation Letters (RA-L), vol. 9, iss. 9, pp. 7747-7754, 2024. doi:10.1109/LRA.2024.3421188
    [BibTeX] [PDF] [Code]
    @article{vashisth2024ral,
    author = {A. Vashisth and J. R\"uckin and F. Magistri and C.
    Stachniss and M. Popovi\'c},
    title = {{Deep Reinforcement Learning with Dynamic Graphs for Adaptive
    Informative Path Planning}},
    journal = ral,
    volume = {9},
    number = {9},
    pages = {7747--7754},
    year = 2024,
    doi = {10.1109/LRA.2024.3421188},
    codeurl = {https://github.com/dmar-bonn/ipp-rl-3d},
    }

  • M. Popović, J. Ott, J. Rückin, and M. J. Kochenderfer, “Learning-based methods for adaptive informative path planning,” Robotics and Autonomous Systems, vol. 179, p. 104727, 2024.
    [BibTeX] [PDF] [Code]
    @article{popovic2024jras,
    title = {{Learning-based methods for adaptive informative path planning}},
    author = {Popovi{\'c}, Marija and Ott, Joshua and R{\"u}ckin, Julius and Kochenderfer, Mykel J.},
    journal = jras,
    volume = {179},
    pages = {104727},
    year = {2024},
    codeurl = {https://dmar-bonn.github.io/aipp-survey},
    }

  • F. Magistri, Y. Pan, J. Bartels, J. Behley, C. Stachniss, and C. Lehnert, “Improving Robotic Fruit Harvesting Within Cluttered Environments Through 3D Shape Completion,” IEEE Robotics and Automation Letters (RA-L), vol. 9, iss. 8, p. 7357–7364, 2024. doi:10.1109/LRA.2024.3421788
    [BibTeX] [PDF]
    @article{magistri2024ral,
    author = {F. Magistri and Y. Pan and J. Bartels and J. Behley and C. Stachniss and C. Lehnert},
    title = {{Improving Robotic Fruit Harvesting Within Cluttered Environments
    Through 3D Shape Completion}},
    journal = ral,
    volume = {9},
    number = {8},
    pages = {7357--7364},
    year = 2024,
    doi = {10.1109/LRA.2024.3421788},
    }

  • I. B. Opra, B. Le Dem, J. Walls, D. Lukarski, and C. Stachniss, “Leveraging GNSS and Onboard Visual Data from Consumer Vehicles for Robust Road Network Estimation,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2024.
    [BibTeX] [PDF]
    @inproceedings{opra2024iros,
    author = {I.B. Opra and Le Dem, B. and J. Walls and D. Lukarski and C. Stachniss},
    title = {{Leveraging GNSS and Onboard Visual Data from Consumer Vehicles for Robust Road Network Estimation}},
    booktitle = iros,
    year = 2024,
    }

  • L. Lobefaro, M. V. R. Malladi, T. Guadagnino, and C. Stachniss, “Spatio-Temporal Consistent Mapping of Growing Plants for Agricultural Robots in the Wild,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2024.
    [BibTeX] [PDF] [Code]
    @inproceedings{lobefaro2024iros,
    author = {L. Lobefaro and M.V.R. Malladi and T. Guadagnino and C. Stachniss},
    title = {{Spatio-Temporal Consistent Mapping of Growing Plants for Agricultural Robots in the Wild}},
    booktitle = iros,
    year = 2024,
    codeurl = {https://github.com/PRBonn/spatio-temporal-mapping.git},
    }

  • E. A. Marks, J. Bömer, F. Magistri, A. Sah, J. Behley, and C. Stachniss, “BonnBeetClouds3D: A Dataset Towards Point Cloud-Based Organ-Level Phenotyping of Sugar Beet Plants Under Real Field Conditions,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2024.
    [BibTeX] [PDF]
    @inproceedings{marks2024iros,
    author = {E.A. Marks and J. B\"omer and F. Magistri and A. Sah and J. Behley and C. Stachniss},
    title = {{BonnBeetClouds3D: A Dataset Towards Point Cloud-Based Organ-Level Phenotyping of Sugar Beet Plants Under Real Field Conditions}},
    booktitle = iros,
    year = 2024,
    }

  • H. Lim, S. Jang, B. Mersch, J. Behley, H. Myung, and C. Stachniss, “HeLiMOS: A Dataset for Moving Object Segmentation in 3D Point Clouds From Heterogeneous LiDAR Sensors,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2024.
    [BibTeX] [PDF]
    @inproceedings{lim2024iros,
    author = {H. Lim and S. Jang and B. Mersch and J. Behley and H. Myung and C. Stachniss},
    title = {{HeLiMOS: A Dataset for Moving Object Segmentation in 3D Point Clouds From Heterogeneous LiDAR Sensors}},
    booktitle = iros,
    year = 2024,
    }

  • R. Schirmer, N. Vaskevicius, P. Biber, and C. Stachniss, “Fast Global Point Cloud Registration using Semantic NDT,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2024.
    [BibTeX]
    @inproceedings{schirmer2024iros,
    author = {R. Schirmer and N. Vaskevicius and P. Biber and C. Stachniss},
    title = {{Fast Global Point Cloud Registration using Semantic NDT}},
    booktitle = iros,
    year = 2024,
    }

  • L. Jin, H. Kuang, Y. Pan, C. Stachniss, and M. Popović, “STAIR: Semantic-Targeted Active Implicit Reconstruction,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2024.
    [BibTeX] [PDF] [Code]
    @inproceedings{jin2024iros,
    author = {L. Jin and H. Kuang and Y. Pan and C. Stachniss and M. Popovi\'c},
    title = {{STAIR: Semantic-Targeted Active Implicit Reconstruction}},
    booktitle = iros,
    year = 2024,
    codeurl = {https://github.com/dmar-bonn/stair}
    }

  • S. Pan, L. Jin, X. Huang, C. Stachniss, M. Popović, and M. Bennewitz, “Exploiting Priors from 3D Diffusion Models for RGB-Based One-Shot View Planning,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2024.
    [BibTeX] [PDF]
    @inproceedings{pan2024iros,
    author = {S. Pan and L. Jin and X. Huang and C. Stachniss and M. Popovi\'c and M. Bennewitz},
    title = {{Exploiting Priors from 3D Diffusion Models for RGB-Based One-Shot View Planning}},
    booktitle = iros,
    year = 2024,
    }

  • J. Bömer, F. Esser, E. A. Marks, R. A. Rosu, S. Behnke, L. Klingbeil, H. Kuhlmann, C. Stachniss, A. -K. Mahlein, and S. Paulus, “A 3D Printed Plant Model for Accurate and Reliable 3D Plant Phenotyping,” GigaScience, vol. 13, p. giae035, 2024. doi:10.1093/gigascience/giae035
    [BibTeX] [PDF]
    @article{boemer2024giga,
    author = {J. B\"omer and F. Esser and E.A. Marks and R.A. Rosu and S. Behnke and L. Klingbeil and H. Kuhlmann and C. Stachniss and A.-K. Mahlein and S. Paulus},
    title = {{A 3D Printed Plant Model for Accurate and Reliable 3D Plant Phenotyping}},
    journal = giga,
    volume = {13},
    pages = {giae035},
    issn = {2047-217X},
    year = 2024,
    doi = {10.1093/gigascience/giae035},
    url = {https://academic.oup.com/gigascience/article-pdf/doi/10.1093/gigascience/giae035/58270533/giae035.pdf},
    }

  • W. Förstner, Collected Notes, Institute for Geodesy and Geoinformation, StachnissLab, 2024.
    [BibTeX] [PDF]
    @Book{foerstner2024collected,
    author = {Wolfgang F{\"{o}}rstner},
    title = {Collected Notes},
    publisher = {Institute for Geodesy and Geoinformation, StachnissLab},
    year = {2024},
    url = {https://www.ipb.uni-bonn.de/html/staff/WolfgangFoerstner/collectednotes_v2/main-Lecturenotes.pdf},
    }

  • W. Förstner, “Cinderella Animations,” Institute for Geodesy and Geoinformation, StachnissLab, 2024.
    [BibTeX] [PDF] [Code]
    @Report{foerstner2024cinderella,
    author = {Wolfgang F{\"{o}}rstner},
    institution = {{Institute for Geodesy and Geoinformation, StachnissLab}},
    title = {{Cinderella Animations}},
    type = {techreport},
    url = {https://www.ipb.uni-bonn.de/html/staff/WolfgangFoerstner/collectednotes_v2/Cinderella-Animations.pdf},
    year = {2024},
    codeurl = {https://github.com/PRBonn/cinderella-geometric-animations},
    }

  • H. Storm, S. J. Seidel, L. Klingbeil, F. Ewert, H. Vereecken, W. Amelung, S. Behnke, M. Bennewitz, J. Börner, T. Döring, J. Gall, A. -K. Mahlein, C. McCool, U. Rascher, S. Wrobel, A. Schnepf, C. Stachniss, and H. Kuhlmann, “Research Priorities to Leverage Smart Digital Technologies for Sustainable Crop Production,” European Journal of Agronomy, vol. 156, p. 127178, 2024. doi:https://doi.org/10.1016/j.eja.2024.127178
    [BibTeX] [PDF]
    @article{storm2024eja,
    author = {H. Storm and S.J. Seidel and L. Klingbeil and F. Ewert and H. Vereecken and W. Amelung and S. Behnke and M. Bennewitz and J. B\"orner and T. D\"oring and J. Gall and A.-K. Mahlein and C. McCool and U. Rascher and S. Wrobel and A. Schnepf and C. Stachniss and H. Kuhlmann},
    title = {{Research Priorities to Leverage Smart Digital Technologies for Sustainable Crop Production}},
    journal = {European Journal of Agronomy},
    volume = {156},
    pages = {127178},
    year = {2024},
    issn = {1161-0301},
    doi = {10.1016/j.eja.2024.127178},
    url = {https://www.sciencedirect.com/science/article/pii/S1161030124000996},
    }

  • J. Hertzberg, B. Kisliuk, J. C. Krause, and C. Stachniss, “Interview: Cyrill Stachniss’ View on AI in Agriculture,” German Journal of Artificial Intelligence (KI), 2024. doi:10.1007/s13218-023-00831-8
    [BibTeX] [PDF]
    @article{hertzberg2024ki,
    author = {J. Hertzberg and B. Kisliuk and J.C. Krause and C. Stachniss},
    title = {{Interview: Cyrill Stachniss' View on AI in Agriculture}},
    journal = {German Journal of Artificial Intelligence (KI)},
    year = {2024},
    doi = {10.1007/s13218-023-00831-8},
    url = {https://link.springer.com/article/10.1007/s13218-023-00831-8},
    }

  • M. Sodano, F. Magistri, L. Nunes, J. Behley, and C. Stachniss, “Open-World Semantic Segmentation Including Class Similarity,” in Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2024.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{sodano2024cvpr,
    author = {M. Sodano and F. Magistri and L. Nunes and J. Behley and C. Stachniss},
    title = {{Open-World Semantic Segmentation Including Class Similarity}},
    booktitle = cvpr,
    year = 2024,
    codeurl = {https://github.com/PRBonn/ContMAV},
    videourl = {https://youtu.be/ei2cbyPQgag?si=_KabYyfjzzJZi1Zy},
    }

  • L. Nunes, R. Marcuzzi, B. Mersch, J. Behley, and C. Stachniss, “Scaling Diffusion Models to Real-World 3D LiDAR Scene Completion,” in Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2024.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{nunes2024cvpr,
    author = {L. Nunes and R. Marcuzzi and B. Mersch and J. Behley and C. Stachniss},
    title = {{Scaling Diffusion Models to Real-World 3D LiDAR Scene Completion}},
    booktitle = cvpr,
    year = 2024,
    codeurl = {https://github.com/PRBonn/LiDiff},
    videourl = {https://youtu.be/XWu8svlMKUo},
    }

  • X. Zhong, Y. Pan, C. Stachniss, and J. Behley, “3D LiDAR Mapping in Dynamic Environments using a 4D Implicit Neural Representation,” in Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2024.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{zhong2024cvpr,
    author = {X. Zhong and Y. Pan and C. Stachniss and J. Behley},
    title = {{3D LiDAR Mapping in Dynamic Environments using a 4D Implicit Neural Representation}},
    booktitle = cvpr,
    year = 2024,
    codeurl = {https://github.com/PRBonn/4dNDF},
    videourl = {https://youtu.be/pRNKRcTkxjs},
    }

  • H. Yin, X. Xu, S. Lu, X. Chen, R. Xiong, S. Shen, C. Stachniss, and Y. Wang, “A Survey on Global LiDAR Localization: Challenges, Advances and Open Problems,” Intl. Journal of Computer Vision (IJCV), 2024. doi:10.1007/s11263-024-02019-5
    [BibTeX] [PDF]
    @article{yin2024ijcv,
    author = {H. Yin and X. Xu and S. Lu and X. Chen and R. Xiong and S. Shen and C. Stachniss and Y. Wang},
    title = {{A Survey on Global LiDAR Localization: Challenges, Advances and Open Problems}},
    journal = {Intl. Journal of Computer Vision (IJCV)},
    year = 2024,
    doi = {10.1007/s11263-024-02019-5},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/yin2024ijcv-preprint.pdf},
    }

  • S. Pan, L. Jin, X. Huang, C. Stachniss, M. Popovic, and M. Bennewitz, “Exploiting Priors from 3D Diffusion Models for RGB-Based One-Shot View Planning,” in Proc. of the ICRA Workshop On Neural Fields In Robotics (RoboNerF), 2024.
    [BibTeX]
    @inproceedings{pan2024icraws,
    title={{Exploiting Priors from 3D Diffusion Models for {RGB}-Based One-Shot View Planning}},
    author={S. Pan and L. Jin and X. Huang and C. Stachniss and M. Popovic and M. Bennewitz},
    booktitle = {Proc. of the ICRA Workshop On Neural Fields In Robotics (RoboNerF)},
    year={2024},
    }

  • I. Hroob, B. Mersch, C. Stachniss, and M. Hanheide, “Generalizable Stable Points Segmentation for 3D LiDAR Scan-to-Map Long-Term Localization,” IEEE Robotics and Automation Letters (RA-L), vol. 9, iss. 4, pp. 3546-3553, 2024. doi:10.1109/LRA.2024.3368236
    [BibTeX] [PDF] [Code] [Video]
    @article{hroob2024ral,
    author = {I. Hroob and B. Mersch and C. Stachniss and M. Hanheide},
    title = {{Generalizable Stable Points Segmentation for 3D LiDAR Scan-to-Map Long-Term Localization}},
    journal = ral,
    volume = {9},
    number = {4},
    pages = {3546--3553},
    year = 2024,
    doi = {10.1109/LRA.2024.3368236},
    videourl = {https://youtu.be/aRLStFQEXbc},
    codeurl = {https://github.com/ibrahimhroob/SPS},
    }

  • M. Zeller, D. Casado Herraez, J. Behley, M. Heidingsfeld, and C. Stachniss, “Radar Tracker: Moving Instance Tracking in Sparse and Noisy Radar Point Clouds,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2024.
    [BibTeX] [PDF] [Video]
    @inproceedings{zeller2024icra,
    author = {M. Zeller and Casado Herraez, Daniel and J. Behley and M. Heidingsfeld and C. Stachniss},
    title = {{Radar Tracker: Moving Instance Tracking in Sparse and Noisy Radar Point Clouds}},
    booktitle = icra,
    year = 2024,
    videourl = {https://youtu.be/PixfkN8cMig},
    }

  • D. Casado Herraez, M. Zeller, L. Chang, I. Vizzo, M. Heidingsfeld, and C. Stachniss, “Radar-Only Odometry and Mapping for Autonomous Vehicles,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2024.
    [BibTeX] [PDF] [Video]
    @inproceedings{casado-herraez2024icra,
    author = {Casado Herraez, Daniel and M. Zeller and Chang, Le and I. Vizzo and M. Heidingsfeld and C. Stachniss},
    title = {{Radar-Only Odometry and Mapping for Autonomous Vehicles}},
    booktitle = icra,
    year = 2024,
    videourl = {https://youtu.be/_xWDXyyKEok}
    }

  • M. V. R. Malladi, T. Guadagnino, L. Lobefaro, M. Mattamala, H. Griess, J. Schweier, N. Chebrolu, M. Fallon, J. Behley, and C. Stachniss, “Tree Instance Segmentation and Traits Estimation for Forestry Environments Exploiting LiDAR Data ,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2024.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{malladi2024icra,
    author = {M.V.R. Malladi and T. Guadagnino and L. Lobefaro and M. Mattamala and H. Griess and J. Schweier and N. Chebrolu and M. Fallon and J. Behley and C. Stachniss},
    title = {{Tree Instance Segmentation and Traits Estimation for Forestry Environments Exploiting LiDAR Data}},
    booktitle = icra,
    year = 2024,
    videourl = {https://youtu.be/14uuCxmfGco},
    codeurl = {https://github.com/PRBonn/forest_inventory_pipeline},
    }

  • F. Magistri, R. Marcuzzi, E. A. Marks, M. Sodano, J. Behley, and C. Stachniss, “Efficient and Accurate Transformer-Based 3D Shape Completion and Reconstruction of Fruits for Agricultural Robots,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2024.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{magistri2024icra,
    author = {F. Magistri and R. Marcuzzi and E.A. Marks and M. Sodano and J. Behley and C. Stachniss},
    title = {{Efficient and Accurate Transformer-Based 3D Shape Completion and Reconstruction of Fruits for Agricultural Robots}},
    booktitle = icra,
    year = 2024,
    videourl = {https://youtu.be/U1xxnUGrVL4},
    codeurl = {https://github.com/PRBonn/TCoRe},
    }

  • S. Gupta, T. Guadagnino, B. Mersch, I. Vizzo, and C. Stachniss, “Effectively Detecting Loop Closures using Point Cloud Density Maps,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2024.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{gupta2024icra,
    author = {S. Gupta and T. Guadagnino and B. Mersch and I. Vizzo and C. Stachniss},
    title = {{Effectively Detecting Loop Closures using Point Cloud Density Maps}},
    booktitle = icra,
    year = 2024,
    codeurl = {https://github.com/PRBonn/MapClosures},
    videourl = {https://youtu.be/BpwR_aLXrNo},
    }

  • Y. Wu, T. Guadagnino, L. Wiesmann, L. Klingbeil, C. Stachniss, and H. Kuhlmann, “LIO-EKF: High Frequency LiDAR-Inertial Odometry using Extended Kalman Filters,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2024.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{wu2024icra,
    author = {Y. Wu and T. Guadagnino and L. Wiesmann and L. Klingbeil and C. Stachniss and H. Kuhlmann},
    title = {{LIO-EKF: High Frequency LiDAR-Inertial Odometry using Extended Kalman Filters}},
    booktitle = icra,
    year = 2024,
    codeurl = {https://github.com/YibinWu/LIO-EKF},
    videourl = {https://youtu.be/MoJTqEYl1ME},
    }

  • M. Zeller, V. S. Sandhu, B. Mersch, J. Behley, M. Heidingsfeld, and C. Stachniss, “Radar Instance Transformer: Reliable Moving Instance Segmentation in Sparse Radar Point Clouds,” IEEE Transactions on Robotics (TRO), vol. 40, pp. 2357-2372, 2024. doi:10.1109/TRO.2023.3338972
    [BibTeX] [PDF] [Video]
    @article{zeller2024tro,
    author = {M. Zeller and Sandhu, V.S. and B. Mersch and J. Behley and M. Heidingsfeld and C. Stachniss},
    title = {{Radar Instance Transformer: Reliable Moving Instance Segmentation in Sparse Radar Point Clouds}},
    journal = tro,
    year = {2024},
    volume = {40},
    doi = {10.1109/TRO.2023.3338972},
    pages = {2357--2372},
    videourl = {https://www.youtube.com/watch?v=v-iXbJEcqPM}
    }

  • J. Rückin, F. Magistri, C. Stachniss, and M. Popović, “Semi-Supervised Active Learning for Semantic Segmentation in Unknown Environments Using Informative Path Planning,” IEEE Robotics and Automation Letters (RA-L), vol. 9, iss. 3, pp. 2662-2669, 2024. doi:10.1109/LRA.2024.3359970
    [BibTeX] [PDF] [Code]
    @article{rueckin2024ral,
    author = {J. R\"uckin and F. Magistri and C. Stachniss and M. Popovi\'c},
    title = {{Semi-Supervised Active Learning for Semantic Segmentation in Unknown Environments Using Informative Path Planning}},
    journal = ral,
    year = {2024},
    volume = {9},
    number = {3},
    pages = {2662--2669},
    issn = {2377-3766},
    doi = {10.1109/LRA.2024.3359970},
    codeurl = {https://github.com/dmar-bonn/ipp-ssl},
    }

  • J. Weyler, T. Läbe, J. Behley, and C. Stachniss, “Panoptic Segmentation with Partial Annotations for Agricultural Robots,” IEEE Robotics and Automation Letters (RA-L), vol. 9, iss. 2, pp. 1660-1667, 2024. doi:10.1109/LRA.2023.3346760
    [BibTeX] [PDF] [Code]
    @article{weyler2024ral,
    author = {J. Weyler and T. L\"abe and J. Behley and C. Stachniss},
    title = {{Panoptic Segmentation with Partial Annotations for Agricultural Robots}},
    journal = ral,
    year = {2024},
    volume = {9},
    number = {2},
    pages = {1660--1667},
    issn = {2377-3766},
    doi = {10.1109/LRA.2023.3346760},
    codeurl = {https://github.com/PRBonn/PSPA}
    }

  • C. Smitt, M. A. Halstead, P. Zimmer, T. Läbe, E. Guclu, C. Stachniss, and C. S. McCool, “PAg-NeRF: Towards fast and efficient end-to-end panoptic 3D representations for agricultural robotics,” IEEE Robotics and Automation Letters (RA-L), vol. 9, iss. 1, pp. 907-914, 2024. doi:10.1109/LRA.2023.3338515
    [BibTeX] [PDF] [Code]
    @article{smitt2024ral-pagn,
    author = {C. Smitt and M.A. Halstead and P. Zimmer and T. L\"abe and E. Guclu and C. Stachniss and C.S. McCool},
    title = {{PAg-NeRF: Towards fast and efficient end-to-end panoptic 3D representations for agricultural robotics}},
    journal = ral,
    year = {2024},
    volume = {9},
    number = {1},
    pages = {907--914},
    issn = {2377-3766},
    doi = {10.1109/LRA.2023.3338515},
    codeurl = {https://github.com/Agricultural-Robotics-Bonn/pagnerf}
    }

2023

  • R. Roscher, L. Roth, C. Stachniss, and A. Walter, “Data-Centric Digital Agriculture: A Perspective,” arXiv Preprint, 2023.
    [BibTeX]
    @article{roscher2023arxiv-dcda,
    title={{Data-Centric Digital Agriculture: A Perspective}},
    author={R. Roscher and L. Roth and C. Stachniss and A. Walter},
    year = {2023},
    eprint = {2312.03437},
    archiveprefix = {arXiv},
    journal = arxiv,
    }

  • C. Gomez, A. C. Hernandez, R. Barber, and C. Stachniss, “Localization Exploiting Semantic and Metric Information in Non-static Indoor Environments,” Journal of Intelligent & Robotic Systems, vol. 109, iss. 86, 2023. doi:https://doi.org/10.1007/s10846-023-02021-y
    [BibTeX] [PDF]
    @article{gomez2023jint,
    author = {C. Gomez and A.C. Hernandez and R. Barber and C. Stachniss},
    title = {Localization Exploiting Semantic and Metric Information in Non-static Indoor Environments},
    journal = jint,
    year = {2023},
    volume = {109},
    number = {86},
    doi = {10.1007/s10846-023-02021-y},
    }

  • R. Marcuzzi, L. Nunes, L. Wiesmann, E. Marks, J. Behley, and C. Stachniss, “Mask4D: End-to-End Mask-Based 4D Panoptic Segmentation for LiDAR Sequences,” IEEE Robotics and Automation Letters (RA-L), vol. 8, iss. 11, pp. 7487-7494, 2023. doi:10.1109/LRA.2023.3320020
    [BibTeX] [PDF] [Code] [Video]
    @article{marcuzzi2023ral-meem,
    author = {R. Marcuzzi and L. Nunes and L. Wiesmann and E. Marks and J. Behley and C. Stachniss},
    title = {{Mask4D: End-to-End Mask-Based 4D Panoptic Segmentation for LiDAR Sequences}},
    journal = ral,
    year = {2023},
    volume = {8},
    number = {11},
    pages = {7487--7494},
    issn = {2377-3766},
    doi = {10.1109/LRA.2023.3320020},
    codeurl = {https://github.com/PRBonn/Mask4D},
    videourl = {https://youtu.be/4WqK_gZlpfA},
    }

  • G. Roggiolani, F. Magistri, T. Guadagnino, J. Behley, and C. Stachniss, “Unsupervised Pre-Training for 3D Leaf Instance Segmentation,” IEEE Robotics and Automation Letters (RA-L), vol. 8, pp. 7448-7455, 2023. doi:10.1109/LRA.2023.3320018
    [BibTeX] [PDF] [Code] [Video]
    @article{roggiolani2023ral,
    author = {G. Roggiolani and F. Magistri and T. Guadagnino and J. Behley and C. Stachniss},
    title = {{Unsupervised Pre-Training for 3D Leaf Instance Segmentation}},
    journal = ral,
    year = {2023},
    volume = {8},
    number = {11},
    codeurl = {https://github.com/PRBonn/Unsupervised-Pre-Training-for-3D-Leaf-Instance-Segmentation},
    pages = {7448--7455},
    doi = {10.1109/LRA.2023.3320018},
    issn = {2377-3766},
    videourl = {https://youtu.be/PbYVPPwVeKg},
    }

  • J. Rückin, F. Magistri, C. Stachniss, and M. Popovic, “An Informative Path Planning Framework for Active Learning in UAV-based Semantic Mapping,” IEEE Transactions on Robotics (TRO), vol. 39, iss. 6, pp. 4279-4296, 2023. doi:10.1109/TRO.2023.3313811
    [BibTeX] [PDF] [Code]
    @article{rueckin2023tro,
    author = {J. R\"{u}ckin and F. Magistri and C. Stachniss and M. Popovic},
    title = {{An Informative Path Planning Framework for Active Learning in UAV-based Semantic Mapping}},
    journal = tro,
    year = {2023},
    codeurl = {https://github.com/dmar-bonn/ipp-al-framework},
    doi={10.1109/TRO.2023.3313811},
    volume={39},
    number={6},
    pages = {4279--4296},
    }

  • F. Magistri, J. Weyler, D. Gogoll, P. Lottes, J. Behley, N. Petrinic, and C. Stachniss, “From one Field to Another – Unsupervised Domain Adaptation for Semantic Segmentation in Agricultural Robotics,” Computers and Electronics in Agriculture, vol. 212, p. 108114, 2023. doi:https://doi.org/10.1016/j.compag.2023.108114
    [BibTeX] [PDF]
    @article{magistri2023cea,
    author = {F. Magistri and J. Weyler and D. Gogoll and P. Lottes and J. Behley and N. Petrinic and C. Stachniss},
    title = {From one Field to Another -- Unsupervised Domain Adaptation for Semantic Segmentation in Agricultural Robotics},
    journal = cea,
    year = {2023},
    volume = {212},
    pages = {108114},
    doi = {10.1016/j.compag.2023.108114},
    }

  • I. Vizzo, B. Mersch, L. Nunes, L. Wiesmann, T. Guadagnino, and C. Stachniss, “Toward Reproducible Version-Controlled Perception Platforms: Embracing Simplicity in Autonomous Vehicle Dataset Acquisition,” in Proc. of the Intl. Conf. on Intelligent Transportation Systems Workshops, 2023.
    [BibTeX] [PDF] [Code]
    @inproceedings{vizzo2023itcsws,
    author = {I. Vizzo and B. Mersch and L. Nunes and L. Wiesmann and T. Guadagnino and C. Stachniss},
    title = {{Toward Reproducible Version-Controlled Perception Platforms: Embracing Simplicity in Autonomous Vehicle Dataset Acquisition}},
    booktitle = {Proc. of the Intl. Conf. on Intelligent Transportation Systems Workshops},
    year = 2023,
    codeurl = {https://github.com/ipb-car/meta-workspace},
    note = {Accepted},
    }

  • B. Mersch, T. Guadagnino, X. Chen, I. Vizzo, J. Behley, and C. Stachniss, “Building Volumetric Beliefs for Dynamic Environments Exploiting Map-Based Moving Object Segmentation,” IEEE Robotics and Automation Letters (RA-L), vol. 8, iss. 8, pp. 5180-5187, 2023. doi:10.1109/LRA.2023.3292583
    [BibTeX] [PDF] [Code] [Video]
    @article{mersch2023ral,
    author = {B. Mersch and T. Guadagnino and X. Chen and I. Vizzo and J. Behley and C. Stachniss},
    title = {{Building Volumetric Beliefs for Dynamic Environments Exploiting Map-Based Moving Object Segmentation}},
    journal = ral,
    volume = {8},
    number = {8},
    pages = {5180--5187},
    year = 2023,
    issn = {2377-3766},
    doi = {10.1109/LRA.2023.3292583},
    videourl = {https://youtu.be/aeXhvkwtDbI},
    codeurl = {https://github.com/PRBonn/MapMOS},
    }

  • Y. L. Chong, J. Weyler, P. Lottes, J. Behley, and C. Stachniss, “Unsupervised Generation of Labeled Training Images for Crop-Weed Segmentation in New Fields and on Different Robotic Platforms,” IEEE Robotics and Automation Letters (RA-L), vol. 8, iss. 8, p. 5259–5266, 2023. doi:10.1109/LRA.2023.3293356
    [BibTeX] [PDF] [Code] [Video]
    @article{chong2023ral,
    author = {Y.L. Chong and J. Weyler and P. Lottes and J. Behley and C. Stachniss},
    title = {{Unsupervised Generation of Labeled Training Images for Crop-Weed Segmentation in New Fields and on Different Robotic Platforms}},
    journal = ral,
    volume = {8},
    number = {8},
    pages = {5259--5266},
    year = 2023,
    issn = {2377-3766},
    doi = {10.1109/LRA.2023.3293356},
    videourl = {https://youtu.be/SpvrR9sgf2k},
    codeurl = {https://github.com/PRBonn/StyleGenForLabels}
    }

  • L. Lobefaro, M. V. R. Malladi, O. Vysotska, T. Guadagnino, and C. Stachniss, “Estimating 4D Data Associations Towards Spatial-Temporal Mapping of Growing Plants for Agricultural Robots,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2023.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{lobefaro2023iros,
      author    = {L. Lobefaro and M.V.R. Malladi and O. Vysotska and T. Guadagnino and C. Stachniss},
      title     = {{Estimating 4D Data Associations Towards Spatial-Temporal Mapping of Growing Plants for Agricultural Robots}},
      booktitle = iros,
      year      = {2023},
      codeurl   = {https://github.com/PRBonn/plants_temporal_matcher},
      videourl  = {https://youtu.be/HpJPIzmXoag},
    }

  • Y. Pan, F. Magistri, T. Läbe, E. Marks, C. Smitt, C. S. McCool, J. Behley, and C. Stachniss, “Panoptic Mapping with Fruit Completion and Pose Estimation for Horticultural Robots,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2023.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{pan2023iros,
      author    = {Y. Pan and F. Magistri and T. L\"abe and E. Marks and C. Smitt and C.S. McCool and J. Behley and C. Stachniss},
      title     = {{Panoptic Mapping with Fruit Completion and Pose Estimation for Horticultural Robots}},
      booktitle = iros,
      year      = {2023},
      codeurl   = {https://github.com/PRBonn/HortiMapping},
      videourl  = {https://youtu.be/fSyHBhskjqA},
    }

  • Y. Goel, N. Vaskevicius, L. Palmieri, N. Chebrolu, K. O. Arras, and C. Stachniss, “Semantically Informed MPC for Context-Aware Robot Exploration,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2023.
    [BibTeX] [PDF]
    @inproceedings{goel2023iros,
      author    = {Y. Goel and N. Vaskevicius and L. Palmieri and N. Chebrolu and K.O. Arras and C. Stachniss},
      title     = {{Semantically Informed MPC for Context-Aware Robot Exploration}},
      booktitle = iros,
      year      = {2023},
    }

  • N. Zimmerman, M. Sodano, E. Marks, J. Behley, and C. Stachniss, “Constructing Metric-Semantic Maps using Floor Plan Priors for Long-Term Indoor Localization,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2023.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{zimmerman2023iros,
      author    = {N. Zimmerman and M. Sodano and E. Marks and J. Behley and C. Stachniss},
      title     = {{Constructing Metric-Semantic Maps using Floor Plan Priors for Long-Term Indoor Localization}},
      booktitle = iros,
      year      = {2023},
      codeurl   = {https://github.com/PRBonn/SIMP},
      videourl  = {https://youtu.be/9ZGd5lJbG4s},
    }

  • L. Jin, X. Chen, J. Rückin, and M. Popović, “NeU-NBV: Next Best View Planning Using Uncertainty Estimation in Image-Based Neural Rendering,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2023.
    [BibTeX] [PDF] [Code]
    @inproceedings{jin2023iros,
    title = {{NeU-NBV: Next Best View Planning Using Uncertainty Estimation in Image-Based Neural Rendering}},
    booktitle = iros,
    author = {Jin, Liren and Chen, Xieyuanli and R{\"u}ckin, Julius and Popovi{\'c}, Marija},
    year = {2023},
    codeurl = {https://github.com/dmar-bonn/neu-nbv},
    }

  • J. Westheider, J. Rückin, and M. Popović, “Multi-UAV Adaptive Path Planning Using Deep Reinforcement Learning,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2023.
    [BibTeX] [PDF] [Code]
    @inproceedings{westheider2023iros,
      author    = {Westheider, Jonas and R{\"u}ckin, Julius and Popovi{\'c}, Marija},
      title     = {{Multi-UAV Adaptive Path Planning Using Deep Reinforcement Learning}},
      booktitle = iros,
      year      = {2023},
      codeurl   = {https://github.com/dmar-bonn/ipp-marl},
    }

  • T. Zaenker, J. Rückin, R. Menon, M. Popović, and M. Bennewitz, “Graph-based view motion planning for fruit detection,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2023.
    [BibTeX] [PDF]
    @inproceedings{zaenker2023iros,
      author    = {Zaenker, Tobias and R{\"u}ckin, Julius and Menon, Rohit and Popovi{\'c}, Marija and Bennewitz, Maren},
      title     = {{Graph-based view motion planning for fruit detection}},
      booktitle = iros,
      year      = {2023},
    }

  • J. Weyler, F. Magistri, E. Marks, Y. L. Chong, M. Sodano, G. Roggiolani, N. Chebrolu, C. Stachniss, and J. Behley, “PhenoBench – A Large Dataset and Benchmarks for Semantic Image Interpretation in the Agricultural Domain,” arXiv preprint, vol. arXiv:2306.04557, 2023.
    [BibTeX] [PDF] [Code]
    @article{weyler2023arxiv,
    author = {Jan Weyler and Federico Magistri and Elias Marks and Yue Linn Chong and Matteo Sodano
    and Gianmarco Roggiolani and Nived Chebrolu and Cyrill Stachniss and Jens Behley},
    title = {{PhenoBench --- A Large Dataset and Benchmarks for Semantic Image Interpretation
    in the Agricultural Domain}},
    journal = arxiv,
    volume = {arXiv:2306.04557},
    eprint = {2306.04557},
    year = {2023},
    codeurl = {https://github.com/PRBonn/phenobench}
    }

  • W. Förstner, “Friedrich Ackermann’s scientific research program,” Geo-spatial Information Science, pp. 1-10, 2023. doi:10.1080/10095020.2023.2231273
    [BibTeX] [PDF]
    @Article{foerstner23:friedrich,
    author = {Wolfgang F{\"o}rstner},
    journal = {Geo-spatial Information Science},
    title = {Friedrich {Ackermann}'s scientific research program},
    year = {2023},
    number = {0},
    pages = {1--10},
    volume = {0},
    doi = {10.1080/10095020.2023.2231273},
    publisher = {Taylor \& Francis},
    url = {https://www.tandfonline.com/doi/pdf/10.1080/10095020.2023.2231273?download=true},
    }

  • D. Barath, D. Mishkin, M. Polic, W. Förstner, and J. Matas, “A Large-Scale Homography Benchmark,” in Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023, pp. 21360-21370.
    [BibTeX] [PDF]
    @InProceedings{Barath2023cvpr,
    author = {Barath, Daniel and Mishkin, Dmytro and Polic, Michal and F{\"o}rstner, Wolfgang and Matas, Jiri},
    title = {A Large-Scale {Homography} Benchmark},
    booktitle = cvpr,
    year = {2023},
    pages = {21360--21370},
    url = {https://openaccess.thecvf.com/content/CVPR2023/papers/Barath_A_Large-Scale_Homography_Benchmark_CVPR_2023_paper.pdf},
    }

  • L. Wiesmann, T. Guadagnino, I. Vizzo, N. Zimmerman, Y. Pan, H. Kuang, J. Behley, and C. Stachniss, “LocNDF: Neural Distance Field Mapping for Robot Localization,” IEEE Robotics and Automation Letters (RA-L), vol. 8, iss. 8, p. 4999–5006, 2023. doi:10.1109/LRA.2023.3291274
    [BibTeX] [PDF] [Code] [Video]
    @article{wiesmann2023ral-icra,
      author   = {L. Wiesmann and T. Guadagnino and I. Vizzo and N. Zimmerman and Y. Pan and H. Kuang and J. Behley and C. Stachniss},
      title    = {{LocNDF: Neural Distance Field Mapping for Robot Localization}},
      journal  = ral,
      year     = {2023},
      volume   = {8},
      number   = {8},
      pages    = {4999--5006},
      issn     = {2377-3766},
      doi      = {10.1109/LRA.2023.3291274},
      url      = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/wiesmann2023ral-icra.pdf},
      codeurl  = {https://github.com/PRBonn/LocNDF},
      videourl = {https://youtu.be/-0idH21BpMI},
    }

  • E. Marks, M. Sodano, F. Magistri, L. Wiesmann, D. Desai, R. Marcuzzi, J. Behley, and C. Stachniss, “High Precision Leaf Instance Segmentation in Point Clouds Obtained Under Real Field Conditions,” IEEE Robotics and Automation Letters (RA-L), vol. 8, iss. 8, pp. 4791-4798, 2023. doi:10.1109/LRA.2023.3288383
    [BibTeX] [PDF] [Code] [Video]
    @article{marks2023ral,
    author = {E. Marks and M. Sodano and F. Magistri and L. Wiesmann and D. Desai and R. Marcuzzi and J. Behley and C. Stachniss},
    title = {{High Precision Leaf Instance Segmentation in Point Clouds Obtained Under Real Field Conditions}},
    journal = ral,
    pages = {4791--4798},
    volume = {8},
    number = {8},
    issn = {2377-3766},
    year = {2023},
    doi = {10.1109/LRA.2023.3288383},
    codeurl = {https://github.com/PRBonn/plant_pcd_segmenter},
    videourl = {https://youtu.be/dvA1SvQ4iEY}
    }

  • L. Peters, V. Rubies Royo, C. Tomlin, L. Ferranti, J. Alonso-Mora, C. Stachniss, and D. Fridovich-Keil, “Online and Offline Learning of Player Objectives from Partial Observations in Dynamic Games,” The Intl. Journal of Robotics Research, 2023.
    [BibTeX] [PDF] [Code] [Video]
    @article{peters2023ijrr,
      author   = {Peters, L. and Rubies Royo, V. and Tomlin, C. and Ferranti, L. and Alonso-Mora, J. and Stachniss, C. and Fridovich-Keil, D.},
      title    = {{Online and Offline Learning of Player Objectives from Partial Observations in Dynamic Games}},
      journal  = ijrr,
      year     = {2023},
      url      = {https://journals.sagepub.com/doi/reader/10.1177/02783649231182453},
      codeurl  = {https://github.com/PRBonn/PartiallyObservedInverseGames.jl},
      videourl = {https://www.youtube.com/watch?v=BogCsYQX9Pc},
    }

  • H. Lim, L. Nunes, B. Mersch, X. Chen, J. Behley, H. Myung, and C. Stachniss, “ERASOR2: Instance-Aware Robust 3D Mapping of the Static World in Dynamic Scenes,” in Proc. of Robotics: Science and Systems (RSS), 2023.
    [BibTeX] [PDF]
    @inproceedings{lim2023rss,
      author    = {H. Lim and L. Nunes and B. Mersch and X. Chen and J. Behley and H. Myung and C. Stachniss},
      title     = {{ERASOR2: Instance-Aware Robust 3D Mapping of the Static World in Dynamic Scenes}},
      booktitle = rss,
      year      = {2023},
    }

  • J. Weyler, T. Läbe, F. Magistri, J. Behley, and C. Stachniss, “Towards Domain Generalization in Crop and Weed Segmentation for Precision Farming Robots,” IEEE Robotics and Automation Letters (RA-L), vol. 8, iss. 6, pp. 3310-3317, 2023. doi:10.1109/LRA.2023.3262417
    [BibTeX] [PDF] [Code]
    @article{weyler2023ral,
    author = {J. Weyler and T. L\"abe and F. Magistri and J. Behley and C. Stachniss},
    title = {{Towards Domain Generalization in Crop and Weed Segmentation for Precision Farming Robots}},
    journal = ral,
    pages = {3310--3317},
    volume = 8,
    number = 6,
    issn = {2377-3766},
    year = {2023},
    doi = {10.1109/LRA.2023.3262417},
    codeurl = {https://github.com/PRBonn/DG-CWS},
    }

  • L. Nunes, L. Wiesmann, R. Marcuzzi, X. Chen, J. Behley, and C. Stachniss, “Temporal Consistent 3D LiDAR Representation Learning for Semantic Perception in Autonomous Driving,” in Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{nunes2023cvpr,
      author    = {L. Nunes and L. Wiesmann and R. Marcuzzi and X. Chen and J. Behley and C. Stachniss},
      title     = {{Temporal Consistent 3D LiDAR Representation Learning for Semantic Perception in Autonomous Driving}},
      booktitle = cvpr,
      year      = {2023},
      codeurl   = {https://github.com/PRBonn/TARL},
      videourl  = {https://youtu.be/0CtDbwRYLeo},
    }

  • H. Kuang, X. Chen, T. Guadagnino, N. Zimmerman, J. Behley, and C. Stachniss, “IR-MCL: Implicit Representation-Based Online Global Localization,” IEEE Robotics and Automation Letters (RA-L), vol. 8, iss. 3, p. 1627–1634, 2023. doi:10.1109/LRA.2023.3239318
    [BibTeX] [PDF] [Code]
    @article{kuang2023ral,
      author  = {Kuang, Haofei and Chen, Xieyuanli and Guadagnino, Tiziano and Zimmerman, Nicky and Behley, Jens and Stachniss, Cyrill},
      title   = {{IR-MCL: Implicit Representation-Based Online Global Localization}},
      journal = ral,
      year    = {2023},
      volume  = {8},
      number  = {3},
      pages   = {1627--1634},
      doi     = {10.1109/LRA.2023.3239318},
      codeurl = {https://github.com/PRBonn/ir-mcl},
    }

  • X. Zhong, Y. Pan, J. Behley, and C. Stachniss, “SHINE-Mapping: Large-Scale 3D Mapping Using Sparse Hierarchical Implicit Neural Representations,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2023.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{zhong2023icra,
      author    = {Zhong, Xingguang and Pan, Yue and Behley, Jens and Stachniss, Cyrill},
      title     = {{SHINE-Mapping: Large-Scale 3D Mapping Using Sparse Hierarchical Implicit Neural Representations}},
      booktitle = icra,
      year      = {2023},
      codeurl   = {https://github.com/PRBonn/SHINE_mapping},
      videourl  = {https://youtu.be/jRqIupJgQZE},
    }

  • M. Sodano, F. Magistri, T. Guadagnino, J. Behley, and C. Stachniss, “Robust Double-Encoder Network for RGB-D Panoptic Segmentation,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2023.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{sodano2023icra,
      author    = {Matteo Sodano and Federico Magistri and Tiziano Guadagnino and Jens Behley and Cyrill Stachniss},
      title     = {{Robust Double-Encoder Network for RGB-D Panoptic Segmentation}},
      booktitle = icra,
      year      = {2023},
      codeurl   = {https://github.com/PRBonn/PS-res-excite},
      videourl  = {https://youtu.be/r1pabV3sQYk},
    }

  • S. Kelly, A. Riccardi, E. Marks, F. Magistri, T. Guadagnino, M. Chli, and C. Stachniss, “Target-Aware Implicit Mapping for Agricultural Crop Inspection,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2023.
    [BibTeX] [PDF] [Video]
    @inproceedings{kelly2023icra,
      author    = {Shane Kelly and Alessandro Riccardi and Elias Marks and Federico Magistri and Tiziano Guadagnino and Margarita Chli and Cyrill Stachniss},
      title     = {{Target-Aware Implicit Mapping for Agricultural Crop Inspection}},
      booktitle = icra,
      year      = {2023},
      videourl  = {https://youtu.be/UAIqn0QnpKg},
    }

  • A. Riccardi, S. Kelly, E. Marks, F. Magistri, T. Guadagnino, J. Behley, M. Bennewitz, and C. Stachniss, “Fruit Tracking Over Time Using High-Precision Point Clouds,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2023.
    [BibTeX] [PDF] [Video]
    @inproceedings{riccardi2023icra,
      author    = {Alessandro Riccardi and Shane Kelly and Elias Marks and Federico Magistri and Tiziano Guadagnino and Jens Behley and Maren Bennewitz and Cyrill Stachniss},
      title     = {{Fruit Tracking Over Time Using High-Precision Point Clouds}},
      booktitle = icra,
      year      = {2023},
      videourl  = {https://youtu.be/fBGSd0--PXY},
    }

  • G. Roggiolani, M. Sodano, F. Magistri, T. Guadagnino, J. Behley, and C. Stachniss, “Hierarchical Approach for Joint Semantic, Plant Instance, and Leaf Instance Segmentation in the Agricultural Domain,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2023.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{roggiolani2023icra-hajs,
      author    = {G. Roggiolani and M. Sodano and F. Magistri and T. Guadagnino and J. Behley and C. Stachniss},
      title     = {{Hierarchical Approach for Joint Semantic, Plant Instance, and Leaf Instance Segmentation in the Agricultural Domain}},
      booktitle = icra,
      year      = {2023},
      codeurl   = {https://github.com/PRBonn/HAPT},
      videourl  = {https://youtu.be/miuOJjxlJic},
    }

  • G. Roggiolani, F. Magistri, T. Guadagnino, J. Weyler, G. Grisetti, C. Stachniss, and J. Behley, “On Domain-Specific Pre-Training for Effective Semantic Perception in Agricultural Robotics,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2023.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{roggiolani2023icra-odsp,
      author    = {G. Roggiolani and F. Magistri and T. Guadagnino and J. Weyler and G. Grisetti and C. Stachniss and J. Behley},
      title     = {{On Domain-Specific Pre-Training for Effective Semantic Perception in Agricultural Robotics}},
      booktitle = icra,
      year      = {2023},
      codeurl   = {https://github.com/PRBonn/agri-pretraining},
      videourl  = {https://youtu.be/FDWY_UnfsBs},
    }

  • H. Dong, X. Chen, M. Dusmanu, V. Larsson, M. Pollefeys, and C. Stachniss, “Learning-Based Dimensionality Reduction for Computing Compact and Effective Local Feature Descriptors,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2023.
    [BibTeX] [PDF] [Code]
    @inproceedings{dong2023icra,
      author    = {H. Dong and X. Chen and M. Dusmanu and V. Larsson and M. Pollefeys and C. Stachniss},
      title     = {{Learning-Based Dimensionality Reduction for Computing Compact and Effective Local Feature Descriptors}},
      booktitle = icra,
      year      = {2023},
      codeurl   = {https://github.com/PRBonn/descriptor-dr},
    }

  • M. Zeller, V. S. Sandhu, B. Mersch, J. Behley, M. Heidingsfeld, and C. Stachniss, “Radar Velocity Transformer: Single-scan Moving Object Segmentation in Noisy Radar Point Clouds,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2023.
    [BibTeX] [PDF] [Video]
    @inproceedings{zeller2023icra,
      author    = {M. Zeller and V.S. Sandhu and B. Mersch and J. Behley and M. Heidingsfeld and C. Stachniss},
      title     = {{Radar Velocity Transformer: Single-scan Moving Object Segmentation in Noisy Radar Point Clouds}},
      booktitle = icra,
      year      = {2023},
      videourl  = {https://youtu.be/dTDgzWIBgpE},
    }

  • I. Vizzo, T. Guadagnino, B. Mersch, L. Wiesmann, J. Behley, and C. Stachniss, “KISS-ICP: In Defense of Point-to-Point ICP – Simple, Accurate, and Robust Registration If Done the Right Way,” IEEE Robotics and Automation Letters (RA-L), vol. 8, iss. 2, pp. 1-8, 2023. doi:10.1109/LRA.2023.3236571
    [BibTeX] [PDF] [Code] [Video]
    @article{vizzo2023ral,
    author = {Vizzo, Ignacio and Guadagnino, Tiziano and Mersch, Benedikt and Wiesmann, Louis and Behley, Jens and Stachniss, Cyrill},
    title = {{KISS-ICP: In Defense of Point-to-Point ICP -- Simple, Accurate, and Robust Registration If Done the Right Way}},
    journal = ral,
    pages = {1--8},
    doi = {10.1109/LRA.2023.3236571},
    volume = {8},
    number = {2},
    year = {2023},
    codeurl = {https://github.com/PRBonn/kiss-icp},
    videourl = {https://youtu.be/h71aGiD-uxU}
    }

  • R. Marcuzzi, L. Nunes, L. Wiesmann, J. Behley, and C. Stachniss, “Mask-Based Panoptic LiDAR Segmentation for Autonomous Driving,” IEEE Robotics and Automation Letters (RA-L), vol. 8, iss. 2, p. 1141–1148, 2023. doi:10.1109/LRA.2023.3236568
    [BibTeX] [PDF] [Code] [Video]
    @article{marcuzzi2023ral,
      author   = {R. Marcuzzi and L. Nunes and L. Wiesmann and J. Behley and C. Stachniss},
      title    = {{Mask-Based Panoptic LiDAR Segmentation for Autonomous Driving}},
      journal  = ral,
      year     = {2023},
      volume   = {8},
      number   = {2},
      pages    = {1141--1148},
      doi      = {10.1109/LRA.2023.3236568},
      codeurl  = {https://github.com/PRBonn/MaskPLS},
      videourl = {https://youtu.be/I8G9VKpZux8},
    }

  • L. Wiesmann, L. Nunes, J. Behley, and C. Stachniss, “KPPR: Exploiting Momentum Contrast for Point Cloud-Based Place Recognition,” IEEE Robotics and Automation Letters (RA-L), vol. 8, iss. 2, pp. 592-599, 2023. doi:10.1109/LRA.2022.3228174
    [BibTeX] [PDF] [Code] [Video]
    @article{wiesmann2023ral,
    author = {L. Wiesmann and L. Nunes and J. Behley and C. Stachniss},
    title = {{KPPR: Exploiting Momentum Contrast for Point Cloud-Based Place Recognition}},
    journal = ral,
    volume = {8},
    number = {2},
    pages = {592--599},
    year = 2023,
    issn = {2377-3766},
    doi = {10.1109/LRA.2022.3228174},
    codeurl = {https://github.com/PRBonn/kppr},
    videourl = {https://youtu.be/bICz1sqd8Xs}
    }

  • Y. Wu, J. Kuang, X. Niu, J. Behley, L. Klingbeil, and H. Kuhlmann, “Wheel-SLAM: Simultaneous Localization and Terrain Mapping Using One Wheel-mounted IMU,” IEEE Robotics and Automation Letters (RA-L), vol. 8, iss. 1, p. 280–287, 2023. doi:10.1109/LRA.2022.3226071
    [BibTeX] [PDF] [Code]
    @article{wu2023ral,
      author  = {Y. Wu and J. Kuang and X. Niu and J. Behley and L. Klingbeil and H. Kuhlmann},
      title   = {{Wheel-SLAM: Simultaneous Localization and Terrain Mapping Using One Wheel-mounted IMU}},
      journal = ral,
      year    = {2023},
      volume  = {8},
      number  = {1},
      pages   = {280--287},
      doi     = {10.1109/LRA.2022.3226071},
      codeurl = {https://github.com/i2Nav-WHU/Wheel-SLAM},
    }

  • M. Zeller, J. Behley, M. Heidingsfeld, and C. Stachniss, “Gaussian Radar Transformer for Semantic Segmentation in Noisy Radar Data,” IEEE Robotics and Automation Letters (RA-L), vol. 8, iss. 1, p. 344–351, 2023. doi:10.1109/LRA.2022.3226030
    [BibTeX] [PDF] [Video]
    @article{zeller2023ral,
      author   = {M. Zeller and J. Behley and M. Heidingsfeld and C. Stachniss},
      title    = {{Gaussian Radar Transformer for Semantic Segmentation in Noisy Radar Data}},
      journal  = ral,
      year     = {2023},
      volume   = {8},
      number   = {1},
      pages    = {344--351},
      doi      = {10.1109/LRA.2022.3226030},
      videourl = {https://youtu.be/uNlNkYoG-tA},
    }

  • N. Zimmerman, T. Guadagnino, X. Chen, J. Behley, and C. Stachniss, “Long-Term Localization using Semantic Cues in Floor Plan Maps,” IEEE Robotics and Automation Letters (RA-L), vol. 8, iss. 1, pp. 176-183, 2023. doi:10.1109/LRA.2022.3223556
    [BibTeX] [PDF] [Code]
    @article{zimmerman2023ral,
    author = {N. Zimmerman and T. Guadagnino and X. Chen and J. Behley and C. Stachniss},
    title = {{Long-Term Localization using Semantic Cues in Floor Plan Maps}},
    journal = ral,
    year = {2023},
    volume = {8},
    number = {1},
    pages = {176--183},
    issn = {2377-3766},
    doi = {10.1109/LRA.2022.3223556},
    codeurl = {https://github.com/PRBonn/hsmcl}
    }

  • H. Müller, N. Zimmerman, T. Polonelli, M. Magno, J. Behley, C. Stachniss, and L. Benini, “Fully On-board Low-Power Localization with Multizone Time-of-Flight Sensors on Nano-UAVs,” in Proc. of Design, Automation & Test in Europe Conference & Exhibition (DATE), 2023.
    [BibTeX] [PDF]
    @inproceedings{mueller2023date,
      author    = {H. M{\"u}ller and N. Zimmerman and T. Polonelli and M. Magno and J. Behley and C. Stachniss and L. Benini},
      title     = {{Fully On-board Low-Power Localization with Multizone Time-of-Flight Sensors on Nano-UAVs}},
      booktitle = {Proc. of Design, Automation \& Test in Europe Conference \& Exhibition (DATE)},
      year      = {2023},
    }

  • M. Arora, L. Wiesmann, X. Chen, and C. Stachniss, “Static Map Generation from 3D LiDAR Point Clouds Exploiting Ground Segmentation,” Robotics and Autonomous Systems, vol. 159, p. 104287, 2023. doi:https://doi.org/10.1016/j.robot.2022.104287
    [BibTeX] [PDF] [Code]
    @article{arora2023jras,
    author = {M. Arora and L. Wiesmann and X. Chen and C. Stachniss},
    title = {{Static Map Generation from 3D LiDAR Point Clouds Exploiting Ground Segmentation}},
    journal = jras,
    volume = {159},
    pages = {104287},
    year = {2023},
    issn = {0921-8890},
    doi = {10.1016/j.robot.2022.104287},
    codeurl = {https://github.com/PRBonn/dynamic-point-removal},
    }

  • F. Stache, J. Westheider, F. Magistri, C. Stachniss, and M. Popovic, “Adaptive Path Planning for UAVs for Multi-Resolution Semantic Segmentation,” Robotics and Autonomous Systems, vol. 159, p. 104288, 2023. doi:10.1016/j.robot.2022.104288
    [BibTeX] [PDF]
    @article{stache2023jras,
      author  = {F. Stache and J. Westheider and F. Magistri and C. Stachniss and M. Popovic},
      title   = {{Adaptive Path Planning for UAVs for Multi-Resolution Semantic Segmentation}},
      journal = jras,
      year    = {2023},
      volume  = {159},
      pages   = {104288},
      issn    = {0921-8890},
      doi     = {10.1016/j.robot.2022.104288},
    }

  • H. Dong, X. Chen, S. Särkkä, and C. Stachniss, “Online pole segmentation on range images for long-term LiDAR localization in urban environments,” Robotics and Autonomous Systems, vol. 159, p. 104283, 2023. doi:https://doi.org/10.1016/j.robot.2022.104283
    [BibTeX] [PDF] [Code]
    @article{dong2023jras,
    title = {Online pole segmentation on range images for long-term {LiDAR} localization in urban environments},
    journal = jras,
    volume = {159},
    pages = {104283},
    year = {2023},
    issn = {0921-8890},
    doi = {10.1016/j.robot.2022.104283},
    author = {H. Dong and X. Chen and S. S{\"a}rkk{\"a} and C. Stachniss},
    codeurl = {https://github.com/PRBonn/pole-localization},
    url = {https://arxiv.org/pdf/2208.07364.pdf},
    }

2022

  • X. Chen, “LiDAR-Based Semantic Perception for Autonomous Vehicles,” PhD Thesis, 2022.
    [BibTeX] [PDF]
    @phdthesis{chen2022phd,
    author = {Xieyuanli Chen},
    title = {{LiDAR-Based Semantic Perception for Autonomous Vehicles}},
    school = {University of Bonn},
    year = 2022,
    month = sep,
    url = {https://hdl.handle.net/20.500.11811/10228},
    urn = {https://nbn-resolving.org/urn:nbn:de:hbz:5-67873},
    }

  • L. Di Giammarino, L. Brizi, T. Guadagnino, C. Stachniss, and G. Grisetti, “MD-SLAM: Multi-Cue Direct SLAM,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2022.
    [BibTeX] [PDF] [Code]
    @inproceedings{digiammarino2022iros,
      author    = {Di Giammarino, L. and Brizi, L. and Guadagnino, T. and Stachniss, C. and Grisetti, G.},
      title     = {{MD-SLAM: Multi-Cue Direct SLAM}},
      booktitle = iros,
      year      = {2022},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/digiammarino2022iros.pdf},
      codeurl   = {https://github.com/digiamm/md_slam},
    }

  • N. Zimmerman, L. Wiesmann, T. Guadagnino, T. Läbe, J. Behley, and C. Stachniss, “Robust Onboard Localization in Changing Environments Exploiting Text Spotting,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2022.
    [BibTeX] [PDF] [Code]
    @inproceedings{zimmerman2022iros,
    title = {{Robust Onboard Localization in Changing Environments Exploiting Text Spotting}},
    author = {N. Zimmerman and L. Wiesmann and T. Guadagnino and T. L{\"a}be and J. Behley and C. Stachniss},
    booktitle = iros,
    year = {2022},
    codeurl = {https://github.com/PRBonn/tmcl},
    }

  • Y. Pan, Y. Kompis, L. Bartolomei, R. Mascaro, C. Stachniss, and M. Chli, “Voxfield: Non-Projective Signed Distance Fields for Online Planning and 3D Reconstruction,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2022.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{pan2022iros,
      author    = {Y. Pan and Y. Kompis and L. Bartolomei and R. Mascaro and C. Stachniss and M. Chli},
      title     = {{Voxfield: Non-Projective Signed Distance Fields for Online Planning and 3D Reconstruction}},
      booktitle = iros,
      year      = {2022},
      codeurl   = {https://github.com/VIS4ROB-lab/voxfield},
      videourl  = {https://youtu.be/JS_yeq-GR4A},
    }

  • J. Rückin, L. Jin, F. Magistri, C. Stachniss, and M. Popović, “Informative Path Planning for Active Learning in Aerial Semantic Mapping,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2022.
    [BibTeX] [PDF] [Code]
    @InProceedings{rueckin2022iros,
      author    = {J. R{\"u}ckin and L. Jin and F. Magistri and C. Stachniss and M. Popovi\'c},
      title     = {{Informative Path Planning for Active Learning in Aerial Semantic Mapping}},
      booktitle = iros,
      year      = {2022},
      codeurl   = {https://github.com/dmar-bonn/ipp-al},
    }

  • J. Rückin, L. Jin, and M. Popović, “Adaptive Informative Path Planning Using Deep Reinforcement Learning for UAV-based Active Sensing,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2022.
    [BibTeX] [PDF] [Code]
    @inproceedings{rueckin2022icra,
    author = {R{\"u}ckin, Julius and Jin, Liren and Popovi{\'c}, Marija},
    booktitle = icra,
    title = {{Adaptive Informative Path Planning Using Deep Reinforcement Learning for UAV-based Active Sensing}},
    year = {2022},
    codeurl = {https://github.com/dmar-bonn/ipp-rl},
    }

  • F. Magistri, E. Marks, S. Nagulavancha, I. Vizzo, T. Läbe, J. Behley, M. Halstead, C. McCool, and C. Stachniss, “Contrastive 3D Shape Completion and Reconstruction for Agricultural Robots using RGB-D Frames,” IEEE Robotics and Automation Letters (RA-L), vol. 7, iss. 4, pp. 10120-10127, 2022.
    [BibTeX] [PDF] [Video]
    @article{magistri2022ral-iros,
    author = {Federico Magistri and Elias Marks and Sumanth Nagulavancha and Ignacio Vizzo and Thomas L{\"a}be and Jens Behley and Michael Halstead and Chris McCool and Cyrill Stachniss},
    title = {{Contrastive 3D Shape Completion and Reconstruction for Agricultural Robots using RGB-D Frames}},
    journal = ral,
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/magistri2022ral-iros.pdf},
    year = {2022},
    volume = {7},
    number = {4},
    pages = {10120--10127},
    videourl = {https://www.youtube.com/watch?v=2ErUf9q7YOI},
    }

  • Y. Goel, N. Vaskevicius, L. Palmieri, N. Chebrolu, and C. Stachniss, “Predicting Dense and Context-aware Cost Maps for Semantic Robot Navigation,” in IROS Workshop on Perception and Navigation for Autonomous Robotics in Unstructured and Dynamic Environments, 2022.
    [BibTeX] [PDF]
    @inproceedings{goel2022irosws,
      author    = {Y. Goel and N. Vaskevicius and L. Palmieri and N. Chebrolu and C. Stachniss},
      title     = {{Predicting Dense and Context-aware Cost Maps for Semantic Robot Navigation}},
      booktitle = {IROS Workshop on Perception and Navigation for Autonomous Robotics in Unstructured and Dynamic Environments},
      year      = {2022},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/goel2022irosws.pdf},
    }

  • I. Vizzo, B. Mersch, R. Marcuzzi, L. Wiesmann, J. Behley, and C. Stachniss, “Make it Dense: Self-Supervised Geometric Scan Completion of Sparse 3D LiDAR Scans in Large Outdoor Environments,” IEEE Robotics and Automation Letters (RA-L), vol. 7, iss. 3, pp. 8534-8541, 2022. doi:10.1109/LRA.2022.3187255
    [BibTeX] [PDF] [Code] [Video]
    @article{vizzo2022ral,
    author = {I. Vizzo and B. Mersch and R. Marcuzzi and L. Wiesmann and J. Behley and C. Stachniss},
    title = {{Make it Dense: Self-Supervised Geometric Scan Completion of Sparse 3D LiDAR Scans in Large Outdoor Environments}},
    journal = ral,
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/vizzo2022ral-iros.pdf},
    codeurl = {https://github.com/PRBonn/make_it_dense},
    year = {2022},
    volume = {7},
    number = {3},
    pages = {8534--8541},
    doi = {10.1109/LRA.2022.3187255},
    videourl = {https://youtu.be/NVjURcArHn8},
    }

  • J. Sun, Y. Wang, M. Feng, D. Wang, J. Zhao, C. Stachniss, and X. Chen, “ICK-Track: A Category-Level 6-DoF Pose Tracker Using Inter-Frame Consistent Keypoints for Aerial Manipulation,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2022.
    [BibTeX] [PDF] [Code]
    @inproceedings{sun2022iros,
      author    = {Jingtao Sun and Yaonan Wang and Mingtao Feng and Danwei Wang and Jiawen Zhao and Cyrill Stachniss and Xieyuanli Chen},
      title     = {{ICK-Track: A Category-Level 6-DoF Pose Tracker Using Inter-Frame Consistent Keypoints for Aerial Manipulation}},
      booktitle = iros,
      year      = {2022},
      codeurl   = {https://github.com/S-JingTao/ICK-Track},
    }

  • L. Nunes, X. Chen, R. Marcuzzi, A. Osep, L. Leal-Taixé, C. Stachniss, and J. Behley, “Unsupervised Class-Agnostic Instance Segmentation of 3D LiDAR Data for Autonomous Vehicles,” IEEE Robotics and Automation Letters (RA-L), 2022. doi:10.1109/LRA.2022.3187872
    [BibTeX] [PDF] [Code] [Video]
    @article{nunes2022ral-3duis,
    author = {Lucas Nunes and Xieyuanli Chen and Rodrigo Marcuzzi and Aljosa Osep and Laura Leal-Taix{\'e} and Cyrill Stachniss and Jens Behley},
    title = {{Unsupervised Class-Agnostic Instance Segmentation of 3D LiDAR Data for Autonomous Vehicles}},
    journal = ral,
    url = {https://www.ipb.uni-bonn.de/pdfs/nunes2022ral-iros.pdf},
    codeurl = {https://github.com/PRBonn/3DUIS},
    videourl = {https://youtu.be/cgv0wUaqLAE},
    doi = {10.1109/LRA.2022.3187872},
    year = 2022
    }

  • B. Mersch, X. Chen, I. Vizzo, L. Nunes, J. Behley, and C. Stachniss, “Receding Moving Object Segmentation in 3D LiDAR Data Using Sparse 4D Convolutions,” IEEE Robotics and Automation Letters (RA-L), vol. 7, iss. 3, p. 7503–7510, 2022. doi:10.1109/LRA.2022.3183245
    [BibTeX] [PDF] [Code] [Video]
    @article{mersch2022ral,
      author   = {B. Mersch and X. Chen and I. Vizzo and L. Nunes and J. Behley and C. Stachniss},
      title    = {{Receding Moving Object Segmentation in 3D LiDAR Data Using Sparse 4D Convolutions}},
      journal  = ral,
      year     = {2022},
      volume   = {7},
      number   = {3},
      pages    = {7503--7510},
      doi      = {10.1109/LRA.2022.3183245},
      url      = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/mersch2022ral.pdf},
      codeurl  = {https://github.com/PRBonn/4DMOS},
      videourl = {https://youtu.be/5aWew6caPNQ},
    }

  • T. Guadagnino, X. Chen, M. Sodano, J. Behley, G. Grisetti, and C. Stachniss, “Fast Sparse LiDAR Odometry Using Self-Supervised Feature Selection on Intensity Images,” IEEE Robotics and Automation Letters (RA-L), vol. 7, iss. 3, pp. 7597-7604, 2022. doi:10.1109/LRA.2022.3184454
    [BibTeX] [PDF]
    @article{guadagnino2022ral,
    author = {T. Guadagnino and X. Chen and M. Sodano and J. Behley and G. Grisetti and C. Stachniss},
    title = {{Fast Sparse LiDAR Odometry Using Self-Supervised Feature Selection on Intensity Images}},
    journal = ral,
    year = {2022},
    volume = {7},
    number = {3},
    pages = {7597--7604},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/guadagnino2022ral-iros.pdf},
    issn = {2377-3766},
    doi = {10.1109/LRA.2022.3184454},
    }

  • L. Wiesmann, T. Guadagnino, I. Vizzo, G. Grisetti, J. Behley, and C. Stachniss, “DCPCR: Deep Compressed Point Cloud Registration in Large-Scale Outdoor Environments,” IEEE Robotics and Automation Letters (RA-L), vol. 7, iss. 3, pp. 6327-6334, 2022. doi:10.1109/LRA.2022.3171068
    [BibTeX] [PDF] [Code] [Video]
    @article{wiesmann2022ral-iros,
    author = {L. Wiesmann and T. Guadagnino and I. Vizzo and G. Grisetti and J. Behley and C. Stachniss},
    title = {{DCPCR: Deep Compressed Point Cloud Registration in Large-Scale Outdoor Environments}},
    journal = ral,
    year = {2022},
    volume = {7},
    number = {3},
    pages = {6327--6334},
    issn = {2377-3766},
    doi = {10.1109/LRA.2022.3171068},
    codeurl = {https://github.com/PRBonn/DCPCR},
    videourl = {https://youtu.be/RqLr2RTGy1s},
    }

  • L. Peters, D. Fridovich-Keil, L. Ferranti, C. Stachniss, J. Alonso-Mora, and F. Laine, “Learning Mixed Strategies in Trajectory Games,” in Proc. of Robotics: Science and Systems (RSS), 2022.
    [BibTeX] [PDF]
    @inproceedings{peters2022rss,
      author    = {L. Peters and D. Fridovich-Keil and L. Ferranti and C. Stachniss and J. Alonso-Mora and F. Laine},
      title     = {{Learning Mixed Strategies in Trajectory Games}},
      booktitle = rss,
      year      = {2022},
      url       = {https://arxiv.org/pdf/2205.00291},
    }

  • X. Chen, B. Mersch, L. Nunes, R. Marcuzzi, I. Vizzo, J. Behley, and C. Stachniss, “Automatic Labeling to Generate Training Data for Online LiDAR-Based Moving Object Segmentation,” IEEE Robotics and Automation Letters (RA-L), vol. 7, iss. 3, pp. 6107-6114, 2022. doi:10.1109/LRA.2022.3166544
    [BibTeX] [PDF] [Code] [Video]
    @article{chen2022ral,
    author = {X. Chen and B. Mersch and L. Nunes and R. Marcuzzi and I. Vizzo and J. Behley and C. Stachniss},
    title = {{Automatic Labeling to Generate Training Data for Online LiDAR-Based Moving Object Segmentation}},
    journal = ral,
    year = {2022},
    volume = {7},
    number = {3},
    pages = {6107--6114},
    url = {https://arxiv.org/pdf/2201.04501},
    issn = {2377-3766},
    doi = {10.1109/LRA.2022.3166544},
    codeurl = {https://github.com/PRBonn/auto-mos},
    videourl = {https://youtu.be/3V5RA1udL4c},
    }

  • S. Yang, L. Zheng, X. Chen, L. Zabawa, M. Zhang, and M. Wang, “Transfer Learning from Synthetic In-vitro Soybean Pods Dataset for In-situ Segmentation of On-branch Soybean Pod,” in Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition Workshops, 2022, pp. 1666-1675.
    [BibTeX] [PDF]
    @inproceedings{yang2022cvprws,
    author = {Yang, Si and Zheng, Lihua and Chen, Xieyuanli and Zabawa, Laura and Zhang, Man and Wang, Minjuan},
    title = {{Transfer Learning from Synthetic In-vitro Soybean Pods Dataset for In-situ Segmentation of On-branch Soybean Pod}},
    booktitle = {Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition Workshops},
    url = {https://openaccess.thecvf.com/content/CVPR2022W/AgriVision/papers/Yang_Transfer_Learning_From_Synthetic_In-Vitro_Soybean_Pods_Dataset_for_In-Situ_CVPRW_2022_paper.pdf},
    pages = {1666--1675},
    year = {2022},
    }

  • I. Vizzo, T. Guadagnino, J. Behley, and C. Stachniss, “VDBFusion: Flexible and Efficient TSDF Integration of Range Sensor Data,” Sensors, vol. 22, iss. 3, 2022. doi:10.3390/s22031296
    [BibTeX] [PDF] [Code]
    @article{vizzo2022sensors,
    author = {Vizzo, I. and Guadagnino, T. and Behley, J. and Stachniss, C.},
    title = {{VDBFusion: Flexible and Efficient TSDF Integration of Range Sensor Data}},
    journal = {Sensors},
    volume = {22},
    year = {2022},
    number = {3},
    article-number = {1296},
    url = {https://www.mdpi.com/1424-8220/22/3/1296},
    issn = {1424-8220},
    doi = {10.3390/s22031296},
    codeurl = {https://github.com/PRBonn/vdbfusion},
    }

  • L. Wiesmann, R. Marcuzzi, C. Stachniss, and J. Behley, “Retriever: Point Cloud Retrieval in Compressed 3D Maps,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2022.
    [BibTeX] [PDF]
    @inproceedings{wiesmann2022icra,
      author    = {L. Wiesmann and R. Marcuzzi and C. Stachniss and J. Behley},
      title     = {{Retriever: Point Cloud Retrieval in Compressed 3D Maps}},
      booktitle = icra,
      year      = {2022},
    }

  • E. Marks, F. Magistri, and C. Stachniss, “Precise 3D Reconstruction of Plants from UAV Imagery Combining Bundle Adjustment and Template Matching,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2022.
    [BibTeX] [PDF]
    @inproceedings{marks2022icra,
      author    = {E. Marks and F. Magistri and C. Stachniss},
      title     = {{Precise 3D Reconstruction of Plants from UAV Imagery Combining Bundle Adjustment and Template Matching}},
      booktitle = icra,
      year      = {2022},
    }

  • J. Weyler, J. Quakernack, P. Lottes, J. Behley, and C. Stachniss, “Joint Plant and Leaf Instance Segmentation on Field-Scale UAV Imagery,” IEEE Robotics and Automation Letters (RA-L), vol. 7, iss. 2, pp. 3787-3794, 2022. doi:10.1109/LRA.2022.3147462
    [BibTeX] [PDF]
    @article{weyler2022ral,
    author = {J. Weyler and J. Quakernack and P. Lottes and J. Behley and C. Stachniss},
    title = {{Joint Plant and Leaf Instance Segmentation on Field-Scale UAV Imagery}},
    journal = ral,
    year = {2022},
    doi = {10.1109/LRA.2022.3147462},
    issn = {2377-3766},
    volume = {7},
    number = {2},
    pages = {3787--3794},
    }

  • L. Nunes, R. Marcuzzi, X. Chen, J. Behley, and C. Stachniss, “SegContrast: 3D Point Cloud Feature Representation Learning through Self-supervised Segment Discrimination,” IEEE Robotics and Automation Letters (RA-L), vol. 7, iss. 2, pp. 2116-2123, 2022. doi:10.1109/LRA.2022.3142440
    [BibTeX] [PDF] [Code] [Video]
    @article{nunes2022ral,
    author = {L. Nunes and R. Marcuzzi and X. Chen and J. Behley and C. Stachniss},
    title = {{SegContrast: 3D Point Cloud Feature Representation Learning through Self-supervised Segment Discrimination}},
    journal = ral,
    year = {2022},
    doi = {10.1109/LRA.2022.3142440},
    issn = {2377-3766},
    volume = {7},
    number = {2},
    pages = {2116--2123},
    url = {https://www.ipb.uni-bonn.de/pdfs/nunes2022ral-icra.pdf},
    codeurl = {https://github.com/PRBonn/segcontrast},
    videourl = {https://youtu.be/kotRb_ySnIw},
    }

  • R. Marcuzzi, L. Nunes, L. Wiesmann, I. Vizzo, J. Behley, and C. Stachniss, “Contrastive Instance Association for 4D Panoptic Segmentation using Sequences of 3D LiDAR Scans,” IEEE Robotics and Automation Letters (RA-L), vol. 7, iss. 2, pp. 1550-1557, 2022. doi:10.1109/LRA.2022.3140439
    [BibTeX] [PDF]
    @article{marcuzzi2022ral,
    author = {R. Marcuzzi and L. Nunes and L. Wiesmann and I. Vizzo and J. Behley and C. Stachniss},
    title = {{Contrastive Instance Association for 4D Panoptic Segmentation using Sequences of 3D LiDAR Scans}},
    journal = ral,
    year = {2022},
    doi = {10.1109/LRA.2022.3140439},
    issn = {2377-3766},
    volume = {7},
    number = {2},
    pages = {1550--1557},
    }

  • J. Weyler, F. Magistri, P. Seitz, J. Behley, and C. Stachniss, “In-Field Phenotyping Based on Crop Leaf and Plant Instance Segmentation,” in Proc. of the Winter Conf. on Applications of Computer Vision (WACV), 2022.
    [BibTeX] [PDF]
    @inproceedings{weyler2022wacv,
      author    = {J. Weyler and F. Magistri and P. Seitz and J. Behley and C. Stachniss},
      title     = {{In-Field Phenotyping Based on Crop Leaf and Plant Instance Segmentation}},
      booktitle = wacv,
      year      = {2022},
    }

  • S. Li, X. Chen, Y. Liu, D. Dai, C. Stachniss, and J. Gall, “Multi-scale Interaction for Real-time LiDAR Data Segmentation on an Embedded Platform,” IEEE Robotics and Automation Letters (RA-L), vol. 7, iss. 2, pp. 738-745, 2022. doi:10.1109/LRA.2021.3132059
    [BibTeX] [PDF] [Code] [Video]
    @article{li2022ral,
    author = {S. Li and X. Chen and Y. Liu and D. Dai and C. Stachniss and J. Gall},
    title = {{Multi-scale Interaction for Real-time LiDAR Data Segmentation on an Embedded Platform}},
    journal = ral,
    year = {2022},
    doi = {10.1109/LRA.2021.3132059},
    issn = {2377-3766},
    volume = {7},
    number = {2},
    pages = {738--745},
    codeurl = {https://github.com/sj-li/MINet},
    videourl = {https://youtu.be/WDhtz5tZ5vQ},
    }

  • L. Jin, J. Rückin, S. H. Kiss, T. Vidal-Calleja, and M. Popović, “Adaptive-resolution field mapping using Gaussian process fusion with integral kernels,” IEEE Robotics and Automation Letters (RA-L), vol. 7, iss. 3, p. 7471–7478, 2022.
    [BibTeX] [PDF] [Code]
    @article{jin2022ral,
      author  = {Jin, Liren and R{\"u}ckin, Julius and Kiss, Stefan H and Vidal-Calleja, Teresa and Popovi{\'c}, Marija},
      title   = {{Adaptive-resolution field mapping using Gaussian process fusion with integral kernels}},
      journal = ral,
      year    = {2022},
      volume  = {7},
      number  = {3},
      pages   = {7471--7478},
      codeurl = {https://github.com/dmar-bonn/argpf_mapping},
    }

2021

  • K. Schindler and W. Förstner, “Photogrammetry,” in Computer Vision, A Reference Guide, 2nd Edition, K. Ikeuchi, Ed., 2021. doi:10.1007/978-3-030-63416-2
    [BibTeX] [PDF]

    This comprehensive reference provides easy access to relevant information on all aspects of Computer Vision. An A-Z format of over 240 entries offers a diverse range of topics for those seeking entry into any aspect within the broad field of Computer Vision. Over 200 Authors from both industry and academia contributed to this volume. Each entry includes synonyms, a definition and discussion of the topic, and a robust bibliography. Extensive cross-references to other entries support efficient, user-friendly searches for immediate access to relevant information. Entries were peer-reviewed by a distinguished international advisory board, both scientifically and geographically diverse, ensuring balanced coverage. Over 3700 bibliographic references for further reading enable deeper exploration into any of the topics covered. The content of Computer Vision: A Reference Guide is expository and tutorial, making the book a practical resource for students who are considering entering the field, as well as professionals in other fields who need to access this vital information but may not have the time to work their way through an entire text on their topic of interest.

    @InCollection{schindler2021inbook,
    author = {Konrad Schindler and Wolfgang F{\"{o}}rstner},
    booktitle = {{Computer Vision, {A} Reference Guide, 2nd Edition}},
    title = {Photogrammetry},
    editor = {K. Ikeuchi},
    abstract = {This comprehensive reference provides easy access to relevant information on all aspects of Computer Vision. An A-Z format of over 240 entries offers a diverse range of topics for those seeking entry into any aspect within the broad field of Computer Vision. Over 200 Authors from both industry and academia contributed to this volume. Each entry includes synonyms, a definition and discussion of the topic, and a robust bibliography. Extensive cross-references to other entries support efficient, user-friendly searches for immediate access to relevant information. Entries were peer-reviewed by a distinguished international advisory board, both scientifically and geographically diverse, ensuring balanced coverage. Over 3700 bibliographic references for further reading enable deeper exploration into any of the topics covered. The content of Computer Vision: A Reference Guide is expository and tutorial, making the book a practical resource for students who are considering entering the field, as well as professionals in other fields who need to access this vital information but may not have the time to work their way through an entire text on their topic of interest. },
    http = {{https://link.springer.com/content/pdf/bfm%3A978-3-030-63416-2%2F1.pdf}},
    doi = {10.1007/978-3-030-63416-2},
    pages = {968--970},
    year = {2021},
    }

  • H. Kuang, Y. Zhu, Z. Zhang, X. Li, J. Tighe, S. Schwertfeger, C. Stachniss, and M. Li, “Video Contrastive Learning With Global Context,” in Proc. of the Intl. Conf. on Computer Vision Workshops, 2021, pp. 3195-3204.
    [BibTeX] [PDF] [Code]
    @inproceedings{kuang2021iccvws,
    author = {Kuang, Haofei and Zhu, Yi and Zhang, Zhi and Li, Xinyu and Tighe, Joseph and Schwertfeger, S\"oren and Stachniss, Cyrill and Li, Mu},
    title = {{Video Contrastive Learning With Global Context}},
    booktitle = iccvws,
    year = {2021},
    pages = {3195--3204},
    codeurl = {https://github.com/amazon-research/video-contrastive-learning},
    url = {https://openaccess.thecvf.com/content/ICCV2021W/CVEU/papers/Kuang_Video_Contrastive_Learning_With_Global_Context_ICCVW_2021_paper.pdf},
    }

  • A. Barreto, P. Lottes, F. R. Ispizua, S. Baumgarten, N. A. Wolf, C. Stachniss, A. -K. Mahlein, and S. Paulus, “Automatic UAV-based counting of seedlings in sugar-beet field and extension to maize and strawberry,” Computers and Electronics in Agriculture, 2021.
    [BibTeX] [PDF]
    @article{barreto2021cea,
    author = {A. Barreto and P. Lottes and F.R. Ispizua and S. Baumgarten and N.A. Wolf and C. Stachniss and A.-K. Mahlein and S. Paulus},
    title = {{Automatic UAV-based counting of seedlings in sugar-beet field and extension to maize and strawberry}},
    journal = {Computers and Electronics in Agriculture},
    year = {2021},
    }

  • B. Mersch, X. Chen, J. Behley, and C. Stachniss, “Self-supervised Point Cloud Prediction Using 3D Spatio-temporal Convolutional Networks,” in Proc. of the Conf. on Robot Learning (CoRL), 2021.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{mersch2021corl,
      author    = {B. Mersch and X. Chen and J. Behley and C. Stachniss},
      title     = {{Self-supervised Point Cloud Prediction Using 3D Spatio-temporal Convolutional Networks}},
      booktitle = corl,
      year      = {2021},
      url       = {https://www.ipb.uni-bonn.de/pdfs/mersch2021corl.pdf},
      codeurl   = {https://github.com/PRBonn/point-cloud-prediction},
      videourl  = {https://youtu.be/-pSZpPgFAso},
    }

  • J. Behley, M. Garbade, A. Milioto, J. Quenzel, S. Behnke, J. Gall, and C. Stachniss, “Towards 3D LiDAR-based semantic scene understanding of 3D point cloud sequences: The SemanticKITTI Dataset,” The Intl. Journal of Robotics Research, vol. 40, iss. 8-9, pp. 959-967, 2021. doi:10.1177/02783649211006735
    [BibTeX] [PDF]
    @article{behley2021ijrr,
    author = {J. Behley and M. Garbade and A. Milioto and J. Quenzel and S. Behnke and J. Gall and C. Stachniss},
    title = {{Towards 3D LiDAR-based semantic scene understanding of 3D point cloud sequences: The SemanticKITTI Dataset}},
    journal = ijrr,
    volume = {40},
    number = {8-9},
    pages = {959--967},
    year = {2021},
    doi = {10.1177/02783649211006735},
    url = {https://www.ipb.uni-bonn.de/pdfs/behley2021ijrr.pdf},
    }

  • A. Pretto, S. Aravecchia, W. Burgard, N. Chebrolu, C. Dornhege, T. Falck, F. Fleckenstein, A. Fontenla, M. Imperoli, R. Khanna, F. Liebisch, P. Lottes, A. Milioto, D. Nardi, S. Nardi, J. Pfeifer, M. Popovic, C. Potena, C. Pradalier, E. Rothacker-Feder, I. Sa, A. Schaefer, R. Siegwart, C. Stachniss, A. Walter, V. Winterhalter, X. Wu, and J. Nieto, “Building an Aerial-Ground Robotics System for Precision Farming: An Adaptable Solution,” IEEE Robotics & Automation Magazine, vol. 28, iss. 3, 2021.
    [BibTeX] [PDF]
    @Article{pretto2021ram,
    title = {{Building an Aerial-Ground Robotics System for Precision Farming: An Adaptable Solution}},
    author = {A. Pretto and S. Aravecchia and W. Burgard and N. Chebrolu and C. Dornhege and T. Falck and F. Fleckenstein and A. Fontenla and M. Imperoli and R. Khanna and F. Liebisch and P. Lottes and A. Milioto and D. Nardi and S. Nardi and J. Pfeifer and M. Popovic and C. Potena and C. Pradalier and E. Rothacker-Feder and I. Sa and A. Schaefer and R. Siegwart and C. Stachniss and A. Walter and V. Winterhalter and X. Wu and J. Nieto},
    journal = ram,
    volume = {28},
    number = {3},
    year = {2021},
    url = {https://www.ipb.uni-bonn.de/pdfs/pretto2021ram.pdf},
    }

  • D. Schunck, F. Magistri, R. A. Rosu, A. Cornelißen, N. Chebrolu, S. Paulus, J. Léon, S. Behnke, C. Stachniss, H. Kuhlmann, and L. Klingbeil, “Pheno4D: A spatio-temporal dataset of maize and tomato plant point clouds for phenotyping and advanced plant analysis ,” PLoS ONE, vol. 16, iss. 8, pp. 1-18, 2021. doi:10.1371/journal.pone.0256340
    [BibTeX] [PDF]

    Understanding the growth and development of individual plants is of central importance in modern agriculture, crop breeding, and crop science. To this end, using 3D data for plant analysis has gained attention over the last years. High-resolution point clouds offer the potential to derive a variety of plant traits, such as plant height, biomass, as well as the number and size of relevant plant organs. Periodically scanning the plants even allows for performing spatio-temporal growth analysis. However, highly accurate 3D point clouds from plants recorded at different growth stages are rare, and acquiring this kind of data is costly. Besides, advanced plant analysis methods from machine learning require annotated training data and thus generate intense manual labor before being able to perform an analysis. To address these issues, we present with this dataset paper a multi-temporal dataset featuring high-resolution registered point clouds of maize and tomato plants, which we manually labeled for computer vision tasks, such as for instance segmentation and 3D reconstruction, providing approximately 260 million labeled 3D points. To highlight the usability of the data and to provide baselines for other researchers, we show a variety of applications ranging from point cloud segmentation to non-rigid registration and surface reconstruction. We believe that our dataset will help to develop new algorithms to advance the research for plant phenotyping, 3D reconstruction, non-rigid registration, and deep learning on raw point clouds. The dataset is freely accessible at https://www.ipb.uni-bonn.de/data/pheno4d/.

    @article{schunck2021plosone,
    author = {D. Schunck and F. Magistri and R.A. Rosu and A. Corneli{\ss}en and N. Chebrolu and S. Paulus and J. L\'eon and S. Behnke and C. Stachniss and H. Kuhlmann and L. Klingbeil},
    title = {{Pheno4D: A spatio-temporal dataset of maize and tomato plant point clouds for phenotyping and advanced plant analysis}},
    journal = plosone,
    year = {2021},
    url = {https://journals.plos.org/plosone/article/file?id=10.1371/journal.pone.0256340&type=printable},
    volume = {16},
    number = {8},
    doi = {10.1371/journal.pone.0256340},
    pages = {1--18},
    abstract = {Understanding the growth and development of individual plants is of central importance in modern agriculture, crop breeding, and crop science. To this end, using 3D data for plant analysis has gained attention over the last years. High-resolution point clouds offer the potential to derive a variety of plant traits, such as plant height, biomass, as well as the number and size of relevant plant organs. Periodically scanning the plants even allows for performing spatio-temporal growth analysis. However, highly accurate 3D point clouds from plants recorded at different growth stages are rare, and acquiring this kind of data is costly. Besides, advanced plant analysis methods from machine learning require annotated training data and thus generate intense manual labor before being able to perform an analysis. To address these issues, we present with this dataset paper a multi-temporal dataset featuring high-resolution registered point clouds of maize and tomato plants, which we manually labeled for computer vision tasks, such as for instance segmentation and 3D reconstruction, providing approximately 260 million labeled 3D points. To highlight the usability of the data and to provide baselines for other researchers, we show a variety of applications ranging from point cloud segmentation to non-rigid registration and surface reconstruction. We believe that our dataset will help to develop new algorithms to advance the research for plant phenotyping, 3D reconstruction, non-rigid registration, and deep learning on raw point clouds. The dataset is freely accessible at https://www.ipb.uni-bonn.de/data/pheno4d/.},
    }

  • F. Stache, J. Westheider, F. Magistri, M. Popović, and C. Stachniss, “Adaptive Path Planning for UAV-based Multi-Resolution Semantic Segmentation,” in Proc. of the European Conf. on Mobile Robots (ECMR), 2021.
    [BibTeX] [PDF]
    @inproceedings{stache2021ecmr,
      author    = {F. Stache and J. Westheider and F. Magistri and M. Popovi\'c and C. Stachniss},
      title     = {{Adaptive Path Planning for UAV-based Multi-Resolution Semantic Segmentation}},
      booktitle = ecmr,
      year      = {2021},
    }

  • M. Arora, L. Wiesmann, X. Chen, and C. Stachniss, “Mapping the Static Parts of Dynamic Scenes from 3D LiDAR Point Clouds Exploiting Ground Segmentation,” in Proc. of the European Conf. on Mobile Robots (ECMR), 2021.
    [BibTeX] [PDF] [Code]
    @inproceedings{arora2021ecmr,
      author    = {M. Arora and L. Wiesmann and X. Chen and C. Stachniss},
      title     = {{Mapping the Static Parts of Dynamic Scenes from 3D LiDAR Point Clouds Exploiting Ground Segmentation}},
      booktitle = ecmr,
      year      = {2021},
      codeurl   = {https://github.com/humbletechy/Dynamic-Point-Removal},
    }

  • H. Dong, X. Chen, and C. Stachniss, “Online Range Image-based Pole Extractor for Long-term LiDAR Localization in Urban Environments,” in Proc. of the European Conf. on Mobile Robots (ECMR), 2021.
    [BibTeX] [PDF] [Code]
    @inproceedings{dong2021ecmr,
      author    = {H. Dong and X. Chen and C. Stachniss},
      title     = {{Online Range Image-based Pole Extractor for Long-term LiDAR Localization in Urban Environments}},
      booktitle = ecmr,
      year      = {2021},
      url       = {https://www.ipb.uni-bonn.de/pdfs/dong2021ecmr.pdf},
      codeurl   = {https://github.com/PRBonn/pole-localization},
    }

  • X. Chen, T. Läbe, A. Milioto, T. Röhling, J. Behley, and C. Stachniss, “OverlapNet: A Siamese Network for Computing LiDAR Scan Similarity with Applications to Loop Closing and Localization,” Autonomous Robots, vol. 46, p. 61–81, 2021. doi:10.1007/s10514-021-09999-0
    [BibTeX] [PDF] [Code]
    @article{chen2021auro,
      author  = {X. Chen and T. L\"abe and A. Milioto and T. R\"ohling and J. Behley and C. Stachniss},
      title   = {{OverlapNet: A Siamese Network for Computing LiDAR Scan Similarity with Applications to Loop Closing and Localization}},
      journal = {Autonomous Robots},
      year    = {2021},
      volume  = {46},
      pages   = {61--81},
      issn    = {1573-7527},
      doi     = {10.1007/s10514-021-09999-0},
      url     = {https://www.ipb.uni-bonn.de/pdfs/chen2021auro.pdf},
      codeurl = {https://github.com/PRBonn/OverlapNet},
    }

  • L. Di Giammarino, I. Aloise, C. Stachniss, and G. Grisetti, “Visual Place Recognition using LiDAR Intensity Information ,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2021.
    [BibTeX] [PDF]
    @inproceedings{digiammarino2021iros,
    title = {{Visual Place Recognition using LiDAR Intensity Information}},
    author = {Di Giammarino, L. and I. Aloise and C. Stachniss and G. Grisetti},
    booktitle = iros,
    year = {2021},
    }

  • P. Rottmann, T. Posewsky, A. Milioto, C. Stachniss, and J. Behley, “Improving Monocular Depth Estimation by Semantic Pre-training,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2021.
    [BibTeX] [PDF]
    @inproceedings{rottmann2021iros,
      author    = {P. Rottmann and T. Posewsky and A. Milioto and C. Stachniss and J. Behley},
      title     = {{Improving Monocular Depth Estimation by Semantic Pre-training}},
      booktitle = iros,
      year      = {2021},
      url       = {https://www.ipb.uni-bonn.de/pdfs/rottmann2021iros.pdf},
    }

  • B. Mersch, T. Höllen, K. Zhao, C. Stachniss, and R. Roscher, “Maneuver-based Trajectory Prediction for Self-driving Cars Using Spatio-temporal Convolutional Networks,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2021.
    [BibTeX] [PDF] [Video]
    @inproceedings{mersch2021iros,
      author    = {B. Mersch and T. H\"ollen and K. Zhao and C. Stachniss and R. Roscher},
      title     = {{Maneuver-based Trajectory Prediction for Self-driving Cars Using Spatio-temporal Convolutional Networks}},
      booktitle = iros,
      year      = {2021},
      url       = {https://www.ipb.uni-bonn.de/pdfs/mersch2021iros.pdf},
      videourl  = {https://youtu.be/5RRGWUn4qAw},
    }

  • M. Zhou, X. Chen, N. Samano, C. Stachniss, and A. Calway, “Efficient Localisation Using Images and OpenStreetMaps,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2021.
    [BibTeX] [PDF]
    @inproceedings{zhou2021iros,
    title = {{Efficient Localisation Using Images and OpenStreetMaps}},
    author = {Zhou, Mengjie and Chen, Xieyuanli and Samano, Noe and Stachniss, Cyrill and Calway, Andrew},
    booktitle = iros,
    year = {2021},
    url = {https://www.ipb.uni-bonn.de/pdfs/zhou2021iros.pdf},
    }

  • C. Shi, X. Chen, K. Huang, J. Xiao, H. Lu, and C. Stachniss, “Keypoint Matching for Point Cloud Registration using Multiplex Dynamic Graph Attention Networks,” IEEE Robotics and Automation Letters (RA-L), vol. 6, pp. 8221-8228, 2021. doi:10.1109/LRA.2021.3097275
    [BibTeX] [PDF]
    @article{shi2021ral,
    title = {{Keypoint Matching for Point Cloud Registration using Multiplex Dynamic Graph Attention Networks}},
    author = {C. Shi and X. Chen and K. Huang and J. Xiao and H. Lu and C. Stachniss},
    year = {2021},
    journal = ral,
    volume = {6},
    number = {4},
    pages = {8221--8228},
    doi = {10.1109/LRA.2021.3097275},
    issn = {2377-3766},
    }

  • X. Chen, S. Li, B. Mersch, L. Wiesmann, J. Gall, J. Behley, and C. Stachniss, “Moving Object Segmentation in 3D LiDAR Data: A Learning-based Approach Exploiting Sequential Data,” IEEE Robotics and Automation Letters (RA-L), vol. 6, pp. 6529-6536, 2021. doi:10.1109/LRA.2021.3093567
    [BibTeX] [PDF] [Code] [Video]
    @article{chen2021ral,
    title = {{Moving Object Segmentation in 3D LiDAR Data: A Learning-based Approach Exploiting Sequential Data}},
    author = {X. Chen and S. Li and B. Mersch and L. Wiesmann and J. Gall and J. Behley and C. Stachniss},
    year = {2021},
    volume = {6},
    number = {4},
    pages = {6529--6536},
    journal = ral,
    url = {https://www.ipb.uni-bonn.de/pdfs/chen2021ral-iros.pdf},
    codeurl = {https://github.com/PRBonn/LiDAR-MOS},
    videourl = {https://youtu.be/NHvsYhk4dhw},
    doi = {10.1109/LRA.2021.3093567},
    issn = {2377-3766},
    }

  • N. Chebrolu, “Spatio-Temporal Registration Techniques for Agricultural Robots,” PhD Thesis, 2021.
    [BibTeX] [PDF]
    @phdthesis{chebrolu2021phd,
      author = {N. Chebrolu},
      title  = {Spatio-Temporal Registration Techniques for Agricultural Robots},
      school = {University of Bonn},
      year   = {2021},
      url    = {https://hdl.handle.net/20.500.11811/9166},
    }

  • L. Peters, D. Fridovich-Keil, V. Rubies-Royo, C. J. Tomlin, and C. Stachniss, “Inferring Objectives in Continuous Dynamic Games from Noise-Corrupted Partial State Observations,” in Proc. of Robotics: Science and Systems (RSS), 2021.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{peters2021rss,
      author    = {Peters, Lasse and Fridovich-Keil, David and Rubies-Royo, Vicenc and Tomlin, Claire J. and Stachniss, Cyrill},
      title     = {Inferring Objectives in Continuous Dynamic Games from Noise-Corrupted Partial State Observations},
      booktitle = rss,
      year      = {2021},
      url       = {https://arxiv.org/abs/2106.03611},
      codeurl   = {https://github.com/PRBonn/PartiallyObservedInverseGames.jl},
      videourl  = {https://www.youtube.com/watch?v=BogCsYQX9Pc},
    }

  • M. Aygün, A. Osep, M. Weber, M. Maximov, C. Stachniss, J. Behley, and L. Leal-Taixe, “4D Panoptic Segmentation,” in Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2021.
    [BibTeX] [PDF]
    @inproceedings{ayguen2021cvpr,
      author    = {M. Ayg\"un and A. Osep and M. Weber and M. Maximov and C. Stachniss and J. Behley and L. Leal-Taixe},
      title     = {{4D Panoptic Segmentation}},
      booktitle = cvpr,
      year      = {2021},
      url       = {https://www.ipb.uni-bonn.de/pdfs/ayguen2021cvpr.pdf},
    }

  • F. Magistri, N. Chebrolu, J. Behley, and C. Stachniss, “Towards In-Field Phenotyping Exploiting Differentiable Rendering with Self-Consistency Loss,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2021.
    [BibTeX] [PDF] [Video]
    @inproceedings{magistri2021icra,
      author    = {F. Magistri and N. Chebrolu and J. Behley and C. Stachniss},
      title     = {{Towards In-Field Phenotyping Exploiting Differentiable Rendering with Self-Consistency Loss}},
      booktitle = icra,
      year      = {2021},
      videourl  = {https://youtu.be/MF2A4ihY2lE},
    }

  • I. Vizzo, X. Chen, N. Chebrolu, J. Behley, and C. Stachniss, “Poisson Surface Reconstruction for LiDAR Odometry and Mapping,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2021.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{vizzo2021icra,
      author    = {I. Vizzo and X. Chen and N. Chebrolu and J. Behley and C. Stachniss},
      title     = {{Poisson Surface Reconstruction for LiDAR Odometry and Mapping}},
      booktitle = icra,
      year      = {2021},
      url       = {https://www.ipb.uni-bonn.de/pdfs/vizzo2021icra.pdf},
      codeurl   = {https://github.com/PRBonn/puma},
      videourl  = {https://youtu.be/7yWtYWaO5Nk},
    }

  • X. Chen, I. Vizzo, T. Läbe, J. Behley, and C. Stachniss, “Range Image-based LiDAR Localization for Autonomous Vehicles,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2021.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{chen2021icra,
      author    = {X. Chen and I. Vizzo and T. L{\"a}be and J. Behley and C. Stachniss},
      title     = {{Range Image-based LiDAR Localization for Autonomous Vehicles}},
      booktitle = icra,
      year      = {2021},
      url       = {https://www.ipb.uni-bonn.de/pdfs/chen2021icra.pdf},
      codeurl   = {https://github.com/PRBonn/range-mcl},
      videourl  = {https://youtu.be/hpOPXX9oPqI},
    }

  • A. Reinke, X. Chen, and C. Stachniss, “Simple But Effective Redundant Odometry for Autonomous Vehicles,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2021.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{reinke2021icra,
      author    = {A. Reinke and X. Chen and C. Stachniss},
      title     = {{Simple But Effective Redundant Odometry for Autonomous Vehicles}},
      booktitle = icra,
      year      = {2021},
      url       = {https://www.ipb.uni-bonn.de/pdfs/reinke2021icra.pdf},
      codeurl   = {https://github.com/PRBonn/MutiverseOdometry},
      videourl  = {https://youtu.be/zLpnPEyDKfM},
    }

  • J. Behley, A. Milioto, and C. Stachniss, “A Benchmark for LiDAR-based Panoptic Segmentation based on KITTI,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2021.
    [BibTeX] [PDF]
    @inproceedings{behley2021icra,
      author    = {J. Behley and A. Milioto and C. Stachniss},
      title     = {{A Benchmark for LiDAR-based Panoptic Segmentation based on KITTI}},
      booktitle = icra,
      year      = {2021},
    }

  • L. Peters, D. Fridovich-Keil, V. Rubies-Royo, C. J. Tomlin, and C. Stachniss, “Cost Inference in Smooth Dynamic Games from Noise-Corrupted Partial State Observations ,” in Proc. of the RSS Workshop on Social Robot Navigation, 2021.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{peters2021rssws,
    title = {{Cost Inference in Smooth Dynamic Games from Noise-Corrupted Partial State Observations}},
    author = {Peters, Lasse and Fridovich-Keil, David and Rubies-Royo, Vicenc and Tomlin, Claire J. and Stachniss, Cyrill},
    booktitle = {Proc. of the RSS Workshop on Social Robot Navigation},
    year = {2021},
    url = {https://socialrobotnavigation.github.io/papers/paper13.pdf},
    codeurl = {https://github.com/PRBonn/PartiallyObservedInverseGames.jl},
    videourl = {https://www.youtube.com/watch?v=BogCsYQX9Pc},
    }

  • N. Chebrolu, T. Läbe, O. Vysotska, J. Behley, and C. Stachniss, “Adaptive Robust Kernels for Non-Linear Least Squares Problems,” IEEE Robotics and Automation Letters (RA-L), vol. 6, pp. 2240-2247, 2021. doi:10.1109/LRA.2021.3061331
    [BibTeX] [PDF] [Video]
    @article{chebrolu2021ral,
    author = {N. Chebrolu and T. L{\"a}be and O. Vysotska and J. Behley and C. Stachniss},
    title = {{Adaptive Robust Kernels for Non-Linear Least Squares Problems}},
    journal = ral,
    volume = 6,
    number = 2,
    pages = {2240--2247},
    doi = {10.1109/LRA.2021.3061331},
    year = 2021,
    videourl = {https://youtu.be/34Zp3ZX0Bnk}
    }

  • J. Weyler, A. Milioto, T. Falck, J. Behley, and C. Stachniss, “Joint Plant Instance Detection and Leaf Count Estimation for In-Field Plant Phenotyping,” IEEE Robotics and Automation Letters (RA-L), vol. 6, pp. 3599-3606, 2021. doi:10.1109/LRA.2021.3060712
    [BibTeX] [PDF] [Video]
    @article{weyler2021ral,
    author = {J. Weyler and A. Milioto and T. Falck and J. Behley and C. Stachniss},
    title = {{Joint Plant Instance Detection and Leaf Count Estimation for In-Field Plant Phenotyping}},
    journal = ral,
    volume = 6,
    number = 2,
    pages = {3599--3606},
    doi = {10.1109/LRA.2021.3060712},
    year = 2021,
    videourl = {https://youtu.be/Is18Rey625I},
    }

  • L. Wiesmann, A. Milioto, X. Chen, C. Stachniss, and J. Behley, “Deep Compression for Dense Point Cloud Maps,” IEEE Robotics and Automation Letters (RA-L), vol. 6, pp. 2060-2067, 2021. doi:10.1109/LRA.2021.3059633
    [BibTeX] [PDF] [Code] [Video]
    @article{wiesmann2021ral,
    author = {L. Wiesmann and A. Milioto and X. Chen and C. Stachniss and J. Behley},
    title = {{Deep Compression for Dense Point Cloud Maps}},
    journal = ral,
    volume = 6,
    number = 2,
    pages = {2060--2067},
    doi = {10.1109/LRA.2021.3059633},
    year = 2021,
    url = {https://www.ipb.uni-bonn.de/pdfs/wiesmann2021ral.pdf},
    codeurl = {https://github.com/PRBonn/deep-point-map-compression},
    videourl = {https://youtu.be/fLl9lTlZrI0}
    }

  • N. Chebrolu, F. Magistri, T. Läbe, and C. Stachniss, “Registration of Spatio-Temporal Point Clouds of Plants for Phenotyping,” PLoS ONE, vol. 16, iss. 2, 2021.
    [BibTeX] [PDF] [Video]
    @article{chebrolu2021plosone,
    author = {N. Chebrolu and F. Magistri and T. L{\"a}be and C. Stachniss},
    title = {{Registration of Spatio-Temporal Point Clouds of Plants for Phenotyping}},
    journal = plosone,
    year = 2021,
    volume = 16,
    number = 2,
    videourl = {https://youtu.be/OV39kb5Nqg8},
    }

  • F. Görlich, E. Marks, A. Mahlein, K. König, P. Lottes, and C. Stachniss, “UAV-Based Classification of Cercospora Leaf Spot Using RGB Images,” Drones, vol. 5, iss. 2, 2021. doi:10.3390/drones5020034
    [BibTeX] [PDF]

    Plant diseases can impact crop yield. Thus, the detection of plant diseases using sensors that can be mounted on aerial vehicles is in the interest of farmers to support decision-making in integrated pest management and to breeders for selecting tolerant or resistant genotypes. This paper investigated the detection of Cercospora leaf spot (CLS), caused by Cercospora beticola in sugar beet using RGB imagery. We proposed an approach to tackle the CLS detection problem using fully convolutional neural networks, which operate directly on RGB images captured by a UAV. This efficient approach does not require complex multi- or hyper-spectral sensors, but provides reliable results and high sensitivity. We provided a detection pipeline for pixel-wise semantic segmentation of CLS symptoms, healthy vegetation, and background so that our approach can automatically quantify the grade of infestation. We thoroughly evaluated our system using multiple UAV datasets recorded from different sugar beet trial fields. The dataset consisted of a training and a test dataset and originated from different fields. We used it to evaluate our approach under realistic conditions and analyzed its generalization capabilities to unseen environments. The obtained results correlated to visual estimation by human experts significantly. The presented study underlined the potential of high-resolution RGB imaging and convolutional neural networks for plant disease detection under field conditions. The demonstrated procedure is particularly interesting for applications under practical conditions, as no complex and cost-intensive measuring system is required.

    @article{goerlich2021drones,
    author = {G{\"o}rlich, Florian and Marks, Elias and Mahlein, Anne-Katrin and K{\"o}nig, Kathrin and Lottes, Philipp and Stachniss, Cyrill},
    title = {{UAV-Based Classification of Cercospora Leaf Spot Using RGB Images}},
    journal = {Drones},
    volume = {5},
    year = {2021},
    number = {2},
    article-number = {34},
    url = {https://www.mdpi.com/2504-446X/5/2/34/pdf},
    issn = {2504-446X},
    abstract = {Plant diseases can impact crop yield. Thus, the detection of plant diseases using sensors that can be mounted on aerial vehicles is in the interest of farmers to support decision-making in integrated pest management and to breeders for selecting tolerant or resistant genotypes. This paper investigated the detection of Cercospora leaf spot (CLS), caused by Cercospora beticola in sugar beet using RGB imagery. We proposed an approach to tackle the CLS detection problem using fully convolutional neural networks, which operate directly on RGB images captured by a UAV. This efficient approach does not require complex multi- or hyper-spectral sensors, but provides reliable results and high sensitivity. We provided a detection pipeline for pixel-wise semantic segmentation of CLS symptoms, healthy vegetation, and background so that our approach can automatically quantify the grade of infestation. We thoroughly evaluated our system using multiple UAV datasets recorded from different sugar beet trial fields. The dataset consisted of a training and a test dataset and originated from different fields. We used it to evaluate our approach under realistic conditions and analyzed its generalization capabilities to unseen environments. The obtained results correlated to visual estimation by human experts significantly. The presented study underlined the potential of high-resolution RGB imaging and convolutional neural networks for plant disease detection under field conditions. The demonstrated procedure is particularly interesting for applications under practical conditions, as no complex and cost-intensive measuring system is required.},
    doi = {10.3390/drones5020034}
    }

  • W. Förstner, Bayes-Schätzung und Maximum-Likelihood-Schätzung, 2021.
    [BibTeX] [PDF]

    Das Ziel dieser Notiz ist das Prinzip der Bayes-Schätzung und der Maximum-Likelihood-Schätzung zu erläutern.

    @misc{foerstner2021bayesml,
    author = {W. F{\"o}rstner},
    title = {{Bayes-Sch{\"a}tzung und Maximum-Likelihood-Sch{\"a}tzung}},
    year = 2021,
    url = {https://www.ipb.uni-bonn.de/pdfs/foerstner2021bayesml.pdf},
    abstract = {Das Ziel dieser Notiz ist das Prinzip der Bayes-Sch{\"a}tzung und der Maximum-Likelihood-Sch{\"a}tzung zu erl{\"a}utern.},
    }

  • C. Carbone, D. Albani, F. Magistri, D. Ognibene, C. Stachniss, G. Kootstra, D. Nardi, and V. Trianni, “Monitoring and Mapping of Crop Fields with UAV Swarms Based on Information Gain,” in Proc. of the Intl. Symp. on Distributed Autonomous Robotic Systems (DARS), 2021.
    [BibTeX] [PDF]
    @inproceedings{carbone2021dars,
    author = {C. Carbone and D. Albani and F. Magistri and D. Ognibene and C. Stachniss and G. Kootstra and D. Nardi and V. Trianni},
    title = {{Monitoring and Mapping of Crop Fields with UAV Swarms Based on Information Gain}},
    booktitle = dars,
    year = 2021,
    }

  • C. Stachniss, “Achievements Needed for Becoming a Professor,” Academia Letters, iss. 281, 2021. doi:https://doi.org/10.20935/AL281
    [BibTeX] [PDF] [Video]

    What is needed to become a professor? This article summarizes what selection committees often regard as the minimum achievements when recruiting new professors. My goal is to give early-career researchers a brief guideline on their way towards becoming a faculty member.

    @article{stachniss2021al,
    author = {C. Stachniss},
    title = {{Achievements Needed for Becoming a Professor}},
    year = {2021},
    journal = {Academia Letters},
    number = {281},
    doi = {10.20935/AL281},
    url = {https://www.ipb.uni-bonn.de/pdfs/stachniss2021al.pdf},
    abstract = {What is needed to become a professor? This article summarizes what selection committees often regard as the minimum achievements when recruiting new professors. My goal is to give early-career researchers a brief guideline on their way towards becoming a faculty member.},
    videourl = {https://youtu.be/223cMIgN5p0}
    }

2020

  • D. Barath, M. Polic, W. Förstner, T. Sattler, T. Pajdla, and Z. Kukelova, “Making Affine Correspondences Work in Camera Geometry Computation,” in Computer Vision – ECCV 2020, Cham, 2020, p. 723–740. doi:https://doi.org/10.1007/978-3-030-58621-8_42
    [BibTeX] [PDF]
    @InProceedings{barath2020eccv,
    author = {Barath, Daniel and Polic, Michal and F{\"o}rstner, Wolfgang and Sattler, Torsten and Pajdla, Tomas and Kukelova, Zuzana},
    editor = {Vedaldi, Andrea and Bischof, Horst and Brox, Thomas and Frahm, Jan-Michael},
    title = {{Making Affine Correspondences Work in Camera Geometry Computation}},
    booktitle = {Computer Vision -- ECCV 2020},
    year = {2020},
    publisher = {Springer International Publishing},
    address = {Cham},
    pages = {723--740},
    isbn = {978-3-030-58621-8},
    doi = {10.1007/978-3-030-58621-8_42},
    url = {https://www.ecva.net/papers/eccv_2020/papers_ECCV/papers/123560698.pdf}
    }

  • C. Stachniss, I. Vizzo, L. Wiesmann, and N. Berning, How To Setup and Run a 100\% Digital Conf.: DIGICROP 2020, 2020.
    [BibTeX] [PDF]

    The purpose of this record is to document the setup and execution of DIGICROP 2020 and to simplify conducting future online events of that kind. DIGICROP 2020 was a 100\% virtual conference run via Zoom with around 900 registered people in November 2020. It consisted of video presentations available via our website and a single-day live event for Q&A. We had around 450 people attending the Q&A session overall, most of the time 200-250 people have been online at the same time. This document is a collection of notes, instructions, and todo lists. It is not a polished manual, however, we believe these notes will be useful for other conference organizers and for us in the future.

    @misc{stachniss2020digitalconf,
    author = {C. Stachniss and I. Vizzo and L. Wiesmann and N. Berning},
    title = {{How To Setup and Run a 100\% Digital Conf.: DIGICROP 2020}},
    year = {2020},
    url = {https://www.ipb.uni-bonn.de/pdfs/stachniss2020digitalconf.pdf},
    abstract = {The purpose of this record is to document the setup and execution of DIGICROP 2020 and to simplify conducting future online events of that kind. DIGICROP 2020 was a 100\% virtual conference run via Zoom with around 900 registered people in November 2020. It consisted of video presentations available via our website and a single-day live event for Q\&A. We had around 450 people attending the Q\&A session overall, most of the time 200-250 people have been online at the same time. This document is a collection of notes, instructions, and todo lists. It is not a polished manual, however, we believe these notes will be useful for other conference organizers and for us in the future.},
    }

  • A. Milioto, J. Behley, C. McCool, and C. Stachniss, “LiDAR Panoptic Segmentation for Autonomous Driving,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2020.
    [BibTeX] [PDF] [Video]
    @inproceedings{milioto2020iros,
    author = {A. Milioto and J. Behley and C. McCool and C. Stachniss},
    title = {{LiDAR Panoptic Segmentation for Autonomous Driving}},
    booktitle = iros,
    year = {2020},
    videourl = {https://www.youtube.com/watch?v=C9CTQSosr9I},
    }

  • X. Chen, T. Läbe, L. Nardi, J. Behley, and C. Stachniss, “Learning an Overlap-based Observation Model for 3D LiDAR Localization,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2020.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{chen2020iros,
    author = {X. Chen and T. L\"abe and L. Nardi and J. Behley and C. Stachniss},
    title = {{Learning an Overlap-based Observation Model for 3D LiDAR Localization}},
    booktitle = iros,
    year = {2020},
    codeurl = {https://github.com/PRBonn/overlap_localization},
    url={https://www.ipb.uni-bonn.de/pdfs/chen2020iros.pdf},
    videourl = {https://www.youtube.com/watch?v=BozPqy_6YcE},
    }

  • F. Langer, A. Milioto, A. Haag, J. Behley, and C. Stachniss, “Domain Transfer for Semantic Segmentation of LiDAR Data using Deep Neural Networks,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2020.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{langer2020iros,
    author = {F. Langer and A. Milioto and A. Haag and J. Behley and C. Stachniss},
    title = {{Domain Transfer for Semantic Segmentation of LiDAR Data using Deep Neural Networks}},
    booktitle = iros,
    year = {2020},
    url = {https://www.ipb.uni-bonn.de/pdfs/langer2020iros.pdf},
    videourl = {https://youtu.be/6FNGF4hKBD0},
    codeurl = {https://github.com/PRBonn/lidar_transfer},
    }

  • F. Magistri, N. Chebrolu, and C. Stachniss, “Segmentation-Based 4D Registration of Plants Point Clouds for Phenotyping,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2020.
    [BibTeX] [PDF] [Video]
    @inproceedings{magistri2020iros,
    author = {F. Magistri and N. Chebrolu and C. Stachniss},
    title = {{Segmentation-Based 4D Registration of Plants Point Clouds for Phenotyping}},
    booktitle = iros,
    year = {2020},
    url={https://www.ipb.uni-bonn.de/pdfs/magistri2020iros.pdf},
    videourl = {https://youtu.be/OV39kb5Nqg8},
    }

  • D. Gogoll, P. Lottes, J. Weyler, N. Petrinic, and C. Stachniss, “Unsupervised Domain Adaptation for Transferring Plant Classification Systems to New Field Environments, Crops, and Robots,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2020.
    [BibTeX] [PDF] [Video]
    @inproceedings{gogoll2020iros,
    author = {D. Gogoll and P. Lottes and J. Weyler and N. Petrinic and C. Stachniss},
    title = {{Unsupervised Domain Adaptation for Transferring Plant Classification Systems to New Field Environments, Crops, and Robots}},
    booktitle = iros,
    year = {2020},
    url = {https://www.ipb.uni-bonn.de/pdfs/gogoll2020iros.pdf},
    videourl = {https://www.youtube.com/watch?v=6K79Ih6KXTs},
    }

  • X. Chen, T. Läbe, A. Milioto, T. Röhling, O. Vysotska, A. Haag, J. Behley, and C. Stachniss, “OverlapNet: Loop Closing for LiDAR-based SLAM,” in Proc. of Robotics: Science and Systems (RSS), 2020.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{chen2020rss,
    author = {X. Chen and T. L\"abe and A. Milioto and T. R\"ohling and O. Vysotska and A. Haag and J. Behley and C. Stachniss},
    title = {{OverlapNet: Loop Closing for LiDAR-based SLAM}},
    booktitle = rss,
    year = {2020},
    codeurl = {https://github.com/PRBonn/OverlapNet/},
    videourl = {https://youtu.be/YTfliBco6aw},
    }

  • W. Förstner, “Symmetric Least Squares Matching – Sym-LSM,” Institut für Photogrammetrie, Universität Bonn 2020.
    [BibTeX] [PDF] [Code]
    @TechReport{foerstner2020report-sym-lsm,
    author = {F{\"o}rstner, Wolfgang},
    title = {{Symmetric Least Squares Matching -- Sym-LSM}},
    institution = {Institut f{\"u}r Photogrammetrie, Universit{\"a}t Bonn},
    year = {2020},
    codeurl = {https://www.ipb.uni-bonn.de/symmetric-least-squares-matching},
    }

  • N. Chebrolu, T. Laebe, O. Vysotska, J. Behley, and C. Stachniss, “Adaptive Robust Kernels for Non-Linear Least Squares Problems,” arXiv Preprint, 2020.
    [BibTeX] [PDF]
    @article{chebrolu2020arxiv,
    title={Adaptive Robust Kernels for Non-Linear Least Squares Problems},
    author={N. Chebrolu and T. Laebe and O. Vysotska and J. Behley and C. Stachniss},
    journal = arxiv,
    year=2020,
    eprint={2004.14938},
    keywords={cs.RO},
    url={https://arxiv.org/pdf/2004.14938v2}
    }

  • J. Behley, A. Milioto, and C. Stachniss, “A Benchmark for LiDAR-based Panoptic Segmentation based on KITTI,” arXiv Preprint, 2020.
    [BibTeX] [PDF]

    Panoptic segmentation is the recently introduced task that tackles semantic segmentation and instance segmentation jointly. In this paper, we present an extension of SemanticKITTI, which is a large-scale dataset providing dense point-wise semantic labels for all sequences of the KITTI Odometry Benchmark, for training and evaluation of laser-based panoptic segmentation. We provide the data and discuss the processing steps needed to enrich a given semantic annotation with temporally consistent instance information, i.e., instance information that supplements the semantic labels and identifies the same instance over sequences of LiDAR point clouds. Additionally, we present two strong baselines that combine state-of-the-art LiDAR-based semantic segmentation approaches with a state-of-the-art detector enriching the segmentation with instance information and that allow other researchers to compare their approaches against. We hope that our extension of SemanticKITTI with strong baselines enables the creation of novel algorithms for LiDAR-based panoptic segmentation as much as it has for the original semantic segmentation and semantic scene completion tasks. Data, code, and an online evaluation using a hidden test set will be published on https://semantic-kitti.org.

    @article{behley2020arxiv,
    author = {J. Behley and A. Milioto and C. Stachniss},
    title = {{A Benchmark for LiDAR-based Panoptic Segmentation based on KITTI}},
    journal = arxiv,
    year = 2020,
    eprint = {2003.02371v1},
    url = {https://arxiv.org/pdf/2003.02371v1},
    keywords = {cs.CV},
    abstract = {Panoptic segmentation is the recently introduced task that tackles semantic segmentation and instance segmentation jointly. In this paper, we present an extension of SemanticKITTI, which is a large-scale dataset providing dense point-wise semantic labels for all sequences of the KITTI Odometry Benchmark, for training and evaluation of laser-based panoptic segmentation. We provide the data and discuss the processing steps needed to enrich a given semantic annotation with temporally consistent instance information, i.e., instance information that supplements the semantic labels and identifies the same instance over sequences of LiDAR point clouds. Additionally, we present two strong baselines that combine state-of-the-art LiDAR-based semantic segmentation approaches with a state-of-the-art detector enriching the segmentation with instance information and that allow other researchers to compare their approaches against. We hope that our extension of SemanticKITTI with strong baselines enables the creation of novel algorithms for LiDAR-based panoptic segmentation as much as it has for the original semantic segmentation and semantic scene completion tasks. Data, code, and an online evaluation using a hidden test set will be published on https://semantic-kitti.org.}
    }

  • X. Wu, S. Aravecchia, P. Lottes, C. Stachniss, and C. Pradalier, “Robotic Weed Control Using Automated Weed and Crop Classification,” Journal of Field Robotics, vol. 37, pp. 322-340, 2020.
    [BibTeX] [PDF]
    @Article{wu2020jfr,
    title = {Robotic Weed Control Using Automated Weed and Crop Classification},
    author = {X. Wu and S. Aravecchia and P. Lottes and C. Stachniss and C. Pradalier},
    journal = jfr,
    year = {2020},
    volume = {37},
    number = {2},
    pages = {322--340},
    url = {https://www.ipb.uni-bonn.de/pdfs/wu2020jfr.pdf},
    }

  • P. Lottes, J. Behley, N. Chebrolu, A. Milioto, and C. Stachniss, “Robust joint stem detection and crop-weed classification using image sequences for plant-specific treatment in precision farming,” Journal of Field Robotics, vol. 37, pp. 20-34, 2020. doi:https://doi.org/10.1002/rob.21901
    [BibTeX] [PDF]
    @Article{lottes2020jfr,
    title = {Robust joint stem detection and crop-weed classification using image sequences for plant-specific treatment in precision farming},
    author = {Lottes, P. and Behley, J. and Chebrolu, N. and Milioto, A. and Stachniss, C.},
    journal = jfr,
    volume = {37},
    number = {1},
    pages = {20--34},
    year = {2020},
    doi = {10.1002/rob.21901},
    url = {https://www.ipb.uni-bonn.de/pdfs/lottes2019jfr.pdf},
    }

  • N. Chebrolu, T. Laebe, and C. Stachniss, “Spatio-Temporal Non-Rigid Registration of 3D Point Clouds of Plants,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2020.
    [BibTeX] [PDF] [Video]
    @InProceedings{chebrolu2020icra,
    title = {Spatio-Temporal Non-Rigid Registration of 3D Point Clouds of Plants},
    author = {N. Chebrolu and T. Laebe and C. Stachniss},
    booktitle = icra,
    year = {2020},
    url = {https://www.ipb.uni-bonn.de/pdfs/chebrolu2020icra.pdf},
    videourl = {https://www.youtube.com/watch?v=uGkep_aelBc},
    }

  • A. Ahmadi, L. Nardi, N. Chebrolu, and C. Stachniss, “Visual Servoing-based Navigation for Monitoring Row-Crop Fields,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2020.
    [BibTeX] [PDF] [Code] [Video]
    @InProceedings{ahmadi2020icra,
    title = {Visual Servoing-based Navigation for Monitoring Row-Crop Fields},
    author = {A. Ahmadi and L. Nardi and N. Chebrolu and C. Stachniss},
    booktitle = icra,
    year = {2020},
    url = {https://arxiv.org/pdf/1909.12754},
    codeurl = {https://github.com/PRBonn/visual-crop-row-navigation},
    videourl = {https://youtu.be/0qg6n4sshHk},
    }

  • L. Nardi and C. Stachniss, “Long-Term Robot Navigation in Indoor Environments Estimating Patterns in Traversability Changes,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2020.
    [BibTeX] [PDF] [Video]
    @InProceedings{nardi2020icra,
    title = {Long-Term Robot Navigation in Indoor Environments Estimating Patterns in Traversability Changes},
    author = {L. Nardi and C. Stachniss},
    booktitle = icra,
    year = {2020},
    url = {https://arxiv.org/pdf/1909.12733},
    videourl = {https://www.youtube.com/watch?v=9lNcA3quzwU},
    }

  • R. Sheikh, A. Milioto, P. Lottes, C. Stachniss, M. Bennewitz, and T. Schultz, “Gradient and Log-based Active Learning for Semantic Segmentation of Crop and Weed for Agricultural Robots,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2020.
    [BibTeX] [PDF] [Video]
    @InProceedings{sheikh2020icra,
    title = {Gradient and Log-based Active Learning for Semantic Segmentation of Crop and Weed for Agricultural Robots},
    author = {R. Sheikh and A. Milioto and P. Lottes and C. Stachniss and M. Bennewitz and T. Schultz},
    booktitle = icra,
    year = {2020},
    url = {https://www.ipb.uni-bonn.de/pdfs/sheikh2020icra.pdf},
    videourl = {https://www.youtube.com/watch?v=NySa59gxFAg},
    }

  • J. Quenzel, R. A. Rosu, T. Laebe, C. Stachniss, and S. Behnke, “Beyond Photometric Consistency: Gradient-based Dissimilarity for Improving Visual Odometry and Stereo Matching,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2020.
    [BibTeX] [PDF] [Video]
    @InProceedings{quenzel020icra,
    title = {Beyond Photometric Consistency: Gradient-based Dissimilarity for Improving Visual Odometry and Stereo Matching},
    author = {J. Quenzel and R.A. Rosu and T. Laebe and C. Stachniss and S. Behnke},
    booktitle = icra,
    year = {2020},
    url = {https://www.ipb.uni-bonn.de/pdfs/quenzel2020icra.pdf},
    videourl = {https://www.youtube.com/watch?v=cqv7k-BK0g0},
    }

  • P. Regier, A. Milioto, C. Stachniss, and M. Bennewitz, “Classifying Obstacles and Exploiting Class Information for Humanoid Navigation Through Cluttered Environments,” The Intl. Journal of Humanoid Robotics (IJHR), vol. 17, iss. 02, p. 2050013, 2020. doi:10.1142/S0219843620500139
    [BibTeX] [PDF]

    Humanoid robots are often supposed to share their workspace with humans and thus have to deal with objects used by humans in their everyday life. In this article, we present our novel approach to humanoid navigation through cluttered environments, which exploits knowledge about different obstacle classes to decide how to deal with obstacles and select appropriate robot actions. To classify objects from RGB images and decide whether an obstacle can be overcome by the robot with a corresponding action, e.g., by pushing or carrying it aside or stepping over or onto it, we train and exploit a convolutional neural network (CNN). Based on associated action costs, we compute a cost grid containing newly observed objects in addition to static obstacles on which a 2D path can be efficiently planned. This path encodes the necessary actions that need to be carried out by the robot to reach the goal. We implemented our framework in the Robot Operating System (ROS) and tested it in various scenarios with a Nao robot as well as in simulation with the REEM-C robot. As the experiments demonstrate, using our CNN, the robot can robustly classify the observed obstacles into the different classes and decide on suitable actions to find efficient solution paths. Our system finds paths also through regions where traditional motion planning methods are not able to calculate a solution or require substantially more time.

    @article{regier2020ijhr,
    author = {Regier, P. and Milioto, A. and Stachniss, C. and Bennewitz, M.},
    title = {{Classifying Obstacles and Exploiting Class Information for Humanoid Navigation Through Cluttered Environments}},
    journal = ijhr,
    volume = {17},
    number = {02},
    pages = {2050013},
    year = {2020},
    doi = {10.1142/S0219843620500139},
    abstract = {Humanoid robots are often supposed to share their workspace with humans and thus have to deal with objects used by humans in their everyday life. In this article, we present our novel approach to humanoid navigation through cluttered environments, which exploits knowledge about different obstacle classes to decide how to deal with obstacles and select appropriate robot actions. To classify objects from RGB images and decide whether an obstacle can be overcome by the robot with a corresponding action, e.g., by pushing or carrying it aside or stepping over or onto it, we train and exploit a convolutional neural network (CNN). Based on associated action costs, we compute a cost grid containing newly observed objects in addition to static obstacles on which a 2D path can be efficiently planned. This path encodes the necessary actions that need to be carried out by the robot to reach the goal. We implemented our framework in the Robot Operating System (ROS) and tested it in various scenarios with a Nao robot as well as in simulation with the REEM-C robot. As the experiments demonstrate, using our CNN, the robot can robustly classify the observed obstacles into the different classes and decide on suitable actions to find efficient solution paths. Our system finds paths also through regions where traditional motion planning methods are not able to calculate a solution or require substantially more time. }
    }

2019

  • E. Palazzolo, “Active 3D Reconstruction for Mobile Robots,” PhD Thesis, 2019.
    [BibTeX] [PDF]
    @PhdThesis{palazzolo2019phd,
    author = {Palazzolo, E.},
    title = {Active 3D Reconstruction for Mobile Robots},
    year = 2019,
    school = {University of Bonn},
    URL = {https://www.ipb.uni-bonn.de/pdfs/palazzolo2019phd.pdf}
    }

  • J. Behley, M. Garbade, A. Milioto, J. Quenzel, S. Behnke, C. Stachniss, and J. Gall, “SemanticKITTI: A Dataset for Semantic Scene Understanding of LiDAR Sequences,” in Proc. of the IEEE/CVF Intl. Conf. on Computer Vision (ICCV), 2019.
    [BibTeX] [PDF] [Video]
    @InProceedings{behley2019iccv,
    author = {J. Behley and M. Garbade and A. Milioto and J. Quenzel and S. Behnke and C. Stachniss and J. Gall},
    title = {{SemanticKITTI: A Dataset for Semantic Scene Understanding of LiDAR Sequences}},
    booktitle = iccv,
    year = {2019},
    videourl = {https://www.ipb.uni-bonn.de/html/projects/semantic_kitti/videos/teaser.mp4},
    }

  • O. Vysotska, “Visual Place Recognition in Changing Environments,” PhD Thesis, 2019.
    [BibTeX] [PDF]
    @PhdThesis{vysotska2019phd,
    author = {O. Vysotska},
    title = {Visual Place Recognition in Changing Environments},
    year = 2019,
    school =  {University of Bonn},
    URL = {https://hss.ulb.uni-bonn.de/2019/5593/5593.pdf},
    }

  • A. Pretto, S. Aravecchia, W. Burgard, N. Chebrolu, C. Dornhege, T. Falck, F. Fleckenstein, A. Fontenla, M. Imperoli, R. Khanna, F. Liebisch, P. Lottes, A. Milioto, D. Nardi, S. Nardi, J. Pfeifer, M. Popović, C. Potena, C. Pradalier, E. Rothacker-Feder, I. Sa, A. Schaefer, R. Siegwart, C. Stachniss, A. Walter, W. Winterhalter, X. Wu, and J. Nieto, “Building an Aerial-Ground Robotics System for Precision Farming,” arXiv Preprint, 2019.
    [BibTeX] [PDF]
    @article{pretto2019arxiv,
    author = {A. Pretto and S. Aravecchia and W. Burgard and N. Chebrolu and C. Dornhege and T. Falck and F. Fleckenstein and A. Fontenla and M. Imperoli and R. Khanna and F. Liebisch and P. Lottes and A. Milioto and D. Nardi and S. Nardi and J. Pfeifer and M. Popovi{\'c} and C. Potena and C. Pradalier and E. Rothacker-Feder and I. Sa and A. Schaefer and R. Siegwart and C. Stachniss and A. Walter and W. Winterhalter and X. Wu and J. Nieto},
    title = {{Building an Aerial-Ground Robotics System for Precision Farming}},
    journal = arxiv,
    year = 2019,
    eprint = {1911.03098v1},
    url = {https://arxiv.org/pdf/1911.03098v1},
    keywords = {cs.RO},
    }

  • O. Vysotska and C. Stachniss, “Effective Visual Place Recognition Using Multi-Sequence Maps,” IEEE Robotics and Automation Letters (RA-L), vol. 4, pp. 1730-1736, 2019.
    [BibTeX] [PDF] [Video]
    @article{vysotska2019ral,
    author = {O. Vysotska and C. Stachniss},
    title = {{Effective Visual Place Recognition Using Multi-Sequence Maps}},
    journal = ral,
    year = 2019,
    volume = 4,
    number = 2,
    pages = {1730--1736},
    url = {https://www.ipb.uni-bonn.de/pdfs/vysotska2019ral.pdf},
    videourl = {https://youtu.be/wFU0JoXTH3c},
    }

  • E. Palazzolo, J. Behley, P. Lottes, P. Giguère, and C. Stachniss, “ReFusion: 3D Reconstruction in Dynamic Environments for RGB-D Cameras Exploiting Residuals,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2019.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{palazzolo2019iros,
      author    = {E. Palazzolo and J. Behley and P. Lottes and P. Gigu\`ere and C. Stachniss},
      title     = {{ReFusion: 3D Reconstruction in Dynamic Environments for RGB-D Cameras Exploiting Residuals}},
      booktitle = iros,
      year      = {2019},
      url       = {https://www.ipb.uni-bonn.de/pdfs/palazzolo2019iros.pdf},
      codeurl   = {https://github.com/PRBonn/refusion},
      videourl  = {https://youtu.be/1P9ZfIS5-p4},
    }

  • X. Chen, A. Milioto, E. Palazzolo, P. Giguère, J. Behley, and C. Stachniss, “SuMa++: Efficient LiDAR-based Semantic SLAM,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2019.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{chen2019iros,
    author = {X. Chen and A. Milioto and E. Palazzolo and P. Gigu{\`e}re and J. Behley and C. Stachniss},
    title = {{SuMa++: Efficient LiDAR-based Semantic SLAM}},
    booktitle = iros,
    year = 2019,
    codeurl = {https://github.com/PRBonn/semantic_suma/},
    videourl = {https://youtu.be/uo3ZuLuFAzk},
    }

  • A. Milioto, I. Vizzo, J. Behley, and C. Stachniss, “RangeNet++: Fast and Accurate LiDAR Semantic Segmentation,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2019.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{milioto2019iros,
      author    = {A. Milioto and I. Vizzo and J. Behley and C. Stachniss},
      title     = {{RangeNet++: Fast and Accurate LiDAR Semantic Segmentation}},
      booktitle = iros,
      year      = {2019},
      codeurl   = {https://github.com/PRBonn/lidar-bonnetal},
      videourl  = {https://youtu.be/wuokg7MFZyU},
    }

  • F. Yan, O. Vysotska, and C. Stachniss, “Global Localization on OpenStreetMap Using 4-bit Semantic Descriptors,” in Proc. of the European Conf. on Mobile Robots (ECMR), 2019.
    [BibTeX] [PDF]
    @InProceedings{yan2019ecmr,
    author = {F. Yan and O. Vysotska and C. Stachniss},
    title = {{Global Localization on OpenStreetMap Using 4-bit Semantic Descriptors}},
    booktitle = ecmr,
    year = {2019},
    }

  • L. Zabawa, A. Kicherer, L. Klingbeil, A. Milioto, R. Topfer, H. Kuhlmann, and R. Roscher, “Detection of Single Grapevine Berries in Images Using Fully Convolutional Neural Networks,” in The IEEE Conf. on Computer Vision and Pattern Recognition (CVPR) Workshops, 2019.
    [BibTeX] [PDF]
    @InProceedings{zabawa2019cvpr-workshop,
    author = {L. Zabawa and A. Kicherer and L. Klingbeil and A. Milioto and R. Topfer and H. Kuhlmann and R. Roscher},
    title = {{Detection of Single Grapevine Berries in Images Using Fully Convolutional Neural Networks}},
    booktitle = {The IEEE Conf. on Computer Vision and Pattern Recognition (CVPR) Workshops},
    month = jun,
    year = {2019},
    }

  • O. Vysotska, H. Kuhlmann, and C. Stachniss, “UAVs Towards Sustainable Crop Production,” in Workshop at Robotics: Science and Systems, 2019.
    [BibTeX] [PDF]
    @inproceedings{vysotska2019rsswsabstract,
      author    = {O. Vysotska and H. Kuhlmann and C. Stachniss},
      title     = {{UAVs Towards Sustainable Crop Production}},
      booktitle = {Workshop at Robotics: Science and Systems},
      year      = {2019},
      note      = {Abstract},
    }

  • A. Milioto and C. Stachniss, “Bonnet: An Open-Source Training and Deployment Framework for Semantic Segmentation in Robotics using CNNs,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2019.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{milioto2019icra,
      author    = {A. Milioto and C. Stachniss},
      title     = {{Bonnet: An Open-Source Training and Deployment Framework for Semantic Segmentation in Robotics using CNNs}},
      booktitle = icra,
      year      = {2019},
      codeurl   = {https://github.com/Photogrammetry-Robotics-Bonn/bonnet},
      videourl  = {https://www.youtube.com/watch?v=tfeFHCq6YJs},
    }

  • A. Milioto, L. Mandtler, and C. Stachniss, “Fast Instance and Semantic Segmentation Exploiting Local Connectivity, Metric Learning, and One-Shot Detection for Robotics,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2019.
    [BibTeX] [PDF]
    @InProceedings{milioto2019icra-fiass,
    author = {A. Milioto and L. Mandtler and C. Stachniss},
    title = {{Fast Instance and Semantic Segmentation Exploiting Local Connectivity, Metric Learning, and One-Shot Detection for Robotics}},
    booktitle = icra,
    year = 2019,
    }

  • L. Nardi and C. Stachniss, “Uncertainty-Aware Path Planning for Navigation on Road Networks Using Augmented MDPs,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2019.
    [BibTeX] [PDF] [Video]
    @InProceedings{nardi2019icra-uapp,
    author = {L. Nardi and C. Stachniss},
    title = {{Uncertainty-Aware Path Planning for Navigation on Road Networks Using Augmented MDPs}},
    booktitle = icra,
    year = 2019,
    url = {https://www.ipb.uni-bonn.de/pdfs/nardi2019icra-uapp.pdf},
    videourl = {https://youtu.be/3PMSamgYzi4},
    }

  • L. Nardi and C. Stachniss, “Actively Improving Robot Navigation On Different Terrains Using Gaussian Process Mixture Models,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2019.
    [BibTeX] [PDF] [Video]
    @inproceedings{nardi2019icra-airn,
      author    = {L. Nardi and C. Stachniss},
      title     = {{Actively Improving Robot Navigation On Different Terrains Using Gaussian Process Mixture Models}},
      booktitle = icra,
      year      = {2019},
      url       = {https://www.ipb.uni-bonn.de/pdfs/nardi2019icra-airn.pdf},
      videourl  = {https://youtu.be/DlMbP3u1g2Y},
    }

  • D. Wilbers, C. Merfels, and C. Stachniss, “Localization with Sliding Window Factor Graphs on Third-Party Maps for Automated Driving,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2019.
    [BibTeX] [PDF]
    @inproceedings{wilbers2019icra,
      author    = {D. Wilbers and Ch. Merfels and C. Stachniss},
      title     = {{Localization with Sliding Window Factor Graphs on Third-Party Maps for Automated Driving}},
      booktitle = icra,
      year      = {2019},
    }

  • N. Chebrolu, P. Lottes, T. Laebe, and C. Stachniss, “Robot Localization Based on Aerial Images for Precision Agriculture Tasks in Crop Fields,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2019.
    [BibTeX] [PDF] [Video]
    @InProceedings{chebrolu2019icra,
    author = {N. Chebrolu and P. Lottes and T. L\"abe and C. Stachniss},
    title = {{Robot Localization Based on Aerial Images for Precision Agriculture Tasks in Crop Fields}},
    booktitle = icra,
    year = 2019,
    url = {https://www.ipb.uni-bonn.de/pdfs/chebrolu2019icra.pdf},
    videourl = {https://youtu.be/TlijLgoRLbc},
    }

  • K. Huang, J. Xiao, and C. Stachniss, “Accurate Direct Visual-Laser Odometry with Explicit Occlusion Handling and Plane Detection,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2019.
    [BibTeX] [PDF]
    @inproceedings{huang2019icra,
      author    = {K. Huang and J. Xiao and C. Stachniss},
      title     = {{Accurate Direct Visual-Laser Odometry with Explicit Occlusion Handling and Plane Detection}},
      booktitle = icra,
      year      = {2019},
    }

  • R. Schirmer, P. Bieber, and C. Stachniss, “Coverage Path Planning in Belief Space,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2019.
    [BibTeX] [PDF]
    @InProceedings{schirmer2019icra,
    author = {R. Schirmer and P. Bieber and C. Stachniss},
    title = {{Coverage Path Planning in Belief Space}},
    booktitle = icra,
    year = 2019,
    }

  • D. Wilbers, L. Rumberg, and C. Stachniss, “Approximating Marginalization with Sparse Global Priors for Sliding Window SLAM-Graphs,” in Proc. of the IEEE Intl. Conf. on Robotic Computing (IRC), 2019.
    [BibTeX] [PDF]

    Most autonomous vehicles rely on some kind of map for localization or navigation. Outdated maps however are a risk to the performance of any map-based localization system applied in autonomous vehicles. It is necessary to update the used maps to ensure stable and long-term operation. We address the problem of computing landmark updates live in the vehicle, which requires efficient use of the computational resources. In particular, we employ a graph-based sliding window approach for simultaneous localization and incremental map refinement. We propose a novel method that approximates sliding window marginalization without inducing fill-in. Our method maintains the exact same sparsity pattern as without performing marginalization, but simultaneously improves the landmark estimates. The main novelty of this work is the derivation of sparse global priors that approximate dense marginalization. In comparison to state-of-the-art work, our approach utilizes global instead of local linearization points, but still minimizes linearization errors. We first approximate marginalization via Kullback-Leibler divergence and then recalculate the mean to compensate linearization errors. We evaluate our approach on simulated and real data from a prototype vehicle and compare our approach to state-of-the-art sliding window marginalization.

    @inproceedings{wilbers2019irc-amws,
      author    = {D. Wilbers and L. Rumberg and C. Stachniss},
      title     = {{Approximating Marginalization with Sparse Global Priors for Sliding Window SLAM-Graphs}},
      booktitle = {Proc. of the IEEE Intl. Conf. on Robotic Computing (IRC)},
      year      = {2019},
      abstract  = {Most autonomous vehicles rely on some kind of map for localization or navigation. Outdated maps however are a risk to the performance of any map-based localization system applied in autonomous vehicles. It is necessary to update the used maps to ensure stable and long-term operation. We address the problem of computing landmark updates live in the vehicle, which requires efficient use of the computational resources. In particular, we employ a graph-based sliding window approach for simultaneous localization and incremental map refinement. We propose a novel method that approximates sliding window marginalization without inducing fill-in. Our method maintains the exact same sparsity pattern as without performing marginalization, but simultaneously improves the landmark estimates. The main novelty of this work is the derivation of sparse global priors that approximate dense marginalization. In comparison to state-of-the-art work, our approach utilizes global instead of local linearization points, but still minimizes linearization errors. We first approximate marginalization via Kullback-Leibler divergence and then recalculate the mean to compensate linearization errors. We evaluate our approach on simulated and real data from a prototype vehicle and compare our approach to state-of-the-art sliding window marginalization.},
    }

  • D. Wilbers, C. Merfels, and C. Stachniss, “A Comparison of Particle Filter and Graph-based Optimization for Localization with Landmarks in Automated Vehicles,” in Proc. of the IEEE Intl. Conf. on Robotic Computing (IRC), 2019.
    [BibTeX] [PDF]
    @inproceedings{wilbers2019irc-cpfg,
      author    = {D. Wilbers and Ch. Merfels and C. Stachniss},
      title     = {{A Comparison of Particle Filter and Graph-based Optimization for Localization with Landmarks in Automated Vehicles}},
      booktitle = {Proc. of the IEEE Intl. Conf. on Robotic Computing (IRC)},
      year      = {2019},
    }

  • P. Lottes, N. Chebrolu, F. Liebisch, and C. Stachniss, “UAV-based Field Monitoring for Precision Farming,” in Proc. of the 25th Workshop für Computer-Bildanalyse und unbemannte autonom fliegende Systeme in der Landwirtschaft, 2019.
    [BibTeX] [PDF]
    @inproceedings{lottes2019cbaws,
      author    = {P. Lottes and N. Chebrolu and F. Liebisch and C. Stachniss},
      title     = {UAV-based Field Monitoring for Precision Farming},
      booktitle = {Proc. of the 25th Workshop f\"ur Computer-Bildanalyse und unbemannte autonom fliegende Systeme in der Landwirtschaft},
      year      = {2019},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/lottes2019cbaws.pdf},
    }

  • L. Klingbeil, E. Heinz, M. Wieland, J. Eichel, T. Läbe, and H. Kuhlmann, “On the UAV based Analysis of Slow Geomorphological Processes: A Case Study at a Solifluction Lobe in the Turtmann Valley,” in Proc. of the 4th Joint International Symposium on Deformation Monitoring (JISDM), 2019.
    [BibTeX] [PDF]
    @inproceedings{klingbeil19jisdm,
      author    = {L. Klingbeil and E. Heinz and M. Wieland and J. Eichel and T. L\"abe and H. Kuhlmann},
      title     = {On the UAV based Analysis of Slow Geomorphological Processes: A Case Study at a Solifluction Lobe in the Turtmann Valley},
      booktitle = {Proc. of the 4th Joint International Symposium on Deformation Monitoring (JISDM)},
      year      = {2019},
      url       = {https://www.ipb.uni-bonn.de/pdfs/klingbeil19jisdm.pdf},
    }

2018

  • I. Sa, M. Popovic, R. Khanna, Z. Chen, P. Lottes, F. Liebisch, J. Nieto, C. Stachniss, and R. Siegwart, “WeedMap: A Large-Scale Semantic Weed Mapping Framework Using Aerial Multispectral Imaging and Deep Neural Network for Precision Farming,” Remote Sensing, vol. 10, 2018. doi:10.3390/rs10091423
    [BibTeX] [PDF]

    The ability to automatically monitor agricultural fields is an important capability in precision farming, enabling steps towards more sustainable agriculture. Precise, high-resolution monitoring is a key prerequisite for targeted intervention and the selective application of agro-chemicals. The main goal of this paper is developing a novel crop/weed segmentation and mapping framework that processes multispectral images obtained from an unmanned aerial vehicle (UAV) using a deep neural network (DNN). Most studies on crop/weed semantic segmentation only consider single images for processing and classification. Images taken by UAVs often cover only a few hundred square meters with either color only or color and near-infrared (NIR) channels. Although a map can be generated by processing single segmented images incrementally, this requires additional complex information fusion techniques which struggle to handle high fidelity maps due to their computational costs and problems in ensuring global consistency. Moreover, computing a single large and accurate vegetation map (e.g., crop/weed) using a DNN is non-trivial due to difficulties arising from: (1) limited ground sample distances (GSDs) in high-altitude datasets, (2) sacrificed resolution resulting from downsampling high-fidelity images, and (3) multispectral image alignment. To address these issues, we adopt a stand sliding window approach that operates on only small portions of multispectral orthomosaic maps (tiles), which are channel-wise aligned and calibrated radiometrically across the entire map. We define the tile size to be the same as that of the DNN input to avoid resolution loss. Compared to our baseline model (i.e., SegNet with 3 channel RGB inputs) yielding an area under the curve (AUC) of [background=0.607, crop=0.681, weed=0.576], our proposed model with 9 input channels achieves [0.839, 0.863, 0.782]. Additionally, we provide an extensive analysis of 20 trained models, both qualitatively and quantitatively, in order to evaluate the effects of varying input channels and tunable network hyperparameters. Furthermore, we release a large sugar beet/weed aerial dataset with expertly guided annotations for further research in the fields of remote sensing, precision agriculture, and agricultural robotics.

    @Article{sa2018rs,
    author = {I. Sa and M. Popovic and R. Khanna and Z. Chen and P. Lottes and F. Liebisch and J. Nieto and C. Stachniss and R. Siegwart},
    title = {{WeedMap: A Large-Scale Semantic Weed Mapping Framework Using Aerial Multispectral Imaging and Deep Neural Network for Precision Farming}},
    journal = rs,
    year = 2018,
    volume = {10},
    number = {9},
    url = {https://www.mdpi.com/2072-4292/10/9/1423/pdf},
    doi = {10.3390/rs10091423},
    abstract = {The ability to automatically monitor agricultural fields is an important capability in precision farming, enabling steps towards more sustainable agriculture. Precise, high-resolution monitoring is a key prerequisite for targeted intervention and the selective application of agro-chemicals. The main goal of this paper is developing a novel crop/weed segmentation and mapping framework that processes multispectral images obtained from an unmanned aerial vehicle (UAV) using a deep neural network (DNN). Most studies on crop/weed semantic segmentation only consider single images for processing and classification. Images taken by UAVs often cover only a few hundred square meters with either color only or color and near-infrared (NIR) channels. Although a map can be generated by processing single segmented images incrementally, this requires additional complex information fusion techniques which struggle to handle high fidelity maps due to their computational costs and problems in ensuring global consistency. Moreover, computing a single large and accurate vegetation map (e.g., crop/weed) using a DNN is non-trivial due to difficulties arising from: (1) limited ground sample distances (GSDs) in high-altitude datasets, (2) sacrificed resolution resulting from downsampling high-fidelity images, and (3) multispectral image alignment. To address these issues, we adopt a stand sliding window approach that operates on only small portions of multispectral orthomosaic maps (tiles), which are channel-wise aligned and calibrated radiometrically across the entire map. We define the tile size to be the same as that of the DNN input to avoid resolution loss. Compared to our baseline model (i.e., SegNet with 3 channel RGB inputs) yielding an area under the curve (AUC) of [background=0.607, crop=0.681, weed=0.576], our proposed model with 9 input channels achieves [0.839, 0.863, 0.782]. 
Additionally, we provide an extensive analysis of 20 trained models, both qualitatively and quantitatively, in order to evaluate the effects of varying input channels and tunable network hyperparameters. Furthermore, we release a large sugar beet/weed aerial dataset with expertly guided annotations for further research in the fields of remote sensing, precision agriculture, and agricultural robotics.},
    }

  • N. Chebrolu, T. Läbe, and C. Stachniss, “Robust Long-Term Registration of UAV Images of Crop Fields for Precision Agriculture,” IEEE Robotics and Automation Letters (RA-L), vol. 3, iss. 4, pp. 3097-3104, 2018. doi:10.1109/LRA.2018.2849603
    [BibTeX] [PDF]
    @Article{chebrolu2018ral,
    author={N. Chebrolu and T. L\"abe and C. Stachniss},
    journal=ral,
    title={Robust Long-Term Registration of UAV Images of Crop Fields for Precision Agriculture},
    year={2018},
    volume={3},
    number={4},
    pages={3097--3104},
    keywords={Agriculture;Cameras;Geometry;Monitoring;Robustness;Three-dimensional displays;Visualization;Robotics in agriculture and forestry;SLAM},
    doi={10.1109/LRA.2018.2849603},
    url={https://www.ipb.uni-bonn.de/pdfs/chebrolu2018ral.pdf},
    }

  • P. Lottes, J. Behley, A. Milioto, and C. Stachniss, “Fully Convolutional Networks with Sequential Information for Robust Crop and Weed Detection in Precision Farming,” IEEE Robotics and Automation Letters (RA-L), vol. 3, pp. 3097-3104, 2018. doi:10.1109/LRA.2018.2846289
    [BibTeX] [PDF] [Video]
    @Article{lottes2018ral,
    author = {P. Lottes and J. Behley and A. Milioto and C. Stachniss},
    title = {Fully Convolutional Networks with Sequential Information for Robust Crop and Weed Detection in Precision Farming},
    journal = ral,
    year = {2018},
    volume = {3},
    number = {4},
    pages = {3097--3104},
    doi = {10.1109/LRA.2018.2846289},
    url = {https://www.ipb.uni-bonn.de/pdfs/lottes2018ral.pdf},
    videourl = {https://www.youtube.com/watch?v=vTepw9HRLh8},
    }

  • P. Regier, A. Milioto, P. Karkowski, C. Stachniss, and M. Bennewitz, “Classifying Obstacles and Exploiting Knowledge about Classes for Efficient Humanoid Navigation,” in Proc. of the IEEE-RAS Int. Conf. on Humanoid Robots (HUMANOIDS), 2018.
    [BibTeX] [PDF]
    @inproceedings{regier2018humanoids,
      author    = {P. Regier and A. Milioto and P. Karkowski and C. Stachniss and M. Bennewitz},
      title     = {{Classifying Obstacles and Exploiting Knowledge about Classes for Efficient Humanoid Navigation}},
      booktitle = {Proc. of the IEEE-RAS Int. Conf. on Humanoid Robots (HUMANOIDS)},
      year      = {2018},
    }

  • K. H. Huang and C. Stachniss, “Joint Ego-motion Estimation Using a Laser Scanner and a Monocular Camera Through Relative Orientation Estimation and 1-DoF ICP,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2018.
    [BibTeX] [PDF] [Video]

    Pose estimation and mapping are key capabilities of most autonomous vehicles and thus a number of localization and SLAM algorithms have been developed in the past. Autonomous robots and cars are typically equipped with multiple sensors. Often, the sensor suite includes a camera and a laser range finder. In this paper, we consider the problem of incremental ego-motion estimation, using both, a monocular camera and a laser range finder jointly. We propose a new algorithm, that exploits the advantages of both sensors–-the ability of cameras to determine orientations well and the ability of laser range finders to estimate the scale and to directly obtain 3D point clouds. Our approach estimates the five degree of freedom relative orientation from image pairs through feature point correspondences and formulates the remaining scale estimation as a new variant of the iterative closet point problem with only one degree of freedom. We furthermore exploit the camera information in a new way to constrain the data association between laser point clouds. The experiments presented in this paper suggest that our approach is able to accurately estimate the ego-motion of a vehicle and that we obtain more accurate frame-to-frame alignments than with one sensor modality alone.

    @InProceedings{huang2018iros,
    author = {K. H. Huang and C. Stachniss},
    title = {{Joint Ego-motion Estimation Using a Laser Scanner and a Monocular Camera Through Relative Orientation Estimation and 1-DoF ICP}},
    booktitle = iros,
    year = 2018,
    videourl = {https://www.youtube.com/watch?v=Glv0UT_KqoM},
    abstract = {Pose estimation and mapping are key capabilities of most autonomous vehicles and thus a number of localization and SLAM algorithms have been developed in the past. Autonomous robots and cars are typically equipped with multiple sensors. Often, the sensor suite includes a camera and a laser range finder. In this paper, we consider the problem of incremental ego-motion estimation, using both, a monocular camera and a laser range finder jointly. We propose a new algorithm, that exploits the advantages of both sensors---the ability of cameras to determine orientations well and the ability of laser range finders to estimate the scale and to directly obtain 3D point clouds. Our approach estimates the five degree of freedom relative orientation from image pairs through feature point correspondences and formulates the remaining scale estimation as a new variant of the iterative closet point problem with only one degree of freedom. We furthermore exploit the camera information in a new way to constrain the data association between laser point clouds. The experiments presented in this paper suggest that our approach is able to accurately estimate the ego-motion of a vehicle and that we obtain more accurate frame-to-frame alignments than with one sensor modality alone.}
    }

  • P. Lottes, J. Behley, N. Chebrolu, A. Milioto, and C. Stachniss, “Joint Stem Detection and Crop-Weed Classification for Plant-specific Treatment in Precision Farming,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2018.
    [BibTeX] [PDF] [Video]

    Applying agrochemicals is the default procedure for conventional weed control in crop production, but has negative impacts on the environment. Robots have the potential to treat every plant in the field individually and thus can reduce the required use of such chemicals. To achieve that, robots need the ability to identify crops and weeds in the field and must additionally select effective treatments. While certain types of weed can be treated mechanically, other types need to be treated by (selective) spraying. In this paper, we present an approach that provides the necessary information for effective plant-specific treatment. It outputs the stem location for weeds, which allows for mechanical treatments, and the covered area of the weed for selective spraying. Our approach uses an end-to- end trainable fully convolutional network that simultaneously estimates stem positions as well as the covered area of crops and weeds. It jointly learns the class-wise stem detection and the pixel-wise semantic segmentation. Experimental evaluations on different real-world datasets show that our approach is able to reliably solve this problem. Compared to state-of-the-art approaches, our approach not only substantially improves the stem detection accuracy, i.e., distinguishing crop and weed stems, but also provides an improvement in the semantic segmentation performance.

    @inproceedings{lottes2018iros,
      author    = {P. Lottes and J. Behley and N. Chebrolu and A. Milioto and C. Stachniss},
      title     = {Joint Stem Detection and Crop-Weed Classification for Plant-specific Treatment in Precision Farming},
      booktitle = iros,
      year      = {2018},
      url       = {https://www.ipb.uni-bonn.de/pdfs/lottes18iros.pdf},
      videourl  = {https://www.youtube.com/watch?v=C9mjZxE_Sxg},
      abstract  = {Applying agrochemicals is the default procedure for conventional weed control in crop production, but has negative impacts on the environment. Robots have the potential to treat every plant in the field individually and thus can reduce the required use of such chemicals. To achieve that, robots need the ability to identify crops and weeds in the field and must additionally select effective treatments. While certain types of weed can be treated mechanically, other types need to be treated by (selective) spraying. In this paper, we present an approach that provides the necessary information for effective plant-specific treatment. It outputs the stem location for weeds, which allows for mechanical treatments, and the covered area of the weed for selective spraying. Our approach uses an end-to- end trainable fully convolutional network that simultaneously estimates stem positions as well as the covered area of crops and weeds. It jointly learns the class-wise stem detection and the pixel-wise semantic segmentation. Experimental evaluations on different real-world datasets show that our approach is able to reliably solve this problem. Compared to state-of-the-art approaches, our approach not only substantially improves the stem detection accuracy, i.e., distinguishing crop and weed stems, but also provides an improvement in the semantic segmentation performance.},
    }

  • J. Jung, C. Stachniss, S. Ju, and J. Heo, “Automated 3D volumetric reconstruction of multiple-room building interiors for as-built BIM,” Advanced Engineering Informatics, vol. 38, pp. 811-825, 2018. doi:10.1016/j.aei.2018.10.007
    [BibTeX]

    Currently, fully automated as-built modeling of building interiors using point-cloud data still remains an open challenge, due to several problems that repeatedly arise: (1) complex indoor environments containing multiple rooms; (2) time-consuming and labor-intensive noise filtering; (3) difficulties of representation of volumetric and detail-rich objects such as windows and doors. This study aimed to overcome such limitations while improving the amount of details reproduced within the model for further utilization in BIM. First, we input just the registered three-dimensional (3D) point-cloud data and segmented the point cloud into separate rooms for more effective performance of the later modeling phases for each room. For noise filtering, an offset space from the ceiling height was used to determine whether the scan points belonged to clutter or architectural components. The filtered points were projected onto a binary map in order to trace the floor-wall boundary, which was further refined through subsequent segmentation and regularization procedures. Then, the wall volumes were estimated in two ways: inside- and outside-wall-component modeling. Finally, the wall points were segmented and projected onto an inverse binary map, thereby enabling detection and modeling of the hollow areas as windows or doors. The experimental results on two real-world data sets demonstrated, through comparison with manually-generated models, the effectiveness of our approach: the calculated RMSEs of the two resulting models were 0.089m and 0.074m, respectively.

    @article{jung2018aei,
    title = {Automated 3D volumetric reconstruction of multiple-room building interiors for as-built BIM},
    journal = aei,
    author = {J. Jung and C. Stachniss and S. Ju and J. Heo},
    volume = {38},
    pages = {811--825},
    year = 2018,
    issn = {1474-0346},
    doi = {10.1016/j.aei.2018.10.007},
    _weburl = {https://www.sciencedirect.com/science/article/pii/S1474034618300600},
    abstract = {Currently, fully automated as-built modeling of building interiors using point-cloud data still remains an open challenge, due to several problems that repeatedly arise: (1) complex indoor environments containing multiple rooms; (2) time-consuming and labor-intensive noise filtering; (3) difficulties of representation of volumetric and detail-rich objects such as windows and doors. This study aimed to overcome such limitations while improving the amount of details reproduced within the model for further utilization in BIM. First, we input just the registered three-dimensional (3D) point-cloud data and segmented the point cloud into separate rooms for more effective performance of the later modeling phases for each room. For noise filtering, an offset space from the ceiling height was used to determine whether the scan points belonged to clutter or architectural components. The filtered points were projected onto a binary map in order to trace the floor-wall boundary, which was further refined through subsequent segmentation and regularization procedures. Then, the wall volumes were estimated in two ways: inside- and outside-wall-component modeling. Finally, the wall points were segmented and projected onto an inverse binary map, thereby enabling detection and modeling of the hollow areas as windows or doors. The experimental results on two real-world data sets demonstrated, through comparison with manually-generated models, the effectiveness of our approach: the calculated RMSEs of the two resulting models were 0.089m and 0.074m, respectively.}
    }

  • J. Behley and C. Stachniss, “Efficient Surfel-Based SLAM using 3D Laser Range Data in Urban Environments,” in Proc. of Robotics: Science and Systems (RSS), 2018.
    [BibTeX] [PDF] [Video]
    @inproceedings{behley2018rss,
      author    = {J. Behley and C. Stachniss},
      title     = {Efficient Surfel-Based SLAM using 3D Laser Range Data in Urban Environments},
      booktitle = rss,
      year      = {2018},
      url       = {https://www.roboticsproceedings.org/rss14/p16.pdf},
      videourl  = {https://www.youtube.com/watch?v=-AEX203rXkE},
    }

  • T. Naseer, W. Burgard, and C. Stachniss, “Robust Visual Localization Across Seasons,” IEEE Transactions on Robotics, pp. 1-14, 2018. doi:10.1109/tro.2017.2788045
    [BibTeX] [PDF]
    @Article{naseer2018tro,
    author = {T. Naseer and W. Burgard and C. Stachniss},
    title = {Robust Visual Localization Across Seasons},
    journal = ieeetransrob,
    year = 2018,
    pages = {1--14},
    doi = {10.1109/tro.2017.2788045},
    url = {https://www.ipb.uni-bonn.de/pdfs/naseer2018tro.pdf},
    }

  • B. Della Corte, I. Bogoslavskyi, C. Stachniss, and G. Grisetti, “A General Framework for Flexible Multi-Cue Photometric Point Cloud Registration,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2018.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{della-corte2018icra,
      author    = {Della Corte, B. and I. Bogoslavskyi and C. Stachniss and G. Grisetti},
      title     = {A General Framework for Flexible Multi-Cue Photometric Point Cloud Registration},
      booktitle = icra,
      year      = {2018},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/della-corte2018icra.pdf},
      codeurl   = {https://gitlab.com/srrg-software/srrg_mpr},
      videourl  = {https://www.youtube.com/watch?v=_z98guJTqfk},
    }

  • A. Milioto, P. Lottes, and C. Stachniss, “Real-time Semantic Segmentation of Crop and Weed for Precision Agriculture Robots Leveraging Background Knowledge in CNNs,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2018.
    [BibTeX] [PDF] [Video]

    Precision farming robots, which target to reduce the amount of herbicides that need to be brought out in the fields, must have the ability to identify crops and weeds in real time to trigger weeding actions. In this paper, we address the problem of CNN-based semantic segmentation of crop fields separating sugar beet plants, weeds, and background solely based on RGB data. We propose a CNN that exploits existing vegetation indexes and provides a classification in real time. Furthermore, it can be effectively re-trained to so far unseen fields with a comparably small amount of training data. We implemented and thoroughly evaluated our system on a real agricultural robot operating in different fields in Germany and Switzerland. The results show that our system generalizes well, can operate at around 20Hz, and is suitable for online operation in the fields.

    @InProceedings{milioto2018icra,
    author = {A. Milioto and P. Lottes and C. Stachniss},
    title = {Real-time Semantic Segmentation of Crop and Weed for Precision Agriculture Robots Leveraging Background Knowledge in CNNs},
    year = {2018},
    booktitle = icra,
    abstract = {Precision farming robots, which target to reduce the amount of herbicides that need to be brought out in the fields, must have the ability to identify crops and weeds in real time to trigger weeding actions. In this paper, we address the problem of CNN-based semantic segmentation of crop fields separating sugar beet plants, weeds, and background solely based on RGB data. We propose a CNN that exploits existing vegetation indexes and provides a classification in real time. Furthermore, it can be effectively re-trained to so far unseen fields with a comparably small amount of training data. We implemented and thoroughly evaluated our system on a real agricultural robot operating in different fields in Germany and Switzerland. The results show that our system generalizes well, can operate at around 20Hz, and is suitable for online operation in the fields.},
    url = {https://arxiv.org/abs/1709.06764},
    videourl = {https://youtu.be/DXcTkJmdWFQ},
    }

  • A. Milioto and C. Stachniss, “Bonnet: An Open-Source Training and Deployment Framework for Semantic Segmentation in Robotics using CNNs,” ICRA Workshop on Perception, Inference, and Learning for Joint Semantic, Geometric, and Physical Understanding, 2018.
    [BibTeX] [PDF] [Code] [Video]
    @Article{milioto2018icraws,
    author = {A. Milioto and C. Stachniss},
    title = "{Bonnet: An Open-Source Training and Deployment Framework for Semantic Segmentation in Robotics using CNNs}",
    journal = {ICRA Workshop on Perception, Inference, and Learning for Joint Semantic, Geometric, and Physical Understanding},
    eprint = {1802.08960},
    primaryclass = "cs.RO",
    keywords = {Computer Science - Robotics, Computer Science - Computer Vision and Pattern Recognition},
    year = 2018,
    month = may,
    url = {https://arxiv.org/abs/1802.08960},
    codeurl = {https://github.com/Photogrammetry-Robotics-Bonn/bonnet},
    videourl = {https://www.youtube.com/watch?v=tfeFHCq6YJs},
    }

  • E. Palazzolo and C. Stachniss, “Effective Exploration for MAVs Based on the Expected Information Gain,” Drones, vol. 2, iss. 1, 2018. doi:10.3390/drones2010009
    [BibTeX] [PDF]

    Micro aerial vehicles (MAVs) are an excellent platform for autonomous exploration. Most MAVs rely mainly on cameras for building a map of the 3D environment. Therefore, vision-based MAVs require an efficient exploration algorithm to select viewpoints that provide informative measurements. In this paper, we propose an exploration approach that selects in real time the next-best-view that maximizes the expected information gain of new measurements. In addition, we take into account the cost of reaching a new viewpoint in terms of distance and predictability of the flight path for a human observer. Finally, our approach selects a path that reduces the risk of crashes when the expected battery life comes to an end, while still maximizing the information gain in the process. We implemented and thoroughly tested our approach and the experiments show that it offers an improved performance compared to other state-of-the-art algorithms in terms of precision of the reconstruction, execution time, and smoothness of the path.

    @Article{palazzolo2018drones,
    author = {E. Palazzolo and C. Stachniss},
    title = {{Effective Exploration for MAVs Based on the Expected Information Gain}},
    journal = {Drones},
    volume = {2},
    year = {2018},
    number = {1},
    article-number= {9},
    url = {https://www.ipb.uni-bonn.de/pdfs/palazzolo2018drones.pdf},
    issn = {2504-446X},
    abstract = {Micro aerial vehicles (MAVs) are an excellent platform for autonomous exploration. Most MAVs rely mainly on cameras for building a map of the 3D environment. Therefore, vision-based MAVs require an efficient exploration algorithm to select viewpoints that provide informative measurements. In this paper, we propose an exploration approach that selects in real time the next-best-view that maximizes the expected information gain of new measurements. In addition, we take into account the cost of reaching a new viewpoint in terms of distance and predictability of the flight path for a human observer. Finally, our approach selects a path that reduces the risk of crashes when the expected battery life comes to an end, while still maximizing the information gain in the process. We implemented and thoroughly tested our approach and the experiments show that it offers an improved performance compared to other state-of-the-art algorithms in terms of precision of the reconstruction, execution time, and smoothness of the path.},
    doi = {10.3390/drones2010009},
    }

  • E. Palazzolo and C. Stachniss, “Fast Image-Based Geometric Change Detection Given a 3D Model,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2018.
    [BibTeX] [PDF] [Code] [Video]
    @InProceedings{palazzolo2018icra,
    title = {{Fast Image-Based Geometric Change Detection Given a 3D Model}},
    author = {E. Palazzolo and C. Stachniss},
    booktitle = icra,
    year = {2018},
    url = {https://www.ipb.uni-bonn.de/pdfs/palazzolo2018icra.pdf},
    codeurl = {https://github.com/PRBonn/fast_change_detection},
    videourl = {https://youtu.be/DEkOYf4Zzh4},
    }

  • K. H. Huang and C. Stachniss, “On Geometric Models and Their Accuracy for Extrinsic Sensor Calibration,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2018.
    [BibTeX] [PDF]
    @InProceedings{huang2018icra,
    author = {K.H. Huang and C. Stachniss},
    title = {On Geometric Models and Their Accuracy for Extrinsic Sensor Calibration},
    booktitle = icra,
    year = 2018,
    url = {https://www.ipb.uni-bonn.de/pdfs/huang2018icra.pdf},
    }

  • A. Walter, R. Khanna, P. Lottes, C. Stachniss, R. Siegwart, J. Nieto, and F. Liebisch, “Flourish – A robotic approach for automation in crop management,” in Proc. of the Intl. Conf. on Precision Agriculture (ICPA), 2018.
    [BibTeX] [PDF]

    The Flourish project aims to bridge the gap between current and desired capabilities of agricultural robots by developing an adaptable robotic solution for precision farming. Combining the aerial survey capabilities of a small autonomous multi-copter Unmanned Aerial Vehicle (UAV) with a multi-purpose agricultural Unmanned Ground Vehicle (UGV), the system will be able to survey a field from the air, perform targeted intervention on the ground, and provide detailed information for decision support, all with minimal user intervention. The system can be adapted to a wide range of farm management activities and to different crops by choosing different sensors, status indicators and ground treatment packages. The research project thereby touches a selection of topics addressed by ICPA such as sensor application in managing in-season crop variability, precision nutrient management and crop protection as well as remote sensing applications in precision agriculture and engineering technologies and advances. This contribution will introduce the Flourish consortium and concept using the results of three years of active development, testing, and measuring in field campaigns. Two key parts of the project will be shown in more detail: First, mapping of the field by drones for detection of sugar beet nitrogen status variation and weed pressure in the field and second the perception of the UGV as related to weed classification and subsequent precision weed management. The field mapping by means of an UAV will be shown for crop nitrogen status estimation and weed pressure with examples for subsequent crop management decision support. For nitrogen status, the results indicate that drones are up to the task to deliver crop nitrogen variability maps utilized for variable rate application that are of comparable quality to current on-tractor systems. The weed pressure mapping is viable as basis for the UGV showcase of precision weed management. 
For this, we show the automated image acquisition by the UGV and a subsequent plant classification with a four-step pipeline, differentiating crop from weed in real time. Advantages and disadvantages as well as future prospects of such approaches will be discussed.

    @InProceedings{walter2018icpa,
    Title = {Flourish - A robotic approach for automation in crop management},
    Author = {A. Walter and R. Khanna and P. Lottes and C. Stachniss and R. Siegwart and J. Nieto and F. Liebisch},
    Booktitle = icpa,
    Year = 2018,
    abstract = {The Flourish project aims to bridge the gap between current and desired capabilities of agricultural robots by developing an adaptable robotic solution for precision farming. Combining the aerial survey capabilities of a small autonomous multi-copter Unmanned Aerial Vehicle (UAV) with a multi-purpose agricultural Unmanned Ground Vehicle (UGV), the system will be able to survey a field from the air, perform targeted intervention on the ground, and provide detailed information for decision support, all with minimal user intervention. The system can be adapted to a wide range of farm management activities and to different crops by choosing different sensors, status indicators and ground treatment packages. The research project thereby touches a selection of topics addressed by ICPA such as sensor application in managing in-season crop variability, precision nutrient management and crop protection as well as remote sensing applications in precision agriculture and engineering technologies and advances. This contribution will introduce the Flourish consortium and concept using the results of three years of active development, testing, and measuring in field campaigns. Two key parts of the project will be shown in more detail: First, mapping of the field by drones for detection of sugar beet nitrogen status variation and weed pressure in the field and second the perception of the UGV as related to weed classification and subsequent precision weed management. The field mapping by means of an UAV will be shown for crop nitrogen status estimation and weed pressure with examples for subsequent crop management decision support. For nitrogen status, the results indicate that drones are up to the task to deliver crop nitrogen variability maps utilized for variable rate application that are of comparable quality to current on-tractor systems. The weed pressure mapping is viable as basis for the UGV showcase of precision weed management. 
For this, we show the automated image acquisition by the UGV and a subsequent plant classification with a four-step pipeline, differentiating crop from weed in real time. Advantages and disadvantages as well as future prospects of such approaches will be discussed.},
    }

  • F. Langer, L. Mandtler, A. Milioto, E. Palazzolo, and C. Stachniss, “Geometrical Stem Detection from Image Data for Precision Agriculture,” arXiv Preprint, 2018.
    [BibTeX] [PDF]
    @article{langer2018arxiv,
    author = {F. Langer and L. Mandtler and A. Milioto and E. Palazzolo and C. Stachniss},
    title = {{Geometrical Stem Detection from Image Data for Precision Agriculture}},
    journal = arxiv,
    year = 2018,
    eprint = {1812.05415v1},
    url = {https://arxiv.org/pdf/1812.05415v1},
    keywords = {cs.RO},
    }

  • L. Drees, R. Roscher, and S. Wenzel, “Archetypal Analysis for Sparse Representation-based Hyperspectral Sub-Pixel Quantification,” Photogrammetric Engineering & Remote Sensing, 2018.
    [BibTeX] [PDF]
    @Article{drees2018arxiv,
    author = {Drees, L. and Roscher, R. and Wenzel, S.},
    title = {Archetypal Analysis for Sparse Representation-based Hyperspectral Sub-Pixel Quantification},
    journal = {Photogrammetric Engineering \& Remote Sensing},
    year = {2018},
    note = {accepted},
    url = {https://arxiv.org/abs/1802.02813},
    }

  • K. Franz, R. Roscher, A. Milioto, S. Wenzel, and J. Kusche, “Ocean Eddy Identification and Tracking using Neural Networks,” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS), 2018.
    [BibTeX] [PDF]
    @InProceedings{franz2018ocean,
    author = {Franz, K. and Roscher, R. and Milioto, A. and Wenzel, S. and Kusche, J.},
    title = {Ocean Eddy Identification and Tracking using Neural Networks},
    booktitle = {IEEE International Geoscience and Remote Sensing Symposium (IGARSS)},
    year = {2018},
    note = {accepted},
    url = {https://arxiv.org/abs/1803.07436},
    }

  • L. Nardi and C. Stachniss, “Towards Uncertainty-Aware Path Planning for Navigation on Road Networks Using Augmented MDPs,” in 10th Workshop on Planning, Perception and Navigation for Intelligent Vehicles at the IEEE/RSJ Int. Conf. on Intelligent Robots and Systems (IROS), 2018.
    [BibTeX] [PDF] [Video]
    @InProceedings{nardi2018ppniv,
    title = {Towards Uncertainty-Aware Path Planning for Navigation on Road Networks Using Augmented MDPs},
    author = {L. Nardi and C. Stachniss},
    booktitle = {10th Workshop on Planning, Perception and Navigation for Intelligent Vehicles at the IEEE/RSJ Int. Conf. on Intelligent Robots and Systems (IROS)},
    year = {2018},
    videourl = {https://youtu.be/SLp5YVplJAQ},
    }

  • I. Bogoslavskyi, “Robot Mapping and Navigation in Real-World Environments,” PhD Thesis, 2018.
    [BibTeX] [PDF]
    @PhDThesis{bogoslavskyi2018phd,
    author = {I. Bogoslavskyi},
    title = {Robot Mapping and Navigation in Real-World Environments},
    school = {Rheinische Friedrich-Wilhelms University of Bonn},
    year = 2018,
    url = {https://www.ipb.uni-bonn.de/pdfs/bogoslavskyi2018phd.pdf},
    }

  • C. Merfels, “Sensor fusion for localization of automated vehicles,” PhD Thesis, 2018.
    [BibTeX] [PDF]
    @PhDThesis{merfels2018phd,
    author = {C. Merfels},
    title = {{Sensor fusion for localization of automated vehicles}},
    school = {Rheinische Friedrich-Wilhelms University of Bonn},
    year = 2018,
    url = {https://hss.ulb.uni-bonn.de/2018/5276/5276.pdf},
    }

2017

  • C. Beekmans, J. Schneider, T. Laebe, M. Lennefer, C. Stachniss, and C. Simmer, “3D-Cloud Morphology and Motion from Dense Stereo for Fisheye Cameras,” in Proc. of the European Geosciences Union General Assembly (EGU), 2017.
    [BibTeX] [PDF]
    @InProceedings{beekmans2017egu,
    title = {3D-Cloud Morphology and Motion from Dense Stereo for Fisheye Cameras},
    author = {C. Beekmans and J. Schneider and T. L\"abe and M. Lennefer and C. Stachniss and C. Simmer},
    booktitle = {Proc. of the European Geosciences Union General Assembly (EGU)},
    year = {2017},
    }

  • I. Bogoslavskyi and C. Stachniss, “Analyzing the Quality of Matched 3D Point Clouds of Objects,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2017.
    [BibTeX] [PDF]
    [none]
    @InProceedings{bogoslavskyi2017iros,
    title = {Analyzing the Quality of Matched 3D Point Clouds of Objects},
    author = {I. Bogoslavskyi and C. Stachniss},
    booktitle = iros,
    year = {2017},
    abstract = {[none]},
    url = {https://www.ipb.uni-bonn.de/pdfs/bogoslavskyi17iros.pdf},
    }

  • I. Bogoslavskyi and C. Stachniss, “Efficient Online Segmentation for Sparse 3D Laser Scans,” Journal of Photogrammetry, Remote Sensing and Geoinformation Science (PFG), 2017, p. 41–52.
    [BibTeX] [PDF] [Code] [Video]

    The ability to extract individual objects in the scene is key for a large number of autonomous navigation systems such as mobile robots or autonomous cars. Such systems navigating in dynamic environments need to be aware of objects that may change or move. In most perception cues, a pre-segmentation of the current image or laser scan into individual objects is the first processing step before a further analysis is performed. In this paper, we present an effective method that first removes the ground from the scan and then segments the 3D data in a range image representation into different objects. A key focus of our work is a fast execution with several hundred Hertz. Our implementation has small computational demands so that it can run online on most mobile systems. We explicitly avoid the computation of the 3D point cloud and operate directly on a 2.5D range image, which enables a fast segmentation for each 3D scan. This approach can furthermore handle sparse 3D data well, which is important for scanners such as the new Velodyne VLP-16 scanner. We implemented our approach in C++ and ROS, thoroughly tested it using different 3D scanners, and will release the source code of our implementation. Our method can operate at frame rates that are substantially higher than those of the sensors while using only a single core of a mobile CPU and producing high-quality segmentation results.

    @Article{bogoslavskyi2017pfg,
    title = {Efficient Online Segmentation for Sparse 3D Laser Scans},
    author = {Bogoslavskyi, Igor and Stachniss, Cyrill},
    journal = pfg,
    year = {2017},
    pages = {41--52},
    volume = {85},
    number = {1},
    abstract = {The ability to extract individual objects in the scene is key for a large number of autonomous navigation systems such as mobile robots or autonomous cars. Such systems navigating in dynamic environments need to be aware of objects that may change or move. In most perception cues, a pre-segmentation of the current image or laser scan into individual objects is the first processing step before a further analysis is performed. In this paper, we present an effective method that first removes the ground from the scan and then segments the 3D data in a range image representation into different objects. A key focus of our work is a fast execution with several hundred Hertz. Our implementation has small computational demands so that it can run online on most mobile systems. We explicitly avoid the computation of the 3D point cloud and operate directly on a 2.5D range image, which enables a fast segmentation for each 3D scan. This approach can furthermore handle sparse 3D data well, which is important for scanners such as the new Velodyne VLP-16 scanner. We implemented our approach in C++ and ROS, thoroughly tested it using different 3D scanners, and will release the source code of our implementation. Our method can operate at frame rates that are substantially higher than those of the sensors while using only a single core of a mobile CPU and producing high-quality segmentation results.},
    url = {https://www.ipb.uni-bonn.de/pdfs/bogoslavskyi16pfg.pdf},
    codeurl = {https://github.com/Photogrammetry-Robotics-Bonn/depth_clustering},
    videourl = {https://www.youtube.com/watch?v=6WqsOlHGTLA},
    }

  • D. Bulatov, S. Wenzel, G. Häufel, and J. Meidow, “Chain-Wise Generalization of Road Networks Using Model Selection,” in ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences, 2017, p. 59–66. doi:10.5194/isprs-annals-IV-1-W1-59-2017
    [BibTeX] [PDF]

    Streets are essential entities of urban terrain and their automatized extraction from airborne sensor data is cumbersome because of a complex interplay of geometric, topological and semantic aspects. Given a binary image, representing the road class, centerlines of road segments are extracted by means of skeletonization. The focus of this paper lies in a well-reasoned representation of these segments by means of geometric primitives, such as straight line segments as well as circle and ellipse arcs. We propose the fusion of raw segments based on similarity criteria; the output of this process are the so-called chains which better match to the intuitive perception of what a street is. Further, we propose a two-step approach for chain-wise generalization. First, the chain is pre-segmented using circlePeucker and finally, model selection is used to decide whether two neighboring segments should be fused to a new geometric entity. Thereby, we consider both variance-covariance analysis of residuals and model complexity. The results on a complex data-set with many traffic roundabouts indicate the benefits of the proposed procedure.

    @InProceedings{bulatov2017isprs,
    title = {Chain-Wise Generalization of Road Networks Using Model Selection},
    author = {Bulatov, D. and Wenzel, S. and H\"aufel, G. and Meidow, J.},
    booktitle = {ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences},
    year = {2017},
    pages = {59--66},
    volume = {IV-1/W1},
    abstract = {Streets are essential entities of urban terrain and their automatized extraction from airborne sensor data is cumbersome because of a complex interplay of geometric, topological and semantic aspects. Given a binary image, representing the road class, centerlines of road segments are extracted by means of skeletonization. The focus of this paper lies in a well-reasoned representation of these segments by means of geometric primitives, such as straight line segments as well as circle and ellipse arcs. We propose the fusion of raw segments based on similarity criteria; the output of this process are the so-called chains which better match to the intuitive perception of what a street is. Further, we propose a two-step approach for chain-wise generalization. First, the chain is pre-segmented using circlePeucker and finally, model selection is used to decide whether two neighboring segments should be fused to a new geometric entity. Thereby, we consider both variance-covariance analysis of residuals and model complexity. The results on a complex data-set with many traffic roundabouts indicate the benefits of the proposed procedure.},
    date = {2017-06-08},
    doi = {10.5194/isprs-annals-IV-1-W1-59-2017},
    url = {https://www.ipb.uni-bonn.de/pdfs/Bulaton2017Chain-Wise.pdf},
    }

  • W. Förstner, Some Comments on the Relations of Photogrammetry and Industry, 2017.
    [BibTeX] [PDF]
    @Unpublished{foerstner2017misc,
    title = {{Some Comments on the Relations of Photogrammetry and Industry}},
    author = {W. F{\"o}rstner},
    note = {Note for Photogrammetric Record},
    year = {2017},
    owner = {wf},
    url = {https://www.ipb.uni-bonn.de/pdfs/foerstner17comments.pdf},
    }

  • W. Förstner and K. Khoshelham, “Efficient and Accurate Registration of Point Clouds with Plane to Plane Correspondences,” in 3rd International Workshop on Recovering 6D Object Pose, 2017.
    [BibTeX] [PDF]

    We propose and analyse methods to efficiently register point clouds based on plane correspondences. This is relevant in man-made environments, where most objects are bounded by planar surfaces. Based on a segmentation of the point clouds into planar regions and matches of planes in different point clouds, we (1) optimally estimate the relative pose(s); (2) provide three direct solutions, of which two take the uncertainty of the given planes into account; and (3) analyse the loss in accuracy of the direct solutions as compared to the optimal solution. The paper presents the different solutions, derives their uncertainty especially of the suboptimal direct solutions, and compares their accuracy based on simulated and real data. We show that the direct methods that exploit the uncertainty of the planes lead to a maximum loss of 2.76 in accuracy of the estimated motion parameters in terms of the achieved standard deviations compared to the optimal estimates. We also show that the results are more accurate than the classical iterative closest point and iterative closest plane method, but the estimation procedures have a significantly lower computational complexity. We finally show how to generalize the estimation scheme to simultaneously register multiple point clouds.

    @InProceedings{foerstner2017ws,
    author = {Wolfgang F{\"o}rstner and Kourosh Khoshelham},
    title = {{Efficient and Accurate Registration of Point Clouds with Plane to Plane Correspondences}},
    booktitle = {3rd International Workshop on Recovering 6D Object Pose},
    year = {2017},
    abstract = {We propose and analyse methods to efficiently register point clouds based on plane correspondences. This is relevant in man-made environments, where most objects are bounded by planar surfaces. Based on a segmentation of the point clouds into planar regions and matches of planes in different point clouds, we (1) optimally estimate the relative pose(s); (2) provide three direct solutions, of which two take the uncertainty of the given planes into account; and (3) analyse the loss in accuracy of the direct solutions as compared to the optimal solution. The paper presents the different solutions, derives their uncertainty especially of the suboptimal direct solutions, and compares their accuracy based on simulated and real data. We show that the direct methods that exploit the uncertainty of the planes lead to a maximum loss of 2.76 in accuracy of the estimated motion parameters in terms of the achieved standard deviations compared to the optimal estimates. We also show that the results are more accurate than the classical iterative closest point and iterative closest plane method, but the estimation procedures have a significantly lower computational complexity. We finally show how to generalize the estimation scheme to simultaneously register multiple point clouds.},
    url = {https://www.ipb.uni-bonn.de/pdfs/foerstner17efficient.pdf},
    }

  • W. Förstner and K. Khoshelham, Supplement to: Efficient and Accurate Registration of Point Clouds with Plane to Plane Correspondences, 2017.
    [BibTeX] [PDF]
    @Unpublished{foerstner2017supp,
    title = {{Supplement to: Efficient and Accurate Registration of Point Clouds with Plane to Plane Correspondences}},
    author = {Wolfgang F{\"o}rstner and Kourosh Khoshelham},
    year = {2017},
    url = {https://www.ipb.uni-bonn.de/pdfs/foerstner17efficient_supp.pdf},
    }

  • A. Kicherer, K. Herzog, N. Bendel, H. Klück, A. Backhaus, M. Wieland, J. C. Rose, L. Klingbeil, T. Läbe, C. Hohl, W. Petry, H. Kuhlmann, U. Seiffert, and R. Töpfer, “Phenoliner: A New Field Phenotyping Platform for Grapevine Research,” Sensors, vol. 17, iss. 7, 2017. doi:10.3390/s17071625
    [BibTeX] [PDF]

    In grapevine research the acquisition of phenotypic data is largely restricted to the field due to its perennial nature and size. The methodologies used to assess morphological traits and phenology are mainly limited to visual scoring. Some measurements for biotic and abiotic stress, as well as for quality assessments, are done by invasive measures. The new evolving sensor technologies provide the opportunity to perform non-destructive evaluations of phenotypic traits using different field phenotyping platforms. One of the biggest technical challenges for field phenotyping of grapevines are the varying light conditions and the background. In the present study the Phenoliner is presented, which represents a novel type of a robust field phenotyping platform. The vehicle is based on a grape harvester following the concept of a moveable tunnel. The tunnel it is equipped with different sensor systems (RGB and NIR camera system, hyperspectral camera, RTK-GPS, orientation sensor) and an artificial broadband light source. It is independent from external light conditions and in combination with artificial background, the Phenoliner enables standardised acquisition of high-quality, geo-referenced sensor data.

    @Article{kicherer2017phenoliner,
    author = {Kicherer, Anna and Herzog, Katja and Bendel, Nele and Klück, Hans-Christian and Backhaus, Andreas and Wieland, Markus and Rose, Johann Christian and Klingbeil, Lasse and Läbe, Thomas and Hohl, Christian and Petry, Willi and Kuhlmann, Heiner and Seiffert, Udo and Töpfer, Reinhard},
    title = {Phenoliner: A New Field Phenotyping Platform for Grapevine Research},
    journal = {Sensors},
    volume = {17},
    year = {2017},
    number = {7},
    url = {https://www.mdpi.com/1424-8220/17/7/1625/pdf},
    issn = {1424-8220},
    abstract = {In grapevine research the acquisition of phenotypic data is largely restricted to the field due to its perennial nature and size. The methodologies used to assess morphological traits and phenology are mainly limited to visual scoring. Some measurements for biotic and abiotic stress, as well as for quality assessments, are done by invasive measures. The new evolving sensor technologies provide the opportunity to perform non-destructive evaluations of phenotypic traits using different field phenotyping platforms. One of the biggest technical challenges for field phenotyping of grapevines are the varying light conditions and the background. In the present study the Phenoliner is presented, which represents a novel type of a robust field phenotyping platform. The vehicle is based on a grape harvester following the concept of a moveable tunnel. The tunnel it is equipped with different sensor systems (RGB and NIR camera system, hyperspectral camera, RTK-GPS, orientation sensor) and an artificial broadband light source. It is independent from external light conditions and in combination with artificial background, the Phenoliner enables standardised acquisition of high-quality, geo-referenced sensor data.},
    doi = {10.3390/s17071625},
    }

  • F. Liebisch, M. Popovic, J. Pfeifer, R. Khanna, P. Lottes, C. Stachniss, A. Pretto, I. S. Kyu, J. Nieto, R. Siegwart, and A. Walter, “Automatic UAV-based field inspection campaigns for weeding in row crops,” in Proc. of the 10th EARSeL SIG Imaging Spectroscopy Workshop, 2017.
    [BibTeX]
    @InProceedings{liebisch2017earsel,
    title = {Automatic UAV-based field inspection campaigns for weeding in row crops},
    author = {F. Liebisch and M. Popovic and J. Pfeifer and R. Khanna and P. Lottes and C. Stachniss and A. Pretto and I. Sa and J. Nieto and R. Siegwart and A. Walter},
    booktitle = {Proc. of the 10th EARSeL SIG Imaging Spectroscopy Workshop},
    year = {2017},
    }

  • P. Lottes, M. Höferlin, S. Sander, and C. Stachniss, “Effective Vision-based Classification for Separating Sugar Beets and Weeds for Precision Farming,” Journal of Field Robotics, vol. 34, pp. 1160-1178, 2017. doi:10.1002/rob.21675
    [BibTeX] [PDF]
    @Article{lottes2017jfr,
    title = {Effective Vision-based Classification for Separating Sugar Beets and Weeds for Precision Farming},
    author = {Lottes, Philipp and H\"oferlin, Markus and Sander, Slawomir and Stachniss, Cyrill},
    journal = {Journal of Field Robotics},
    year = {2017},
    volume = {34},
    number = {6},
    pages = {1160--1178},
    doi = {10.1002/rob.21675},
    issn = {1556-4967},
    timestamp = {2016.10.5},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/lottes16jfr.pdf},
    }

  • N. Chebrolu, P. Lottes, A. Schaefer, W. Winterhalter, W. Burgard, and C. Stachniss, “Agricultural robot dataset for plant classification, localization and mapping on sugar beet fields,” The Intl. Journal of Robotics Research, 2017. doi:10.1177/0278364917720510
    [BibTeX] [PDF]
    @Article{chebrolu2017ijrr,
    title = {Agricultural robot dataset for plant classification, localization and mapping on sugar beet fields},
    author = {N. Chebrolu and P. Lottes and A. Schaefer and W. Winterhalter and W. Burgard and C. Stachniss},
    journal = ijrr,
    year = {2017},
    doi = {10.1177/0278364917720510},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/chebrolu2017ijrr.pdf},
    }

  • P. Lottes, R. Khanna, J. Pfeifer, R. Siegwart, and C. Stachniss, “UAV-Based Crop and Weed Classification for Smart Farming,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2017.
    [BibTeX] [PDF]
    @inproceedings{lottes2017icra,
      author    = {P. Lottes and R. Khanna and J. Pfeifer and R. Siegwart and C. Stachniss},
      title     = {UAV-Based Crop and Weed Classification for Smart Farming},
      booktitle = icra,
      year      = {2017},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/lottes17icra.pdf},
    }

  • P. Lottes and C. Stachniss, “Semi-Supervised Online Visual Crop and Weed Classification in Precision Farming Exploiting Plant Arrangement,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2017.
    [BibTeX] [PDF]
    @inproceedings{lottes2017iros,
      author    = {P. Lottes and C. Stachniss},
      title     = {Semi-Supervised Online Visual Crop and Weed Classification in Precision Farming Exploiting Plant Arrangement},
      booktitle = iros,
      year      = {2017},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/lottes17iros.pdf},
    }

  • C. Merfels and C. Stachniss, “Sensor Fusion for Self-Localisation of Automated Vehicles,” Journal of Photogrammetry, Remote Sensing and Geoinformation Science (PFG), 2017.
    [BibTeX] [PDF]
    @article{merfels2017pfg,
      author  = {Merfels, C. and Stachniss, C.},
      title   = {Sensor Fusion for Self-Localisation of Automated Vehicles},
      journal = pfg,
      year    = {2017},
      url     = {https://link.springer.com/article/10.1007/s41064-017-0008-1},
    }

  • A. Milioto, P. Lottes, and C. Stachniss, “Real-time Blob-wise Sugar Beets vs Weeds Classification for Monitoring Fields using Convolutional Neural Networks,” in Proc. of the ISPRS Conf. on Unmanned Aerial Vehicles in Geomatics (UAV-g), 2017.
    [BibTeX] [PDF]

    UAVs are becoming an important tool for field monitoring and precision farming. A prerequisite for observing and analyzing fields is the ability to identify crops and weeds from image data. In this paper, we address the problem of detecting the sugar beet plants and weeds in the field based solely on image data. We propose a system that combines vegetation detection and deep learning to obtain a high-quality classification of the vegetation in the field into value crops and weeds. We implemented and thoroughly evaluated our system on image data collected from different sugar beet fields and illustrate that our approach allows for accurately identifying the weeds on the field.

    @inproceedings{milioto2017uavg,
      author    = {A. Milioto and P. Lottes and C. Stachniss},
      title     = {Real-time Blob-wise Sugar Beets vs Weeds Classification for Monitoring Fields using Convolutional Neural Networks},
      booktitle = uavg,
      year      = {2017},
      url       = {https://www.ipb.uni-bonn.de/pdfs/milioto17uavg.pdf},
      abstract  = {UAVs are becoming an important tool for field monitoring and precision farming. A prerequisite for observing and analyzing fields is the ability to identify crops and weeds from image data. In this paper, we address the problem of detecting the sugar beet plants and weeds in the field based solely on image data. We propose a system that combines vegetation detection and deep learning to obtain a high-quality classification of the vegetation in the field into value crops and weeds. We implemented and thoroughly evaluated our system on image data collected from different sugar beet fields and illustrate that our approach allows for accurately identifying the weeds on the field.},
    }

  • L. Nardi and C. Stachniss, “User Preferred Behaviors for Robot Navigation Exploiting Previous Experiences,” Robotics and Autonomous Systems, 2017. doi:10.1016/j.robot.2017.08.014
    [BibTeX] [PDF]

    Industry demands flexible robots that are able to accomplish different tasks at different locations such as navigation and mobile manipulation. Operators often require mobile robots operating on factory floors to follow definite and predictable behaviors. This becomes particularly important when a robot shares the workspace with other moving entities. In this paper, we present a system for robot navigation that exploits previous experiences to generate predictable behaviors that meet user’s preferences. Preferences are not explicitly formulated but implicitly extracted from robot experiences and automatically considered to plan paths for the successive tasks without requiring experts to hard-code rules or strategies. Our system aims at accomplishing navigation behaviors that follow user’s preferences also to avoid dynamic obstacles. We achieve this by considering a probabilistic approach for modeling uncertain trajectories of the moving entities that share the workspace with the robot. We implemented and thoroughly tested our system both in simulation and on a real mobile robot. The extensive experiments presented in this paper demonstrate that our approach allows a robot for successfully navigating while performing predictable behaviors and meeting user’s preferences

    @Article{nardi2017jras,
    title = {User Preferred Behaviors for Robot Navigation Exploiting Previous Experiences},
    author = {L. Nardi and C. Stachniss},
    journal = jras,
    year = {2017},
    doi = {10.1016/j.robot.2017.08.014},
    abstract = {Industry demands flexible robots that are able to accomplish different tasks at different locations such as navigation and mobile manipulation. Operators often require mobile robots operating on factory floors to follow definite and predictable behaviors. This becomes particularly important when a robot shares the workspace with other moving entities. In this paper, we present a system for robot navigation that exploits previous experiences to generate predictable behaviors that meet user’s preferences. Preferences are not explicitly formulated but implicitly extracted from robot experiences and automatically considered to plan paths for the successive tasks without requiring experts to hard-code rules or strategies. Our system aims at accomplishing navigation behaviors that follow user’s preferences also to avoid dynamic obstacles. We achieve this by considering a probabilistic approach for modeling uncertain trajectories of the moving entities that share the workspace with the robot. We implemented and thoroughly tested our system both in simulation and on a real mobile robot. The extensive experiments presented in this paper demonstrate that our approach allows a robot for successfully navigating while performing predictable behaviors and meeting user’s preferences},
    url = {https://www.ipb.uni-bonn.de/pdfs/nardi17jras.pdf},
    }

  • E. Palazzolo and C. Stachniss, “Information-Driven Autonomous Exploration for a Vision-Based MAV,” in ISPRS Annals of the Photogrammetry, Remote Sensing and Spatial Information Sciences, 2017.
    [BibTeX] [PDF]
    @inproceedings{palazzolo2017uavg,
      author    = {E. Palazzolo and C. Stachniss},
      title     = {Information-Driven Autonomous Exploration for a Vision-Based MAV},
      booktitle = {ISPRS Annals of the Photogrammetry, Remote Sensing and Spatial Information Sciences},
      year      = {2017},
      url       = {https://www.ipb.uni-bonn.de/pdfs/palazzolo2017uavg.pdf},
    }

  • E. Palazzolo and C. Stachniss, “Change Detection in 3D Models Based on Camera Images,” in 9th Workshop on Planning, Perception and Navigation for Intelligent Vehicles at the IEEE/RSJ Int. Conf. on Intelligent Robots and Systems (IROS), 2017.
    [BibTeX] [PDF]
    @InProceedings{palazzolo2017irosws,
    title = {Change Detection in 3D Models Based on Camera Images},
    author = {E. Palazzolo and C. Stachniss},
    booktitle = {9th Workshop on Planning, Perception and Navigation for Intelligent Vehicles at the IEEE/RSJ Int. Conf. on Intelligent Robots and Systems (IROS)},
    year = {2017},
    url = {https://www.ipb.uni-bonn.de/pdfs/palazzolo2017irosws.pdf},
    }

  • J. Schneider, C. Stachniss, and W. Förstner, “On the Quality and Efficiency of Approximate Solutions to Bundle Adjustment with Epipolar and Trifocal Constraints,” in ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences, 2017, pp. 81-88. doi:10.5194/isprs-annals-IV-2-W3-81-2017
    [BibTeX] [PDF]

    Bundle adjustment is a central part of most visual SLAM and Structure from Motion systems and thus a relevant component of UAVs equipped with cameras. This paper makes two contributions to bundle adjustment. First, we present a novel approach which exploits trifocal constraints, i.e., constraints resulting from corresponding points observed in three camera images, which allows to estimate the camera pose parameters without 3D point estimation. Second, we analyze the quality loss compared to the optimal bundle adjustment solution when applying different types of approximations to the constrained optimization problem to increase efficiency. We implemented and thoroughly evaluated our approach using a UAV performing mapping tasks in outdoor environments. Our results indicate that the complexity of the constraint bundle adjustment can be decreased without loosing too much accuracy.

    @inproceedings{schneider2017uavg,
      author    = {J. Schneider and C. Stachniss and W. F\"orstner},
      title     = {On the Quality and Efficiency of Approximate Solutions to Bundle Adjustment with Epipolar and Trifocal Constraints},
      booktitle = {ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences},
      volume    = {IV-2/W3},
      pages     = {81-88},
      year      = {2017},
      doi       = {10.5194/isprs-annals-IV-2-W3-81-2017},
      url       = {https://www.isprs-ann-photogramm-remote-sens-spatial-inf-sci.net/IV-2-W3/81/2017/isprs-annals-IV-2-W3-81-2017.pdf},
      abstract  = {Bundle adjustment is a central part of most visual SLAM and Structure from Motion systems and thus a relevant component of UAVs equipped with cameras. This paper makes two contributions to bundle adjustment. First, we present a novel approach which exploits trifocal constraints, i.e., constraints resulting from corresponding points observed in three camera images, which allows to estimate the camera pose parameters without 3D point estimation. Second, we analyze the quality loss compared to the optimal bundle adjustment solution when applying different types of approximations to the constrained optimization problem to increase efficiency. We implemented and thoroughly evaluated our approach using a UAV performing mapping tasks in outdoor environments. Our results indicate that the complexity of the constraint bundle adjustment can be decreased without loosing too much accuracy.},
    }

  • O. Vysotska and C. Stachniss, “Improving SLAM by Exploiting Building Information from Publicly Available Maps and Localization Priors,” Journal of Photogrammetry, Remote Sensing and Geoinformation Science (PFG), vol. 85, iss. 1, pp. 53-65, 2017.
    [BibTeX] [PDF] [Video]
    @article{vysotska2017pfg,
      author   = {Vysotska, O. and Stachniss, C.},
      title    = {Improving SLAM by Exploiting Building Information from Publicly Available Maps and Localization Priors},
      journal  = pfg,
      volume   = {85},
      number   = {1},
      pages    = {53-65},
      year     = {2017},
      url      = {https://www.ipb.uni-bonn.de/pdfs/vysotska2016pfg.pdf},
      videourl = {https://www.youtube.com/watch?v=dKHlF3OkEV4},
    }

  • O. Vysotska and C. Stachniss, “Relocalization under Substantial Appearance Changes using Hashing,” in IROS Workshop on Planning, Perception and Navigation for Intelligent Vehicles, 2017.
    [BibTeX] [PDF] [Code]
    [none]
    @InProceedings{vysotska2017irosws,
    title = {Relocalization under Substantial Appearance Changes using Hashing},
    author = {O. Vysotska and C. Stachniss},
    booktitle = {IROS Workshop on Planning, Perception and Navigation for Intelligent Vehicles},
    year = {2017},
    url = {https://www.ipb.uni-bonn.de/pdfs/vysotska2017irosws.pdf},
    codeurl = {https://github.com/Photogrammetry-Robotics-Bonn/vpr_relocalization},
    }

  • J. Jung, C. Stachniss, and C. Kim, “Automatic room segmentation of 3D laser data using morphological processing,” ISPRS International Journal of Geo-Information, 2017.
    [BibTeX] [PDF]
    @article{jung2017ijgi,
      title   = {Automatic room segmentation of 3D laser data using morphological processing},
      author  = {J. Jung and C. Stachniss and C. Kim},
      journal = {ISPRS International Journal of Geo-Information},
      year    = {2017},
      url     = {https://www.mdpi.com/2220-9964/6/7/206},
    }

  • R. Schirmer, P. Biber, and C. Stachniss, “Efficient Path Planning in Belief Space for Safe Navigation,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2017.
    [BibTeX] [PDF]

    Robotic lawn-mowers are required to stay within a predefined working area, otherwise they may drive into a pond or on the street. This turns navigation and path planning into safety critical components. If we consider using SLAM techniques in that context, we must be able to provide safety guarantees in the presence of sensor/actuator noise and featureless areas in the environment. In this paper, we tackle the problem of planning a path that maximizes robot safety while navigating inside the working area and under the constraints of limited computing resources and cheap sensors. Our approach uses a map of the environment to estimate localizability at all locations, and it uses these estimates to search for a path from start to goal in belief space using an extended heuristic search algorithm. We implemented our approach using C++ and ROS and thoroughly tested it on simulation data recorded on eight different gardens, as well as on a real robot. The experiments presented in this paper show that our approach leads to short computation times and short paths while maximizing robot safety under certain assumptions.

    @inproceedings{schirmer2017iros,
      title     = {Efficient Path Planning in Belief Space for Safe Navigation},
      author    = {R. Schirmer and P. Biber and C. Stachniss},
      booktitle = iros,
      year      = {2017},
      url       = {https://www.ipb.uni-bonn.de/pdfs/schirmer17iros.pdf},
      abstract  = {Robotic lawn-mowers are required to stay within a predefined working area, otherwise they may drive into a pond or on the street. This turns navigation and path planning into safety critical components. If we consider using SLAM techniques in that context, we must be able to provide safety guarantees in the presence of sensor/actuator noise and featureless areas in the environment. In this paper, we tackle the problem of planning a path that maximizes robot safety while navigating inside the working area and under the constraints of limited computing resources and cheap sensors. Our approach uses a map of the environment to estimate localizability at all locations, and it uses these estimates to search for a path from start to goal in belief space using an extended heuristic search algorithm. We implemented our approach using C++ and ROS and thoroughly tested it on simulation data recorded on eight different gardens, as well as on a real robot. The experiments presented in this paper show that our approach leads to short computation times and short paths while maximizing robot safety under certain assumptions.},
    }

  • K. H. Huang and C. Stachniss, “Extrinsic Multi-Sensor Calibration For Mobile Robots Using the Gauss-Helmert Model,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2017.
    [BibTeX] [PDF]
    @InProceedings{huang2017iros,
    author = {K. H. Huang and C. Stachniss},
    title = {Extrinsic Multi-Sensor Calibration For Mobile Robots Using the Gauss-Helmert Model},
    booktitle = iros,
    year = {2017},
    url = {https://www.ipb.uni-bonn.de/pdfs/huang2017iros.pdf},
    }

  • A. Bettge, R. Roscher, and S. Wenzel, “Deep self-taught learning for remote sensing image classification,” in Proc. Conf. on Big Data from Space, 2017. doi:10.2760/383579
    [BibTeX] [PDF]

    This paper addresses the land cover classification task for remote sensing images by deep self-taught learning. Our self-taught learning approach learns suitable feature representations of the input data using sparse representation and undercomplete dictionary learning. We propose a deep learning framework which extracts representations in multiple layers and use the output of the deepest layer as input to a classification algorithm. We evaluate our approach using a multispectral Landsat 5 TM image of a study area in the North of Novo Progresso (South America) and the Zurich Summer Data Set provided by the University of Zurich. Experiments indicate that features learned by a deep self-taught learning framework can be used for classification and improve the results compared to classification results using the original feature representation.

    @inproceedings{bettge2017bids,
      title     = {Deep self-taught learning for remote sensing image classification},
      author    = {Bettge, A. and Roscher, R. and Wenzel, S.},
      booktitle = {Proc. Conf. on Big Data from Space},
      year      = {2017},
      doi       = {10.2760/383579},
      url       = {https://publications.jrc.ec.europa.eu/repository/bitstream/JRC108361/jrc180361_procbids17.pdf},
      abstract  = {This paper addresses the land cover classification task for remote sensing images by deep self-taught learning. Our self-taught learning approach learns suitable feature representations of the input data using sparse representation and undercomplete dictionary learning. We propose a deep learning framework which extracts representations in multiple layers and use the output of the deepest layer as input to a classification algorithm. We evaluate our approach using a multispectral Landsat 5 TM image of a study area in the North of Novo Progresso (South America) and the Zurich Summer Data Set provided by the University of Zurich. Experiments indicate that features learned by a deep self-taught learning framework can be used for classification and improve the results compared to classification results using the original feature representation.},
    }

  • A. Braakmann-Folgmann, R. Roscher, S. Wenzel, B. Uebbing, and J. Kusche, “Sea level anomaly prediction using recurrent neural networks,” in Proc. of the Conf. on Big Data from Space, 2017. doi:10.2760/383579
    [BibTeX] [PDF]

    Sea level change, one of the most dire impacts of anthropogenic global warming, will affect a large amount of the world’s population. However, sea level change is not uniform in time and space, and the skill of conventional prediction methods is limited due to the ocean’s internal variabi-lity on timescales from weeks to decades. Here we study the potential of neural network methods which have been used successfully in other applications, but rarely been applied for this task. We develop a combination of a convolutional neural network (CNN) and a recurrent neural network (RNN) to analyse both the spatial and the temporal evolution of sea level and to suggest an independent, accurate method to predict interannual sea level anomalies (SLA). We test our method for the northern and equatorial Pacific Ocean, using gridded altimeter-derived SLA data. We show that the used network designs outperform a simple regression and that adding a CNN improves the skill significantly. The predictions are stable over several years.

    @inproceedings{braakmann-folgmann2017bids,
      title     = {Sea level anomaly prediction using recurrent neural networks},
      author    = {Braakmann-Folgmann, A. and Roscher, R. and Wenzel, S. and Uebbing, B. and Kusche, J.},
      booktitle = {Proc. of the Conf. on Big Data from Space},
      year      = {2017},
      doi       = {10.2760/383579},
      url       = {https://publications.jrc.ec.europa.eu/repository/bitstream/JRC108361/jrc180361_procbids17.pdf},
      abstract  = {Sea level change, one of the most dire impacts of anthropogenic global warming, will affect a large amount of the world's population. However, sea level change is not uniform in time and space, and the skill of conventional prediction methods is limited due to the ocean's internal variabi-lity on timescales from weeks to decades. Here we study the potential of neural network methods which have been used successfully in other applications, but rarely been applied for this task. We develop a combination of a convolutional neural network (CNN) and a recurrent neural network (RNN) to analyse both the spatial and the temporal evolution of sea level and to suggest an independent, accurate method to predict interannual sea level anomalies (SLA). We test our method for the northern and equatorial Pacific Ocean, using gridded altimeter-derived SLA data. We show that the used network designs outperform a simple regression and that adding a CNN improves the skill significantly. The predictions are stable over several years.},
    }

  • R. Roscher, L. Drees, and S. Wenzel, “Sparse representation-based archetypal graphs for spectral clustering,” in IEEE International Geoscience and Remote Sensing Symposium, 2017.
    [BibTeX] [PDF]
    @InProceedings{roscher2017igrss,
    author = {Roscher, R. and Drees, L. and Wenzel, S.},
    title = {Sparse representation-based archetypal graphs for spectral clustering},
    booktitle = {IEEE International Geoscience and Remote Sensing Symposium},
    year = {2017},
    url = {https://www.researchgate.net/publication/321680475_Sparse_representation-based_archetypal_graphs_for_spectral_clustering},
    }

2016

  • N. Abdo, C. Stachniss, L. Spinello, and W. Burgard, “Organizing Objects by Predicting User Preferences Through Collaborative Filtering,” The Intl. Journal of Robotics Research, 2016.
    [BibTeX] [PDF]
    [none]
    @Article{abdo16ijrr,
    title = {Organizing Objects by Predicting User Preferences Through Collaborative Filtering},
    author = {N. Abdo and C. Stachniss and L. Spinello and W. Burgard},
    journal = ijrr,
    year = {2016},
    note = {arXiv:1512.06362},
    url = {https://arxiv.org/abs/1512.06362},
    }

  • C. Beekmans, J. Schneider, T. Läbe, M. Lennefer, C. Stachniss, and C. Simmer, “Cloud Photogrammetry with Dense Stereo for Fisheye Cameras,” Atmospheric Chemistry and Physics (ACP), vol. 16, iss. 22, pp. 14231-14248, 2016. doi:10.5194/acp-16-14231-2016
    [BibTeX] [PDF]

    We present a novel approach for dense 3-D cloud reconstruction above an area of 10 × 10 km2 using two hemispheric sky imagers with fisheye lenses in a stereo setup. We examine an epipolar rectification model designed for fisheye cameras, which allows the use of efficient out-of-the-box dense matching algorithms designed for classical pinhole-type cameras to search for correspondence information at every pixel. The resulting dense point cloud allows to recover a detailed and more complete cloud morphology compared to previous approaches that employed sparse feature-based stereo or assumed geometric constraints on the cloud field. Our approach is very efficient and can be fully automated. From the obtained 3-D shapes, cloud dynamics, size, motion, type and spacing can be derived, and used for radiation closure under cloudy conditions, for example. Fisheye lenses follow a different projection function than classical pinhole-type cameras and provide a large field of view with a single image. However, the computation of dense 3-D information is more complicated and standard implementations for dense 3-D stereo reconstruction cannot be easily applied. Together with an appropriate camera calibration, which includes internal camera geometry, global position and orientation of the stereo camera pair, we use the correspondence information from the stereo matching for dense 3-D stereo reconstruction of clouds located around the cameras. We implement and evaluate the proposed approach using real world data and present two case studies. In the first case, we validate the quality and accuracy of the method by comparing the stereo reconstruction of a stratocumulus layer with reflectivity observations measured by a cloud radar and the cloud-base height estimated from a Lidar-ceilometer. The second case analyzes a rapid cumulus evolution in the presence of strong wind shear.

    @article{beekmans16acp,
      author   = {C. Beekmans and J. Schneider and T. L\"abe and M. Lennefer and C. Stachniss and C. Simmer},
      title    = {Cloud Photogrammetry with Dense Stereo for Fisheye Cameras},
      journal  = {Atmospheric Chemistry and Physics (ACP)},
      volume   = {16},
      number   = {22},
      pages    = {14231-14248},
      year     = {2016},
      doi      = {10.5194/acp-16-14231-2016},
      url      = {https://www.ipb.uni-bonn.de/pdfs/beekmans16acp.pdf},
      abstract = {We present a novel approach for dense 3-D cloud reconstruction above an area of 10 × 10 km2 using two hemispheric sky imagers with fisheye lenses in a stereo setup. We examine an epipolar rectification model designed for fisheye cameras, which allows the use of efficient out-of-the-box dense matching algorithms designed for classical pinhole-type cameras to search for correspondence information at every pixel. The resulting dense point cloud allows to recover a detailed and more complete cloud morphology compared to previous approaches that employed sparse feature-based stereo or assumed geometric constraints on the cloud field. Our approach is very efficient and can be fully automated. From the obtained 3-D shapes, cloud dynamics, size, motion, type and spacing can be derived, and used for radiation closure under cloudy conditions, for example. Fisheye lenses follow a different projection function than classical pinhole-type cameras and provide a large field of view with a single image. However, the computation of dense 3-D information is more complicated and standard implementations for dense 3-D stereo reconstruction cannot be easily applied. Together with an appropriate camera calibration, which includes internal camera geometry, global position and orientation of the stereo camera pair, we use the correspondence information from the stereo matching for dense 3-D stereo reconstruction of clouds located around the cameras. We implement and evaluate the proposed approach using real world data and present two case studies. In the first case, we validate the quality and accuracy of the method by comparing the stereo reconstruction of a stratocumulus layer with reflectivity observations measured by a cloud radar and the cloud-base height estimated from a Lidar-ceilometer. The second case analyzes a rapid cumulus evolution in the presence of strong wind shear.},
    }

  • I. Bogoslavskyi, M. Mazuran, and C. Stachniss, “Robust Homing for Autonomous Robots,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2016.
    [BibTeX] [PDF] [Video]
    [none]
    @InProceedings{bogoslavskyi16icra,
    title = {Robust Homing for Autonomous Robots},
    author = {I. Bogoslavskyi and M. Mazuran and C. Stachniss},
    booktitle = icra,
    year = {2016},
    url = {https://www.ipb.uni-bonn.de/pdfs/bogoslavskyi16icra.pdf},
    videourl = {https://www.youtube.com/watch?v=sUvDvq91Vpw},
    }

  • I. Bogoslavskyi and C. Stachniss, “Fast Range Image-Based Segmentation of Sparse 3D Laser Scans for Online Operation,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2016.
    [BibTeX] [PDF] [Code] [Video]
    [none]
    @InProceedings{bogoslavskyi16iros,
    title = {Fast Range Image-Based Segmentation of Sparse 3D Laser Scans for Online Operation},
    author = {I. Bogoslavskyi and C. Stachniss},
    booktitle = iros,
    year = {2016},
    url = {https://www.ipb.uni-bonn.de/pdfs/bogoslavskyi16iros.pdf},
    codeurl = {https://github.com/Photogrammetry-Robotics-Bonn/depth_clustering},
    videourl = {https://www.youtube.com/watch?v=6WqsOlHGTLA},
    }

  • W. Förstner, “A Future for Learning Semantic Models of Man-Made Environments,” in Proc. of Int. Conf. on Pattern Recognition (ICPR), 2016.
    [BibTeX] [PDF]

    Deriving semantic 3D models of man-made environments hitherto has not reached the desired maturity which makes human interaction obsolete. Man-made environments play a central role in navigation, city planning, building management systems, disaster management or augmented reality. They are characterised by rich geometric and semantic structures. These cause conceptual problems when learning generic models or when developing automatic acquisition systems. The problems appear to be caused by (1) the incoherence of the models for signal analysis, (2) the type of interplay between discrete and continuous geometric representations, (3) the inefficiency of the interaction between crisp models, such as partonomies and taxonomies, and soft models, mostly having a probabilistic nature, and (4) the vagueness of the used notions in the envisaged application domains. The paper wants to encourage the development and learning of generative models, specifically for man-made objects, to be able to understand, reason about, and explain interpretations.

    @inproceedings{foerstner2016future,
      author    = {W. F{\"o}rstner},
      title     = {{A Future for Learning Semantic Models of Man-Made Environments}},
      booktitle = {Proc. of Int. Conf. on Pattern Recognition (ICPR)},
      year      = {2016},
      url       = {https://www.ipb.uni-bonn.de/pdfs/foerstner16Future.pdf},
      abstract  = {Deriving semantic 3D models of man-made environments hitherto has not reached the desired maturity which makes human interaction obsolete. Man-made environments play a central role in navigation, city planning, building management systems, disaster management or augmented reality. They are characterised by rich geometric and semantic structures. These cause conceptual problems when learning generic models or when developing automatic acquisition systems. The problems appear to be caused by (1) the incoherence of the models for signal analysis, (2) the type of interplay between discrete and continuous geometric representations, (3) the inefficiency of the interaction between crisp models, such as partonomies and taxonomies, and soft models, mostly having a probabilistic nature, and (4) the vagueness of the used notions in the envisaged application domains. The paper wants to encourage the development and learning of generative models, specifically for man-made objects, to be able to understand, reason about, and explain interpretations.},
    }

  • W. Förstner and B. P. Wrobel, Photogrammetric Computer Vision – Statistics, Geometry, Orientation and Reconstruction, Springer, 2016.
    [BibTeX]
    @book{foerstner2016photogrammetric,
      author    = {W. F{\"o}rstner and B. P. Wrobel},
      title     = {{Photogrammetric Computer Vision -- Statistics, Geometry, Orientation and Reconstruction}},
      publisher = {Springer},
      year      = {2016},
    }

  • B. Franke, J. Plante, R. Roscher, A. Lee, C. Smyth, A. Hatefi, F. Chen, E. Gil, A. Schwing, A. Selvitella, M. M. Hoffman, R. Grosse, D. Hendricks, and N. Reid, “Statistical Inference, Learning and Models in Big Data,” International Statistical Review, 2016.
    [BibTeX] [PDF]

    Big data provides big opportunities for statistical inference, but perhaps even bigger challenges, often related to differences in volume, variety, velocity, and veracity of information when compared to smaller carefully collected datasets. From January to June, 2015, the Canadian Institute of Statistical Sciences organized a thematic program on Statistical Inference, Learning and Models in Big Data. This paper arose from presentations and discussions that took place during the thematic program.

    @Article{franke2016bigdata,
    title = {Statistical Inference, Learning and Models in Big Data},
    author = {Franke, Beate and Plante, Jean-Fran\c{c}ois and Roscher, Ribana and Lee, Annie and Smyth, Cathal and Hatefi, Armin and Chen, Fuqi and Gil, Einat and Schwing, Alex and Selvitella, Alessandro and Hoffman, Michael M. and Grosse, Roger and Hendricks, Dieter and Reid, Nancy},
    journal = {International Statistical Review},
    year = {2016},
    note = {to appear},
    abstract = {Big data provides big opportunities for statistical inference, but perhaps even bigger challenges, often related to differences in volume, variety, velocity, and veracity of information when compared to smaller carefully collected datasets. From January to June, 2015, the Canadian Institute of Statistical Sciences organized a thematic program on Statistical Inference, Learning and Models in Big Data. This paper arose from presentations and discussions that took place during the thematic program.},
    url = {https://onlinelibrary.wiley.com/doi/10.1111/insr.12176/full},
    }

  • M. Laîné, S. Cruciani, E. Palazzolo, N. J. Britton, X. Cavarelli, and K. Yoshida, “Navigation System for a Small Size Lunar Exploration Rover with a Monocular Omnidirectional Camera,” in Proc. SPIE, 2016. doi:10.1117/12.2242871
    [BibTeX]
    @inproceedings{laine16spie,
      author    = {M. La{\^{i}}n{\'{e}} and S. Cruciani and E. Palazzolo and N.J. Britton and X. Cavarelli and K. Yoshida},
      title     = {Navigation System for a Small Size Lunar Exploration Rover with a Monocular Omnidirectional Camera},
      booktitle = {Proc. SPIE},
      volume    = {10011},
      year      = {2016},
      doi       = {10.1117/12.2242871},
    }

  • F. Liebisch, J. Pfeifer, R. Khanna, P. Lottes, C. Stachniss, T. Falck, S. Sander, R. Siegwart, A. Walter, and E. Galceran, “Flourish – A robotic approach for automation in crop management,” in Proc. of the Workshop für Computer-Bildanalyse und unbemannte autonom fliegende Systeme in der Landwirtschaft, 2016.
    [BibTeX] [PDF]
    @inproceedings{liebisch16wslw,
      author    = {F. Liebisch and J. Pfeifer and R. Khanna and P. Lottes and C. Stachniss and T. Falck and S. Sander and R. Siegwart and A. Walter and E. Galceran},
      title     = {Flourish -- A robotic approach for automation in crop management},
      booktitle = {Proc. of the Workshop f\"ur Computer-Bildanalyse und unbemannte autonom fliegende Systeme in der Landwirtschaft},
      year      = {2016},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/liebisch16cbaws.pdf},
      timestamp = {2016.06.15},
    }

  • C. Stachniss, J. Leonard, and S. Thrun, “Springer Handbook of Robotics, 2nd edition,” , B. Siciliano and O. Khatib, Eds., Springer, 2016.
    [BibTeX]
    @inbook{springerbook-slamchapter,
      author    = {C. Stachniss and J. Leonard and S. Thrun},
      editor    = {B. Siciliano and O. Khatib},
      title     = {Springer Handbook of Robotics, 2nd edition},
      chapter   = {Chapt.~46: Simultaneous Localization and Mapping},
      publisher = {Springer},
      year      = {2016},
    }

  • P. Lottes, M. Höferlin, S. Sander, M. Müter, P. Schulze-Lammers, and C. Stachniss, “An Effective Classification System for Separating Sugar Beets and Weeds for Precision Farming Applications,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2016.
    [BibTeX] [PDF]
    @inproceedings{lottes2016icra,
      author    = {P. Lottes and M. H\"oferlin and S. Sander and M. M\"uter and P. Schulze-Lammers and C. Stachniss},
      title     = {An Effective Classification System for Separating Sugar Beets and Weeds for Precision Farming Applications},
      booktitle = icra,
      year      = {2016},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/lottes16icra.pdf},
      timestamp = {2016.01.15},
    }

  • B. Mack, R. Roscher, S. Stenzel, H. Feilhauer, S. Schmidtlein, and B. Waske, “Mapping raised bogs with an iterative one-class classification approach,” ISPRS Journal of Photogrammetry and Remote Sensing, vol. 120, pp. 53-64, 2016. doi:https://dx.doi.org/10.1016/j.isprsjprs.2016.07.008
    [BibTeX] [PDF]

    Abstract Land use and land cover maps are one of the most commonly used remote sensing products. In many applications the user only requires a map of one particular class of interest, e.g. a specific vegetation type or an invasive species. One-class classifiers are appealing alternatives to common supervised classifiers because they can be trained with labeled training data of the class of interest only. However, training an accurate one-class classification (OCC) model is challenging, particularly when facing a large image, a small class and few training samples. To tackle these problems we propose an iterative \{OCC\} approach. The presented approach uses a biased Support Vector Machine as core classifier. In an iterative pre-classification step a large part of the pixels not belonging to the class of interest is classified. The remaining data is classified by a final classifier with a novel model and threshold selection approach. The specific objective of our study is the classification of raised bogs in a study site in southeast Germany, using multi-seasonal RapidEye data and a small number of training sample. Results demonstrate that the iterative \{OCC\} outperforms other state of the art one-class classifiers and approaches for model selection. The study highlights the potential of the proposed approach for an efficient and improved mapping of small classes such as raised bogs. Overall the proposed approach constitutes a feasible approach and useful modification of a regular one-class classifier.

    @Article{mack2016raised,
    title = {Mapping raised bogs with an iterative one-class classification approach},
    author = {Mack, Benjamin and Roscher, Ribana and Stenzel, Stefanie and Feilhauer, Hannes and Schmidtlein, Sebastian and Waske, Bj{\"o}rn},
    journal = {{ISPRS} Journal of Photogrammetry and Remote Sensing},
    year = {2016},
    pages = {53--64},
    volume = {120},
    abstract = {Land use and land cover maps are one of the most commonly used remote sensing products. In many applications the user only requires a map of one particular class of interest, e.g. a specific vegetation type or an invasive species. One-class classifiers are appealing alternatives to common supervised classifiers because they can be trained with labeled training data of the class of interest only. However, training an accurate one-class classification (OCC) model is challenging, particularly when facing a large image, a small class and few training samples. To tackle these problems we propose an iterative OCC approach. The presented approach uses a biased Support Vector Machine as core classifier. In an iterative pre-classification step a large part of the pixels not belonging to the class of interest is classified. The remaining data is classified by a final classifier with a novel model and threshold selection approach. The specific objective of our study is the classification of raised bogs in a study site in southeast Germany, using multi-seasonal RapidEye data and a small number of training sample. Results demonstrate that the iterative OCC outperforms other state of the art one-class classifiers and approaches for model selection. The study highlights the potential of the proposed approach for an efficient and improved mapping of small classes such as raised bogs. Overall the proposed approach constitutes a feasible approach and useful modification of a regular one-class classifier.},
    doi = {10.1016/j.isprsjprs.2016.07.008},
    issn = {0924-2716},
    keywords = {Remote sensing},
    url = {https://www.sciencedirect.com/science/article/pii/S0924271616302180},
    }

  • C. Merfels and C. Stachniss, “Pose Fusion with Chain Pose Graphs for Automated Driving,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2016.
    [BibTeX] [PDF]
    @inproceedings{merfels16iros,
      author    = {Ch. Merfels and C. Stachniss},
      title     = {Pose Fusion with Chain Pose Graphs for Automated Driving},
      booktitle = iros,
      year      = {2016},
      url       = {https://www.ipb.uni-bonn.de/pdfs/merfels16iros.pdf},
    }

  • L. Nardi and C. Stachniss, “Experience-Based Path Planning for Mobile Robots Exploiting User Preferences,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2016. doi:10.1109/IROS.2016.7759197
    [BibTeX] [PDF]

    The demand for flexible industrial robotic solutions that are able to accomplish tasks at different locations in a factory is growing more and more. When deploying mobile robots in a factory environment, the predictability and reproducibility of their behaviors become important and are often requested. In this paper, we propose an easy-to-use motion planning scheme that can take into account user preferences for robot navigation. The preferences are extracted implicitly from the previous experiences or from demonstrations and are automatically considered in the subsequent planning steps. This leads to reproducible and thus better to predict navigation behaviors of the robot, without requiring experts to hard-coding control strategies or cost functions within a planner. Our system has been implemented and evaluated on a simulated KUKA mobile robot in different environments.

    @inproceedings{nardi16iros,
      author    = {L. Nardi and C. Stachniss},
      title     = {Experience-Based Path Planning for Mobile Robots Exploiting User Preferences},
      booktitle = iros,
      year      = {2016},
      doi       = {10.1109/IROS.2016.7759197},
      url       = {https://www.ipb.uni-bonn.de/pdfs/nardi16iros.pdf},
      abstract  = {The demand for flexible industrial robotic solutions that are able to accomplish tasks at different locations in a factory is growing more and more. When deploying mobile robots in a factory environment, the predictability and reproducibility of their behaviors become important and are often requested. In this paper, we propose an easy-to-use motion planning scheme that can take into account user preferences for robot navigation. The preferences are extracted implicitly from the previous experiences or from demonstrations and are automatically considered in the subsequent planning steps. This leads to reproducible and thus better to predict navigation behaviors of the robot, without requiring experts to hard-coding control strategies or cost functions within a planner. Our system has been implemented and evaluated on a simulated KUKA mobile robot in different environments.},
    }

  • S. Osswald, M. Bennewitz, W. Burgard, and C. Stachniss, “Speeding-Up Robot Exploration by Exploiting Background Information,” IEEE Robotics and Automation Letters (RA-L), 2016.
    [BibTeX] [PDF]
    @article{osswald16ral,
      author  = {S. Osswald and M. Bennewitz and W. Burgard and C. Stachniss},
      title   = {Speeding-Up Robot Exploration by Exploiting Background Information},
      journal = ral,
      year    = {2016},
      url     = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/osswald16ral.pdf},
    }

  • D. Perea-Ström, I. Bogoslavskyi, and C. Stachniss, “Robust Exploration and Homing for Autonomous Robots,” in Robotics and Autonomous Systems, 2016.
    [BibTeX] [PDF]
    @Article{perea16jras,
    title = {Robust Exploration and Homing for Autonomous Robots},
    author = {D. Perea-Str{\"o}m and I. Bogoslavskyi and C. Stachniss},
    journal = jras,
    year = {2016},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/perea16jras.pdf},
    }

  • R. Roscher, J. Behmann, A. -K. Mahlein, J. Dupuis, H. Kuhlmann, and L. Plümer, “Detection of Disease Symptoms on Hyperspectral 3D Plant Models,” in ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences, 2016, p. 89–96.
    [BibTeX]

    We analyze the benefit of combining hyperspectral images information with 3D geometry information for the detection of Cercospora leaf spot disease symptoms on sugar beet plants. Besides commonly used one-class Support Vector Machines, we utilize an unsupervised sparse representation-based approach with group sparsity prior. Geometry information is incorporated by representing each sample of interest with an inclination-sorted dictionary, which can be seen as an 1D topographic dictionary. We compare this approach with a sparse representation based approach without geometry information and One-Class Support Vector Machines. One-Class Support Vector Machines are applied to hyperspectral data without geometry information as well as to hyperspectral images with additional pixelwise inclination information. Our results show a gain in accuracy when using geometry information beside spectral information regardless of the used approach. However, both methods have different demands on the data when applied to new test data sets. One-Class Support Vector Machines require full inclination information on test and training data whereas the topographic dictionary approach only need spectral information for reconstruction of test data once the dictionary is build by spectra with inclination.

    @inproceedings{roscher2016detection,
      author    = {Roscher, R. and Behmann, J. and Mahlein, A.-K. and Dupuis, J. and Kuhlmann, H. and Pl{\"u}mer, L.},
      title     = {Detection of Disease Symptoms on Hyperspectral {3D} Plant Models},
      booktitle = {ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences},
      year      = {2016},
      pages     = {89--96},
      abstract  = {We analyze the benefit of combining hyperspectral images information with 3D geometry information for the detection of Cercospora leaf spot disease symptoms on sugar beet plants. Besides commonly used one-class Support Vector Machines, we utilize an unsupervised sparse representation-based approach with group sparsity prior. Geometry information is incorporated by representing each sample of interest with an inclination-sorted dictionary, which can be seen as an 1D topographic dictionary. We compare this approach with a sparse representation based approach without geometry information and One-Class Support Vector Machines. One-Class Support Vector Machines are applied to hyperspectral data without geometry information as well as to hyperspectral images with additional pixelwise inclination information. Our results show a gain in accuracy when using geometry information beside spectral information regardless of the used approach. However, both methods have different demands on the data when applied to new test data sets. One-Class Support Vector Machines require full inclination information on test and training data whereas the topographic dictionary approach only need spectral information for reconstruction of test data once the dictionary is build by spectra with inclination.},
    }

  • R. Roscher, J. Behmann, A. -K. Mahlein, and L. Plümer, “On the Benefit of Topographic Dictionaries for Detecting Disease Symptoms on Hyperspectral 3D Plant Models,” in Workshop on Hyperspectral Image and Signal Processing, 2016.
    [BibTeX]

    We analyze the benefit of using topographic dictionaries for a sparse representation (SR) approach for the detection of Cercospora leaf spot disease symptoms on sugar beet plants. Topographic dictionaries are an arranged set of basis elements in which neighbored dictionary elements tend to cause similar activations in the SR approach. In this paper, the dictionary is obtained from samples of a healthy plant and partly build in a topographic way by using hyperspectral as well as geometry information, i.e. depth and inclination. It turns out that hyperspectral signals of leafs show a typical structure depending on depth and inclination and thus, both influences can be disentangled in our approach. Rare signals which do not fit into this model, e.g. leaf veins, are also captured in the dictionary in a non-topographic way. A reconstruction error index is used as indicator, in which disease symptoms can be distinguished from healthy plant regions. The advantage of the presented approach is that full spectral and geometry information is needed only once to built the dictionary, whereas the sparse reconstruction is done solely on hyperspectral information.

    @InProceedings{roscher2016topographic,
    title = {On the Benefit of Topographic Dictionaries for Detecting Disease Symptoms on Hyperspectral {3D} Plant Models},
    author = {Roscher, R. and Behmann, J. and Mahlein, A.-K. and Pl{\"u}mer, L.},
    booktitle = {Workshop on Hyperspectral Image and Signal Processing},
    year = {2016},
    abstract = {We analyze the benefit of using topographic dictionaries for a sparse representation (SR) approach for the detection of Cercospora leaf spot disease symptoms on sugar beet plants. Topographic dictionaries are an arranged set of basis elements in which neighbored dictionary elements tend to cause similar activations in the SR approach. In this paper, the dictionary is obtained from samples of a healthy plant and partly build in a topographic way by using hyperspectral as well as geometry information, i.e. depth and inclination. It turns out that hyperspectral signals of leafs show a typical structure depending on depth and inclination and thus, both influences can be disentangled in our approach. Rare signals which do not fit into this model, e.g. leaf veins, are also captured in the dictionary in a non-topographic way. A reconstruction error index is used as indicator, in which disease symptoms can be distinguished from healthy plant regions. The advantage of the presented approach is that full spectral and geometry information is needed only once to built the dictionary, whereas the sparse reconstruction is done solely on hyperspectral information.},
    owner = {ribana},
    timestamp = {2016.06.20},
    }

  • R. Roscher, S. Wenzel, and B. Waske, “Discriminative Archetypal Self-taught Learning for Multispectral Landcover Classification,” in Proc. of Pattern Recognition in Remote Sensing 2016 (PRRS), Workshop at ICPR; to appear in IEEE Xplore, 2016.
    [BibTeX] [PDF]

    Self-taught learning (STL) has become a promising paradigm to exploit unlabeled data for classification. The most commonly used approach to self-taught learning is sparse representation, in which it is assumed that each sample can be represented by a weighted linear combination of elements of a unlabeled dictionary. This paper proposes discriminative archetypal self-taught learning for the application of landcover classification, in which unlabeled discriminative archetypal samples are selected to build a powerful dictionary. Our main contribution is to present an approach which utilizes reversible jump Markov chain Monte Carlo method to jointly determine the best set of archetypes and the number of elements to build the dictionary. Experiments are conducted using synthetic data, a multi-spectral Landsat 7 image of a study area in the Ukraine and the Zurich benchmark data set comprising 20 multispectral Quickbird images. Our results confirm that the proposed approach can learn discriminative features for classification and show better classification results compared to self-taught learning with the original feature representation and compared to randomly initialized archetypal dictionaries.

    @InProceedings{roscher2016discriminative,
    title = {Discriminative Archetypal Self-taught Learning for Multispectral Landcover Classification},
    author = {Roscher, R. and Wenzel, S. and Waske, B.},
    booktitle = {Proc. of Pattern Recognition in Remote Sensing 2016 (PRRS), Workshop at ICPR},
    year = {2016},
    note = {To appear in IEEE Xplore},
    abstract = {Self-taught learning (STL) has become a promising paradigm to exploit unlabeled data for classification. The most commonly used approach to self-taught learning is sparse representation, in which it is assumed that each sample can be represented by a weighted linear combination of elements of a unlabeled dictionary. This paper proposes discriminative archetypal self-taught learning for the application of landcover classification, in which unlabeled discriminative archetypal samples are selected to build a powerful dictionary. Our main contribution is to present an approach which utilizes reversible jump Markov chain Monte Carlo method to jointly determine the best set of archetypes and the number of elements to build the dictionary. Experiments are conducted using synthetic data, a multi-spectral Landsat 7 image of a study area in the Ukraine and the Zurich benchmark data set comprising 20 multispectral Quickbird images. Our results confirm that the proposed approach can learn discriminative features for classification and show better classification results compared to self-taught learning with the original feature representation and compared to randomly initialized archetypal dictionaries.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Roscher2016Discriminative.pdf},
    }

  • J. Schneider, C. Eling, L. Klingbeil, H. Kuhlmann, W. Förstner, and C. Stachniss, “Fast and Effective Online Pose Estimation and Mapping for UAVs,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2016, p. 4784–4791. doi:10.1109/ICRA.2016.7487682
    [BibTeX] [PDF]

    Online pose estimation and mapping in unknown environments is essential for most mobile robots. Especially autonomous unmanned aerial vehicles require good pose estimates at comparably high frequencies. In this paper, we propose an effective system for online pose and simultaneous map estimation designed for light-weight UAVs. Our system consists of two components: (1) real-time pose estimation combining RTK-GPS and IMU at 100 Hz and (2) an effective SLAM solution running at 10 Hz using image data from an omnidirectional multi-fisheye-camera system. The SLAM procedure combines spatial resection computed based on the map that is incrementally refined through bundle adjustment and combines the image data with raw GPS observations and IMU data on keyframes. The overall system yields a real-time, georeferenced pose at 100 Hz in GPS-friendly situations. Additionally, we obtain a precise pose and feature map at 10 Hz even in cases where the GPS is not observable or underconstrained. Our system has been implemented and thoroughly tested on a 5 kg copter and yields accurate and reliable pose estimation at high frequencies. We compare the point cloud obtained by our method with a model generated from georeferenced terrestrial laser scanner.

    @inproceedings{schneider16icra,
      author    = {J. Schneider and C. Eling and L. Klingbeil and H. Kuhlmann and W. F\"orstner and C. Stachniss},
      title     = {Fast and Effective Online Pose Estimation and Mapping for UAVs},
      booktitle = icra,
      year      = {2016},
      pages     = {4784--4791},
      doi       = {10.1109/ICRA.2016.7487682},
      url       = {https://www.ipb.uni-bonn.de/pdfs/schneider16icra.pdf},
      abstract  = {Online pose estimation and mapping in unknown environments is essential for most mobile robots. Especially autonomous unmanned aerial vehicles require good pose estimates at comparably high frequencies. In this paper, we propose an effective system for online pose and simultaneous map estimation designed for light-weight UAVs. Our system consists of two components: (1) real-time pose estimation combining RTK-GPS and IMU at 100 Hz and (2) an effective SLAM solution running at 10 Hz using image data from an omnidirectional multi-fisheye-camera system. The SLAM procedure combines spatial resection computed based on the map that is incrementally refined through bundle adjustment and combines the image data with raw GPS observations and IMU data on keyframes. The overall system yields a real-time, georeferenced pose at 100 Hz in GPS-friendly situations. Additionally, we obtain a precise pose and feature map at 10 Hz even in cases where the GPS is not observable or underconstrained. Our system has been implemented and thoroughly tested on a 5 kg copter and yields accurate and reliable pose estimation at high frequencies. We compare the point cloud obtained by our method with a model generated from georeferenced terrestrial laser scanner.},
    }

  • J. Schneider, C. Stachniss, and W. Förstner, “Dichtes Stereo mit Fisheye-Kameras,” in UAV 2016 – Vermessung mit unbemannten Flugsystemen, 2016, pp. 247-264.
    [BibTeX]
    @inproceedings{schneider16dvw,
      author    = {J. Schneider and C. Stachniss and W. F\"orstner},
      title     = {Dichtes Stereo mit Fisheye-Kameras},
      booktitle = {UAV 2016 -- Vermessung mit unbemannten Flugsystemen},
      series    = {Schriftenreihe des DVW},
      volume    = {82},
      pages     = {247-264},
      publisher = {Wi{\ss}ner Verlag},
      year      = {2016},
    }

  • J. Schneider, C. Stachniss, and W. Förstner, “On the Accuracy of Dense Fisheye Stereo,” IEEE Robotics and Automation Letters (RA-L), vol. 1, iss. 1, pp. 227-234, 2016. doi:10.1109/LRA.2016.2516509
    [BibTeX] [PDF]

    Fisheye cameras offer a large field of view, which is important for several robotics applications as a larger field of view allows for covering a large area with a single image. In contrast to classical cameras, however, fisheye cameras cannot be approximated well using the pinhole camera model and this renders the computation of depth information from fisheye stereo image pairs more complicated. In this work, we analyze the combination of an epipolar rectification model for fisheye stereo cameras with existing dense methods. This has the advantage that existing dense stereo systems can be applied as a black-box even with cameras that have field of view of more than 180 deg to obtain dense disparity information. We thoroughly investigate the accuracy potential of such fisheye stereo systems using image data from our UAV. The empirical analysis is based on image pairs of a calibrated fisheye stereo camera system and two state-of-the-art algorithms for dense stereo applied to adequately rectified image pairs from fisheye stereo cameras. The canonical stochastic model for sensor points assumes homogeneous uncertainty and we generalize this model based on an empirical analysis using a test scene consisting of mutually orthogonal planes. We show (1) that the combination of adequately rectified fisheye image pairs and dense methods provides dense 3D point clouds at 6-7 Hz on our autonomous multi-copter UAV, (2) that the uncertainty of points depends on their angular distance from the optical axis, (3) how to estimate the variance component as a function of that distance, and (4) how the improved stochastic model improves the accuracy of the scene points.

    @article{schneider16ral,
      author   = {J. Schneider and C. Stachniss and W. F\"orstner},
      title    = {On the Accuracy of Dense Fisheye Stereo},
      journal  = ral,
      year     = {2016},
      volume   = {1},
      number   = {1},
      pages    = {227-234},
      doi      = {10.1109/LRA.2016.2516509},
      url      = {https://www.ipb.uni-bonn.de/pdfs/schneider16ral.pdf},
      abstract = {Fisheye cameras offer a large field of view, which is important for several robotics applications as a larger field of view allows for covering a large area with a single image. In contrast to classical cameras, however, fisheye cameras cannot be approximated well using the pinhole camera model and this renders the computation of depth information from fisheye stereo image pairs more complicated. In this work, we analyze the combination of an epipolar rectification model for fisheye stereo cameras with existing dense methods. This has the advantage that existing dense stereo systems can be applied as a black-box even with cameras that have field of view of more than 180 deg to obtain dense disparity information. We thoroughly investigate the accuracy potential of such fisheye stereo systems using image data from our UAV. The empirical analysis is based on image pairs of a calibrated fisheye stereo camera system and two state-of-the-art algorithms for dense stereo applied to adequately rectified image pairs from fisheye stereo cameras. The canonical stochastic model for sensor points assumes homogeneous uncertainty and we generalize this model based on an empirical analysis using a test scene consisting of mutually orthogonal planes. We show (1) that the combination of adequately rectified fisheye image pairs and dense methods provides dense 3D point clouds at 6-7 Hz on our autonomous multi-copter UAV, (2) that the uncertainty of points depends on their angular distance from the optical axis, (3) how to estimate the variance component as a function of that distance, and (4) how the improved stochastic model improves the accuracy of the scene points.},
    }

  • T. Schubert, S. Wenzel, R. Roscher, and C. Stachniss, “Investigation of Latent Traces Using Infrared Reflectance Hyperspectral Imaging,” in ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences, 2016, p. 97–102. doi:10.5194/isprs-annals-III-7-97-2016
    [BibTeX] [PDF]

    The detection of traces is a main task of forensic science. A potential method is hyperspectral imaging (HSI) from which we expect to capture more fluorescence effects than with common Forensic Light Sources (FLS). Specimen of blood, semen and saliva traces in several dilution steps are prepared on cardboard substrate. As our key result we successfully make latent traces visible up to highest available dilution (1:8000). We can attribute most of the detectability to interference of electromagnetic light with the water content of the traces in the Shortwave Infrared region of the spectrum. In a classification task we use several dimensionality reduction methods (PCA and LDA) in combination with a Maximum Likelihood (ML) classifier assuming normally distributed data. Random Forest builds a competitive approach. The classifiers retrieve the exact positions of labeled trace preparation up to highest dilution and determine posterior probabilities. By modeling the classification with a Markov Random Field we obtain smoothed results.

    @inproceedings{schubert2016investigation,
      author    = {Schubert, Till and Wenzel, Susanne and Roscher, Ribana and Stachniss, Cyrill},
      title     = {{Investigation of Latent Traces Using Infrared Reflectance Hyperspectral Imaging}},
      booktitle = {ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences},
      year      = {2016},
      volume    = {III-7},
      pages     = {97--102},
      doi       = {10.5194/isprs-annals-III-7-97-2016},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Schubert2016Investigation.pdf},
      abstract  = {The detection of traces is a main task of forensic science. A potential method is hyperspectral imaging (HSI) from which we expect to capture more fluorescence effects than with common Forensic Light Sources (FLS). Specimen of blood, semen and saliva traces in several dilution steps are prepared on cardboard substrate. As our key result we successfully make latent traces visible up to highest available dilution (1:8000). We can attribute most of the detectability to interference of electromagnetic light with the water content of the traces in the Shortwave Infrared region of the spectrum. In a classification task we use several dimensionality reduction methods (PCA and LDA) in combination with a Maximum Likelihood (ML) classifier assuming normally distributed data. Random Forest builds a competitive approach. The classifiers retrieve the exact positions of labeled trace preparation up to highest dilution and determine posterior probabilities. By modeling the classification with a Markov Random Field we obtain smoothed results.},
    }

  • C. Siedentop, V. Laukhart, B. Krastev, D. Kasper, A. Wenden, G. Breuel, and C. Stachniss, “Autonomous Parking Using Previous Paths,” in Advanced Microsystems for Automotive Applications 2015: Smart Systems for Green and Automated Driving. Lecture Notes in Mobility, T. Schulze, B. Müller, and G. Meyer, Eds., Springer, 2016, pp. 3-14. doi:10.1007/978-3-319-20855-8_1
    [BibTeX]
    @InCollection{siedentop16lnb,
    title = {Autonomous Parking Using Previous Paths},
    author = {C. Siedentop and V. Laukhart and B. Krastev and D. Kasper and A. Wenden and G. Breuel and C. Stachniss},
    editor = {T. Schulze and B. M{\"u}ller and G. Meyer},
    pages = {3-14},
    publisher = {Springer},
    year = {2016},
    booktitle = {Advanced Microsystems for Automotive Applications 2015: Smart Systems for Green and Automated Driving. Lecture Notes in Mobility},
    doi = {10.1007/978-3-319-20855-8_1},
    }

  • C. Stachniss, “Springer Handbook of Photogrammetry.” Springer, 2016.
    [BibTeX]
    @inbook{springerbook-photo-slamchapter,
      author    = {C. Stachniss},
      title     = {Springer Handbook of Photogrammetry},
      chapter   = {Simultaneous Localization and Mapping},
      publisher = {Springer},
      year      = {2016},
      note      = {In German},
    }

  • O. Vysotska and C. Stachniss, “Exploiting Building Information from Publicly Available Maps in Graph-Based SLAM,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2016.
    [BibTeX] [PDF] [Video]
    [none]
    @InProceedings{vysotska16iros,
    title = {Exploiting Building Information from Publicly Available Maps in Graph-Based {SLAM}},
    author = {O. Vysotska and C. Stachniss},
    booktitle = iros,
    year = {2016},
    url = {https://www.ipb.uni-bonn.de/pdfs/vysotska16iros.pdf},
    videourl = {https://www.youtube.com/watch?v=5RfRAEP-baM},
    }

  • O. Vysotska and C. Stachniss, “Lazy Data Association For Image Sequences Matching Under Substantial Appearance Changes,” IEEE Robotics and Automation Letters (RA-L), vol. 1, iss. 1, pp. 213-220, 2016. doi:10.1109/LRA.2015.2512936
    [BibTeX] [PDF] [Code] [Video]

    Localization is an essential capability for mobile robots and the ability to localize in changing environments is key to robust outdoor navigation. Robots operating over extended periods of time should be able to handle substantial appearance changes such as those occurring over seasons or under different weather conditions. In this letter, we investigate the problem of efficiently coping with seasonal appearance changes in online localization. We propose a lazy data association approach for matching streams of incoming images to a reference image sequence in an online fashion. We present a search heuristic to quickly find matches between the current image sequence and a database using a data association graph. Our experiments conducted under substantial seasonal changes suggest that our approach can efficiently match image sequences while requiring a comparably small number of image to image comparisons

    @Article{vysotska16ral,
    title = {Lazy Data Association For Image Sequences Matching Under Substantial Appearance Changes},
    author = {O. Vysotska and C. Stachniss},
    journal = ral,
    year = {2016},
    number = {1},
    pages = {213--220},
    volume = {1},
    abstract = {Localization is an essential capability for mobile robots and the ability to localize in changing environments is key to robust outdoor navigation. Robots operating over extended periods of time should be able to handle substantial appearance changes such as those occurring over seasons or under different weather conditions. In this letter, we investigate the problem of efficiently coping with seasonal appearance changes in online localization. We propose a lazy data association approach for matching streams of incoming images to a reference image sequence in an online fashion. We present a search heuristic to quickly find matches between the current image sequence and a database using a data association graph. Our experiments conducted under substantial seasonal changes suggest that our approach can efficiently match image sequences while requiring a comparably small number of image to image comparisons},
    doi = {10.1109/LRA.2015.2512936},
    timestamp = {2016.04.18},
    url = {https://www.ipb.uni-bonn.de/pdfs/vysotska16ral-icra.pdf},
    codeurl = {https://github.com/Photogrammetry-Robotics-Bonn/online_place_recognition},
    videourl = {https://www.youtube.com/watch?v=l-hNk7Z4lSk},
    }

  • S. Wenzel, “High-Level Facade Image Interpretation using Marked Point Processes,” PhD Thesis, 2016.
    [BibTeX] [PDF]

    In this thesis, we address facade image interpretation as one essential ingredient for the generation of high-detailed, semantic meaningful, three-dimensional city-models. Given a single rectified facade image, we detect relevant facade objects such as windows, entrances, and balconies, which yield a description of the image in terms of accurate position and size of these objects. Urban digital three-dimensional reconstruction and documentation is an active area of research with several potential applications, e.g., in the area of digital mapping for navigation, urban planning, emergency management, disaster control or the entertainment industry. A detailed building model which is not just a geometric object enriched with texture, allows for semantic requests as the number of floors or the location of balconies and entrances. Facade image interpretation is one essential step in order to yield such models. In this thesis, we propose the interpretation of facade images by combining evidence for the occurrence of individual object classes which we derive from data, and prior knowledge which guides the image interpretation in its entirety. We present a three-step procedure which generates features that are suited to describe relevant objects, learns a representation that is suited for object detection, and that enables the image interpretation using the results of object detection while incorporating prior knowledge about typical configurations of facade objects, which we learn from training data. According to these three sub-tasks, our major achievements are: We propose a novel method for facade image interpretation based on a marked point process. Therefor, we develop a model for the description of typical configurations of facade objects and propose an image interpretation system which combines evidence derived from data and prior knowledge about typical configurations of facade objects. 
In order to generate evidence from data, we propose a feature type which we call shapelets. They are scale invariant and provide large distinctiveness for facade objects. Segments of lines, arcs, and ellipses serve as basic features for the generation of shapelets. Therefor, we propose a novel line simplification approach which approximates given pixel-chains by a sequence of lines, circular, and elliptical arcs. Among others, it is based on an adaption to Douglas-Peucker’s algorithm, which is based on circles as basic geometric elements We evaluate each step separately. We show the effects of polyline segmentation and simplification on several images with comparable good or even better results, referring to a state-of-the-art algorithm, which proves their large distinctiveness for facade objects. Using shapelets we provide a reasonable classification performance on a challenging dataset, including intra-class variations, clutter, and scale changes. Finally, we show promising results for the facade interpretation system on several datasets and provide a qualitative evaluation which demonstrates the capability of complete and accurate detection of facade objects.

    @PhDThesis{wenzel2016high-level,
    title = {High-Level Facade Image Interpretation using Marked Point Processes},
    author = {Wenzel, Susanne},
    school = {Department of Photogrammetry, University of Bonn},
    year = {2016},
    abstract = {In this thesis, we address facade image interpretation as one essential ingredient for the generation of high-detailed, semantic meaningful, three-dimensional city-models. Given a single rectified facade image, we detect relevant facade objects such as windows, entrances, and balconies, which yield a description of the image in terms of accurate position and size of these objects. Urban digital three-dimensional reconstruction and documentation is an active area of research with several potential applications, e.g., in the area of digital mapping for navigation, urban planning, emergency management, disaster control or the entertainment industry. A detailed building model which is not just a geometric object enriched with texture, allows for semantic requests as the number of floors or the location of balconies and entrances. Facade image interpretation is one essential step in order to yield such models. In this thesis, we propose the interpretation of facade images by combining evidence for the occurrence of individual object classes which we derive from data, and prior knowledge which guides the image interpretation in its entirety. We present a three-step procedure which generates features that are suited to describe relevant objects, learns a representation that is suited for object detection, and that enables the image interpretation using the results of object detection while incorporating prior knowledge about typical configurations of facade objects, which we learn from training data. According to these three sub-tasks, our major achievements are: We propose a novel method for facade image interpretation based on a marked point process. Therefor, we develop a model for the description of typical configurations of facade objects and propose an image interpretation system which combines evidence derived from data and prior knowledge about typical configurations of facade objects. 
In order to generate evidence from data, we propose a feature type which we call shapelets. They are scale invariant and provide large distinctiveness for facade objects. Segments of lines, arcs, and ellipses serve as basic features for the generation of shapelets. Therefor, we propose a novel line simplification approach which approximates given pixel-chains by a sequence of lines, circular, and elliptical arcs. Among others, it is based on an adaption to Douglas-Peucker's algorithm, which is based on circles as basic geometric elements We evaluate each step separately. We show the effects of polyline segmentation and simplification on several images with comparable good or even better results, referring to a state-of-the-art algorithm, which proves their large distinctiveness for facade objects. Using shapelets we provide a reasonable classification performance on a challenging dataset, including intra-class variations, clutter, and scale changes. Finally, we show promising results for the facade interpretation system on several datasets and
    provide a qualitative evaluation which demonstrates the capability of complete and accurate detection of facade objects.},
    address = {Bonn},
    url = {https://hss.ulb.uni-bonn.de/2016/4412/4412.htm},
    }

  • S. Wenzel and W. Förstner, “Facade Interpretation Using a Marked Point Process,” in ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences, 2016, p. 363–370. doi:10.5194/isprs-annals-III-3-363-2016
    [BibTeX] [PDF]

    Our objective is the interpretation of facade images in a top-down manner, using a Markov marked point process formulated as a Gibbs process. Given single rectified facade images we aim at the accurate detection of relevant facade objects as windows and entrances, using prior knowledge about their possible configurations within facade images. We represent facade objects by a simplified rectangular object model and present an energy model which evaluates the agreement of a proposed configuration with the given image and the statistics about typical configurations which we learned from training data. We show promising results on different datasets and provide a quantitative evaluation, which demonstrates the capability of complete and accurate detection of facade objects.

    @InProceedings{wenzel2016facade,
    title = {{Facade Interpretation Using a Marked Point Process}},
    author = {Wenzel, Susanne and F{\"o}rstner, Wolfgang},
    booktitle = {ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences},
    year = {2016},
    pages = {363--370},
    volume = {III-3},
    abstract = {Our objective is the interpretation of facade images in a top-down manner, using a Markov marked point process formulated as a Gibbs process. Given single rectified facade images we aim at the accurate detection of relevant facade objects as windows and entrances, using prior knowledge about their possible configurations within facade images. We represent facade objects by a simplified rectangular object model and present an energy model which evaluates the agreement of a proposed configuration with the given image and the statistics about typical configurations which we learned from training data. We show promising results on different datasets and provide a quantitative evaluation, which demonstrates the capability of complete and accurate detection of facade objects.},
    doi = {10.5194/isprs-annals-III-3-363-2016},
    url = {https://www.ipb.uni-bonn.de/pdfs/Wenzel2016Facade.pdf},
    }

  • C. Merfels, T. Riemenschneider, and C. Stachniss, “Pose fusion with biased and dependent data for automated driving,” in Proc. of the Positioning and Navigation for Intelligent Transportation Systems Conf. (POSNAV ITS), 2016.
    [BibTeX] [PDF]
    @InProceedings{merfels2016posnav,
    title = {Pose fusion with biased and dependent data for automated driving},
    author = {C. Merfels and T. Riemenschneider and C. Stachniss},
    booktitle = {Proc. of the Positioning and Navigation for Intelligent Transportation Systems Conf. (POSNAV ITS)},
    year = {2016},
    }

2015

  • N. Abdo, C. Stachniss, L. Spinello, and W. Burgard, “Collaborative Filtering for Predicting User Preferences for Organizing Objects,” arXiv Preprint, vol. abs/1512.06362, 2015.
    [BibTeX] [PDF]
    [none]
    @Article{abdo15arxiv,
    title = {Collaborative Filtering for Predicting User Preferences for Organizing Objects},
    author = {N. Abdo and C. Stachniss and L. Spinello and W. Burgard},
    journal = arxiv,
    year = {2015},
    note = {arXiv:1512.06362 [cs.RO]},
    volume = {abs/1512.06362},
    timestamp = {2016.04.18},
    url = {https://arxiv.org/abs/1512.06362},
    }

  • N. Abdo, C. Stachniss, L. Spinello, and W. Burgard, “Robot, Organize my Shelves! Tidying up Objects by Predicting User Preferences,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2015, pp. 1557-1564. doi:10.1109/ICRA.2015.7139396
    [BibTeX] [PDF]

    As service robots become more and more capable of performing useful tasks for us, there is a growing need to teach robots how we expect them to carry out these tasks. However, learning our preferences is a nontrivial problem, as many of them stem from a variety of factors including personal taste, cultural background, or common sense. Obviously, such factors are hard to formulate or model a priori. In this paper, we present a solution for tidying up objects in containers, e.g., shelves or boxes, by following user preferences. We learn the user preferences using collaborative filtering based on crowdsourced and mined data. First, we predict pairwise object preferences of the user. Then, we subdivide the objects in containers by modeling a spectral clustering problem. Our solution is easy to update, does not require complex modeling, and improves with the amount of user data. We evaluate our approach using crowdsourcing data from over 1,200 users and demonstrate its effectiveness for two tidy-up scenarios. Additionally, we show that a real robot can reliably predict user preferences using our approach.

    @InProceedings{abdo15icra,
    title = {Robot, Organize my Shelves! Tidying up Objects by Predicting User Preferences},
    author = {N. Abdo and C. Stachniss and L. Spinello and W. Burgard},
    booktitle = icra,
    year = {2015},
    pages = {1557--1564},
    abstract = {As service robots become more and more capable of performing useful tasks for us, there is a growing need to teach robots how we expect them to carry out these tasks. However, learning our preferences is a nontrivial problem, as many of them stem from a variety of factors including personal taste, cultural background, or common sense. Obviously, such factors are hard to formulate or model a priori. In this paper, we present a solution for tidying up objects in containers, e.g., shelves or boxes, by following user preferences. We learn the user preferences using collaborative filtering based on crowdsourced and mined data. First, we predict pairwise object preferences of the user. Then, we subdivide the objects in containers by modeling a spectral clustering problem. Our solution is easy to update, does not require complex modeling, and improves with the amount of user data. We evaluate our approach using crowdsourcing data from over 1,200 users and demonstrate its effectiveness for two tidy-up scenarios. Additionally, we show that a real robot can reliably predict user preferences using our approach.},
    doi = {10.1109/ICRA.2015.7139396},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/abdo15icra.pdf},
    }

  • I. Bogoslavskyi, L. Spinello, W. Burgard, and C. Stachniss, “Where to Park? Minimizing the Expected Time to Find a Parking Space,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2015, pp. 2147-2152. doi:10.1109/ICRA.2015.7139482
    [BibTeX] [PDF]

    Quickly finding a free parking spot that is close to a desired target location can be a difficult task. This holds for human drivers and autonomous cars alike. In this paper, we investigate the problem of predicting the occupancy of parking spaces and exploiting this information during route planning. We propose an MDP-based planner that considers route information as well as the occupancy probabilities of parking spaces to compute the path that minimizes the expected total time for finding an unoccupied parking space and for walking from the parking location to the target destination. We evaluated our system on real world data gathered over several days in a real parking lot. We furthermore compare our approach to three parking strategies and show that our method outperforms the alternative behaviors.

    @InProceedings{bogoslavskyi15icra,
    title = {Where to Park? Minimizing the Expected Time to Find a Parking Space},
    author = {I. Bogoslavskyi and L. Spinello and W. Burgard and C. Stachniss},
    booktitle = icra,
    year = {2015},
    pages = {2147--2152},
    abstract = {Quickly finding a free parking spot that is close to a desired target location can be a difficult task. This holds for human drivers and autonomous cars alike. In this paper, we investigate the problem of predicting the occupancy of parking spaces and exploiting this information during route planning. We propose an MDP-based planner that considers route information as well as the occupancy probabilities of parking spaces to compute the path that minimizes the expected total time for finding an unoccupied parking space and for walking from the parking location to the target destination. We evaluated our system on real world data gathered over several days in a real parking lot. We furthermore compare our approach to three parking strategies and show that our method outperforms the alternative behaviors.},
    doi = {10.1109/ICRA.2015.7139482},
    timestamp = {2015.06.29},
    url = {https://www.ipb.uni-bonn.de/pdfs/bogoslavskyi15icra.pdf},
    }

  • F. M. Carlucci, L. Nardi, L. Iocchi, and D. Nardi, “Explicit Representation of Social Norms for Social Robots,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2015, pp. 4191-4196. doi:10.1109/IROS.2015.7353970
    [BibTeX] [PDF]

    As robots are expected to become more and more available in everyday environments, interaction with humans is assuming a central role. Robots working in populated environments are thus expected to demonstrate socially acceptable behaviors and to follow social norms. However, most of the recent works in this field do not address the problem of explicit representation of the social norms and their integration in the reasoning and the execution components of a cognitive robot. In this paper, we address the design of robotic systems that support some social behavior by implementing social norms. We present a framework for planning and execution of social plans, in which social norms are described in a domain and language independent form. A full implementation of the proposed framework is described and tested in a realistic scenario with non-expert and non-recruited users.

    @InProceedings{carlucci15iros,
    title = {Explicit Representation of Social Norms for Social Robots},
    author = {F. M. Carlucci and L. Nardi and L. Iocchi and D. Nardi},
    booktitle = iros,
    year = {2015},
    pages = {4191--4196},
    abstract = {As robots are expected to become more and more available in everyday environments, interaction with humans is assuming a central role. Robots working in populated environments are thus expected to demonstrate socially acceptable behaviors and to follow social norms. However, most of the recent works in this field do not address the problem of explicit representation of the social norms and their integration in the reasoning and the execution components of a cognitive robot. In this paper, we address the design of robotic systems that support some social behavior by implementing social norms. We present a framework for planning and execution of social plans, in which social norms are described in a domain and language independent form. A full implementation of the proposed framework is described and tested in a realistic scenario with non-expert and non-recruited users.},
    doi = {10.1109/IROS.2015.7353970},
    timestamp = {2016.04.19},
    url = {https://www.ipb.uni-bonn.de/pdfs/Carlucci2015Explicit.pdf},
    }

  • T. Naseer, M. Ruhnke, L. Spinello, C. Stachniss, and W. Burgard, “Robust Visual SLAM Across Seasons,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2015, pp. 2529-2535. doi:10.1109/IROS.2015.7353721
    [BibTeX] [PDF]

    In this paper, we present an appearance-based visual SLAM approach that focuses on detecting loop closures across seasons. Given two image sequences, our method first extracts one descriptor per image for both sequences using a deep convolutional neural network. Then, we compute a similarity matrix by comparing each image of a query sequence with a database. Finally, based on the similarity matrix, we formulate a flow network problem and compute matching hypotheses between sequences. In this way, our approach can handle partially matching routes, loops in the trajectory and different speeds of the robot. With a matching hypothesis as loop closure information and the odometry information of the robot, we formulate a graph based SLAM problem and compute a joint maximum likelihood trajectory.

    @InProceedings{naseer15iros,
    title = {Robust Visual {SLAM} Across Seasons},
    author = {Naseer, Tayyab and Ruhnke, Michael and Spinello, Luciano and Stachniss, Cyrill and Burgard, Wolfram},
    booktitle = iros,
    year = {2015},
    pages = {2529--2535},
    abstract = {In this paper, we present an appearance-based visual SLAM approach that focuses on detecting loop closures across seasons. Given two image sequences, our method first extracts one descriptor per image for both sequences using a deep convolutional neural network. Then, we compute a similarity matrix by comparing each image of a query sequence with a database. Finally, based on the similarity matrix, we formulate a flow network problem and compute matching hypotheses between sequences. In this way, our approach can handle partially matching routes, loops in the trajectory and different speeds of the robot. With a matching hypothesis as loop closure information and the odometry information of the robot, we formulate a graph based SLAM problem and compute a joint maximum likelihood trajectory.},
    doi = {10.1109/IROS.2015.7353721},
    timestamp = {2016.04.19},
    url = {https://www.ipb.uni-bonn.de/pdfs/Naseer2015Robust.pdf},
    }

  • D. Perea-Ström, F. Nenci, and C. Stachniss, “Predictive Exploration Considering Previously Mapped Environments,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2015, pp. 2761-2766. doi:10.1109/ICRA.2015.7139574
    [BibTeX] [PDF]

    The ability to explore an unknown environment is an important prerequisite for building truly autonomous robots. The central decision that a robot needs to make when exploring an unknown environment is to select the next view point(s) for gathering observations. In this paper, we consider the problem of how to select view points that support the underlying mapping process. We propose a novel approach that makes predictions about the structure of the environments in the unexplored areas by relying on maps acquired previously. Our approach seeks to find similarities between the current surroundings of the robot and previously acquired maps stored in a database in order to predict how the environment may expand in the unknown areas. This allows us to predict potential future loop closures early. This knowledge is used in the view point selection to actively close loops and in this way reduce the uncertainty in the robot’s belief. We implemented and tested the proposed approach. The experiments indicate that our method improves the ability of a robot to explore challenging environments and improves the quality of the resulting maps.

    @InProceedings{perea15icra,
    title = {Predictive Exploration Considering Previously Mapped Environments},
    author = {D. Perea-Str{\"o}m and F. Nenci and C. Stachniss},
    booktitle = icra,
    year = {2015},
    pages = {2761--2766},
    abstract = {The ability to explore an unknown environment is an important prerequisite for building truly autonomous robots. The central decision that a robot needs to make when exploring an unknown environment is to select the next view point(s) for gathering observations. In this paper, we consider the problem of how to select view points that support the underlying mapping process. We propose a novel approach that makes predictions about the structure of the environments in the unexplored areas by relying on maps acquired previously. Our approach seeks to find similarities between the current surroundings of the robot and previously acquired maps stored in a database in order to predict how the environment may expand in the unknown areas. This allows us to predict potential future loop closures early. This knowledge is used in the view point selection to actively close loops and in this way reduce the uncertainty in the robot's belief. We implemented and tested the proposed approach. The experiments indicate that our method improves the ability of a robot to explore challenging environments and improves the quality of the resulting maps.},
    doi = {10.1109/ICRA.2015.7139574},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/perea15icra.pdf},
    }

  • R. Roscher, C. Römer, B. Waske, and L. Plümer, “Landcover classification with self-taught learning on archetypal dictionaries,” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS), 2015, pp. 2358-2361. doi:10.1109/IGARSS.2015.7326282
    [BibTeX]
    @InProceedings{roscher2015selftaught,
    title = {Landcover classification with self-taught learning on archetypal dictionaries},
    author = {Roscher, R. and R\"omer, C. and Waske, B. and Pl\"umer, L.},
    booktitle = {{IEEE} International Geoscience and Remote Sensing Symposium (IGARSS)},
    year = {2015},
    month = jul,
    pages = {2358--2361},
    doi = {10.1109/IGARSS.2015.7326282},
    }

  • R. Roscher, B. Uebbing, and J. Kusche, “Spatio-temporal altimeter waveform retracking via sparse representation and conditional random fields,” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS), 2015, pp. 1234-1237. doi:10.1109/IGARSS.2015.7325996
    [BibTeX]
    @InProceedings{roscher2015altimeter,
    title = {Spatio-temporal altimeter waveform retracking via sparse representation and conditional random fields},
    author = {Roscher, R. and Uebbing, B. and Kusche, J.},
    booktitle = {{IEEE} International Geoscience and Remote Sensing Symposium (IGARSS)},
    year = {2015},
    month = jul,
    pages = {1234--1237},
    doi = {10.1109/IGARSS.2015.7325996},
    }

  • R. Roscher and B. Waske, “Shapelet-Based Sparse Representation for Landcover Classification of Hyperspectral Images,” IEEE Transactions on Geoscience and Remote Sensing, vol. 54, iss. 3, p. 1623–1634, 2015. doi:10.1109/TGRS.2015.2484619
    [BibTeX]

    This paper presents a sparse-representation-based classification approach with a novel dictionary construction procedure. By using the constructed dictionary, sophisticated prior knowledge about the spatial nature of the image can be integrated. The approach is based on the assumption that each image patch can be factorized into characteristic spatial patterns, also called shapelets, and patch-specific spectral information. A set of shapelets is learned in an unsupervised way, and spectral information is embodied by training samples. A combination of shapelets and spectral information is represented in an undercomplete spatial-spectral dictionary for each individual patch, where the elements of the dictionary are linearly combined to a sparse representation of the patch. The patch-based classification is obtained by means of the representation error. Experiments are conducted on three well-known hyperspectral image data sets. They illustrate that our proposed approach shows superior results in comparison to sparse-representation-based classifiers that use only limited spatial information and behaves competitively with or better than state-of-the-art classifiers utilizing spatial information and kernelized sparse-representation-based classifiers.

    @Article{roscher2015shapelet,
    title = {Shapelet-Based Sparse Representation for Landcover Classification of Hyperspectral Images},
    author = {Roscher, R. and Waske, B.},
    journal = {IEEE Transactions on Geoscience and Remote Sensing},
    year = {2015},
    volume = {54},
    number = {3},
    pages = {1623--1634},
    issn = {0196-2892},
    doi = {10.1109/TGRS.2015.2484619},
    abstract = {This paper presents a sparse-representation-based classification approach with a novel dictionary construction procedure. By using the constructed dictionary, sophisticated prior knowledge about the spatial nature of the image can be integrated. The approach is based on the assumption that each image patch can be factorized into characteristic spatial patterns, also called shapelets, and patch-specific spectral information. A set of shapelets is learned in an unsupervised way, and spectral information is embodied by training samples. A combination of shapelets and spectral information is represented in an undercomplete spatial-spectral dictionary for each individual patch, where the elements of the dictionary are linearly combined to a sparse representation of the patch. The patch-based classification is obtained by means of the representation error. Experiments are conducted on three well-known hyperspectral image data sets. They illustrate that our proposed approach shows superior results in comparison to sparse-representation-based classifiers that use only limited spatial information and behaves competitively with or better than state-of-the-art classifiers utilizing spatial information and kernelized sparse-representation-based classifiers.},
    }

  • T. Schubert, “Investigation of Latent Traces Using Hyperspectral Imaging,” Bachelor Thesis, 2015.
    [BibTeX]

    The detection of traces is a main task of forensic science. A potential method is hyperspectral imaging (HSI) which is the process of recording many narrowband intensity images across a wide range of the light spectrum. From this technique we expect to capture more fluorescence effects than with common Forensic Light Sources (FLS). Specimen of blood, semen and saliva traces in several dilution steps are prepared on cardboard substrate. The hyperspectral images are acquired by scanning with two line sensors of visible and infrared light over the specimen. After an image normalization step we obtain reflectance values arranged as an image plane for each wavelength. The atomic process is initiated by excitation with illumination light such that absorption and elastic scattering cause emission of trace-specific light. In a spectroscopic investigation we can attribute most of the trace-specific signal to chemical interaction of infrared light with the water content of the traces. Image planes (i.e. band images) at infrared wavelengths allow detectability to a much higher level than light of the visible region. Ratio images provide definition of new features which can be established as spectral indices. By these arithmetic operations with image planes we can account for variations in the tissue and make traces even more highlighted towards the fabric. The spectral regions which we obtain at a maximal measure of discriminative power indicate regions known as absorption peaks for biological components such as hemoglobin and water. In this thesis we make latent traces, i.e. non-visible for the human eye, visible up to highest available dilution (1:8000) in infrared data. Hyperspectral images in the region of visible light achieve to detect traces only marginally beyond visibility by human eye. In order to evaluate the detectability of traces we exploit several classifiers to labeled data. 
We use several dimensionality reduction methods (PCA, LDA, band image and ratio image) in combination with a Maximum Likelihood (ML) classifier assuming normally distributed data. Random Forest builds a competitive approach. In the classification task we retrieve the exact positions of labeled trace preparation up to highest dilution. PCA prior to LDA and ML decision function achieves best results for classifying trace against background. Random Forest is preferable in multiclass classification. On the contrary, neither spectral indices nor classification approaches yield adequate achievements for the application of methods learned on labeled data to other images of specimen with arbitrary fabrics. Customized preprocessing and dimensionality reduction methods achieve no significant reduction of background influence. The proportion of trace-specific signal in the data is not sufficient for this task. We suggest supervision of the illumination light to pointedly initiate trace-specific interference. Concerning field usage of HSI we prefer area-scanning cameras (i.e. image plane acquisition with spectral scanning by a wavelength-tunable bandpass filter). Band and ratio images at established spectral indices qualify for live view screening on an external screen.

    @MastersThesis{schubert2015,
    title = {Investigation of Latent Traces Using Hyperspectral Imaging},
    author = {Till Schubert},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2015},
    type = {Bachelor Thesis},
    abstract = {The detection of traces is a main task of forensic science. A potential method is hyperspectral imaging (HSI) which is the process of recording many narrowband intensity images across a wide range of the light spectrum. From this technique we expect to capture more fluorescence effects than with common Forensic Light Sources (FLS). Specimen of blood, semen and saliva traces in several dilution steps are prepared on cardboard substrate. The hyperspectral images are acquired by scanning with two line sensors of visible and infrared light over the specimen. After an image normalization step we obtain reflectance values arranged as an image plane for each wavelength. The atomic process is initiated by excitation with illumination light such that absorption and elastic scattering cause emission of trace-specific light. In a spectroscopic investigation we can attribute most of the trace-specific signal to chemical interaction of infrared light with the water content of the traces. Image planes (i.e. band images) at infrared wavelengths allow detectability to a much higher level than light of the visible region. Ratio images provide definition of new features which can be established as spectral indices. By these arithmetic operations with image planes we can account for variations in the tissue and make traces even more highlighted towards the fabric. The spectral regions which we obtain at a maximal measure of discriminative power indicate regions known as absorption peaks for biological components such as hemoglobin and water. In this thesis we make latent traces, i.e. non-visible for the human eye, visible up to highest available dilution (1:8000) in infrared data. Hyperspectral images in the region of visible light achieve to detect traces only marginally beyond visibility by human eye. In order to evaluate the detectability of traces we exploit several classifiers to labeled data. 
We use several dimensionality reduction methods (PCA, LDA, band image and ratio image) in combination with a Maximum Likelihood (ML) classifier assuming normally distributed data. Random Forest builds a competitive approach. In the classification task we retrieve the exact positions of labeled trace preparation up to highest dilution. PCA prior to LDA and ML decision function achieves best results for classifying trace against background. Random Forest is preferable in multiclass classification. On the contrary, neither spectral indices nor classification approaches yield adequate achievements for the application of methods learned on labeled data to other images of specimen with arbitrary fabrics. Customized preprocessing and dimensionality reduction methods achieve no significant reduction of background influence. The proportion of trace-specific signal in the data is not sufficient for this task. We suggest supervision of the illumination light to pointedly initiate trace-specific interference. Concerning field usage of HSI we prefer area-scanning
    cameras (i.e. image plane acquisition with spectral scanning by a wavelength-tunable bandpass filter). Band and ratio images at established spectral indices qualify for live view screening on an external screen.},
    timestamp = {2015.09.28},
    }

  • C. Siedentop, R. Heinze, D. Kasper, G. Breuel, and C. Stachniss, “Path-Planning for Autonomous Parking with Dubins Curves,” in Proc. of the Workshop Fahrerassistenzsysteme, 2015.
    [BibTeX] [PDF]
    @InProceedings{siedentop15fas,
    title = {Path-Planning for Autonomous Parking with {Dubins} Curves},
    author = {C. Siedentop and R. Heinze and D. Kasper and G. Breuel and C. Stachniss},
    booktitle = {Proc. of the Workshop Fahrerassistenzsysteme},
    year = {2015},
    }

  • O. Vysotska, T. Naseer, L. Spinello, W. Burgard, and C. Stachniss, “Efficient and Effective Matching of Image Sequences Under Substantial Appearance Changes Exploiting GPS Prior,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2015, pp. 2774-2779. doi:10.1109/ICRA.2015.7139576
    [BibTeX] [PDF] [Code]
    @InProceedings{vysotska15icra,
    title = {Efficient and Effective Matching of Image Sequences Under Substantial Appearance Changes Exploiting {GPS} Prior},
    author = {O. Vysotska and T. Naseer and L. Spinello and W. Burgard and C. Stachniss},
    booktitle = icra,
    year = {2015},
    pages = {2774--2779},
    doi = {10.1109/ICRA.2015.7139576},
    timestamp = {2015.06.29},
    url = {https://www.ipb.uni-bonn.de/pdfs/vysotska15icra.pdf},
    codeurl = {https://github.com/ovysotska/image_sequence_matcher},
    }

  • O. Vysotska and C. Stachniss, “Lazy Sequences Matching Under Substantial Appearance Changes,” in Workshop on Visual Place Recognition in Changing Environments at the IEEE Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2015.
    [BibTeX] [PDF]
    [none]
    @InProceedings{vysotska15icraws,
    title = {Lazy Sequences Matching Under Substantial Appearance Changes},
    author = {O. Vysotska and C. Stachniss},
    booktitle = {Workshop on Visual Place Recognition in Changing Environments at the IEEE } # icra,
    year = {2015},
    timestamp = {2015.06.29},
    url = {https://www.ipb.uni-bonn.de/pdfs/vysotska15icra-ws.pdf},
    }

2014

  • B. Frank, C. Stachniss, R. Schmedding, M. Teschner, and W. Burgard, “Learning object deformation models for robot motion planning,” Robotics and Autonomous Systems, 2014. doi:10.1016/j.robot.2014.04.005
    [BibTeX] [PDF]
    [none]
    @Article{frank2014,
    title = {Learning object deformation models for robot motion planning},
    author = {Barbara Frank and Cyrill Stachniss and R{\"u}diger Schmedding and Matthias Teschner and Wolfram Burgard},
    journal = {Robotics and Autonomous Systems},
    year = {2014},
    doi = {10.1016/j.robot.2014.04.005},
    issn = {0921-8890},
    keywords = {Mobile robots},
    url = {https://www.sciencedirect.com/science/article/pii/S0921889014000797},
    }

  • N. Abdo, L. Spinello, W. Burgard, and C. Stachniss, “Inferring What to Imitate in Manipulation Actions by Using a Recommender System,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Hong Kong, China, 2014.
    [BibTeX] [PDF]
    @inproceedings{abdo2014icra,
      author    = {N. Abdo and L. Spinello and W. Burgard and C. Stachniss},
      title     = {Inferring What to Imitate in Manipulation Actions by Using a Recommender System},
      booktitle = icra,
      address   = {Hong Kong, China},
      year      = {2014},
    }

  • P. Agarwal, W. Burgard, and C. Stachniss, “Helmert’s and Bowie’s Geodetic Mapping Methods and Their Relation to Graph-Based SLAM,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Hong Kong, China, 2014.
    [BibTeX]
    @InProceedings{agarwal2014icra,
    title = {{Helmert's} and {Bowie's} Geodetic Mapping Methods and Their Relation to Graph-Based {SLAM}},
    author = {P. Agarwal and W. Burgard and C. Stachniss},
    booktitle = icra,
    year = {2014},
    address = {Hong Kong, China},
    timestamp = {2014.04.24},
    }

  • P. Agarwal, W. Burgard, and C. Stachniss, “A Survey of Geodetic Approaches to Mapping and the Relationship to Graph-Based SLAM,” IEEE Robotics and Automation Magazine, vol. 21, pp. 63-80, 2014. doi:10.1109/MRA.2014.2322282
    [BibTeX] [PDF]

    The ability to simultaneously localize a robot and build a map of the environment is central to most robotics applications, and the problem is often referred to as simultaneous localization and mapping (SLAM). Robotics researchers have proposed a large variety of solutions allowing robots to build maps and use them for navigation. In addition, the geodetic community has addressed large-scale map building for centuries, computing maps that span across continents. These large-scale mapping processes had to deal with several challenges that are similar to those of the robotics community. In this article, we explain key geodetic map building methods that we believe are relevant for robot mapping. We also aim at providing a geodetic perspective on current state-of-the-art SLAM methods and identifying similarities both in terms of challenges faced and the solutions proposed by both communities. The central goal of this article is to connect both fields and enable future synergies between them.

    @Article{agarwal2014ram,
    title = {A Survey of Geodetic Approaches to Mapping and the Relationship to Graph-Based {SLAM}},
    author = {Pratik Agarwal and Wolfram Burgard and Cyrill Stachniss},
    journal = {IEEE Robotics and Automation Magazine},
    year = {2014},
    pages = {63--80},
    volume = {21},
    abstract = {The ability to simultaneously localize a robot and build a map of the environment is central to most robotics applications, and the problem is often referred to as simultaneous localization and mapping (SLAM). Robotics researchers have proposed a large variety of solutions allowing robots to build maps and use them for navigation. In addition, the geodetic community has addressed large-scale map building for centuries, computing maps that span across continents. These large-scale mapping processes had to deal with several challenges that are similar to those of the robotics community. In this article, we explain key geodetic map building methods that we believe are relevant for robot mapping. We also aim at providing a geodetic perspective on current state-of-the-art SLAM methods and identifying similarities both in terms of challenges faced and the solutions proposed by both communities. The central goal of this article is to connect both fields and enable future synergies between them.},
    doi = {10.1109/MRA.2014.2322282},
    timestamp = {2014.09.18},
    }

  • P. Agarwal, G. Grisetti, G. D. Tipaldi, L. Spinello, W. Burgard, and C. Stachniss, “Experimental Analysis of Dynamic Covariance Scaling for Robust Map Optimization Under Bad Initial Estimates,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Hong Kong, China, 2014.
    [BibTeX]
    [none]
    @InProceedings{agarwal2014-dcs,
    title = {Experimental Analysis of Dynamic Covariance Scaling for Robust Map Optimization Under Bad Initial Estimates},
    author = {P. Agarwal and G. Grisetti and G.D. Tipaldi and L. Spinello and W. Burgard and C. Stachniss},
    booktitle = icra,
    year = {2014},
    address = {Hong Kong, China},
    timestamp = {2014.04.24},
    }

  • M. Flick, “Localisation Using Open Street Map Data,” Bachelor Thesis, 2014.
    [BibTeX]

    The goal of this project is to build an online localisation system that localises a vehicle by using OpenStreetMap data and a record of the driven path. Since the Global Positioning Service (GPS) can only be used reliably when the satellite signal can be clearly received, which is when positioned outside of buildings and clear interfering signals, it is only accessible for a certain group of users. Furthermore, it can only be used to the conditions of the US government, as it is them who maintain the GPS-system. Our project develops an alternative for localisation by using independent data, such as OpenStreetMap data and measurements of the driven vehicle, the odometry. This approach uses a particle filter to localise a vehicle. It is a sampling approach that samples complex posterior densities over state spaces. Samples, called particles, are resampled according to the likelihood of the vehicle being at that position. To calculate this information, the position of a particle is weighted. A chamfer matching function performs this task by comparing the driven odometry to OpenStreetMap data and finding the best matches. Chamfer matching evaluates the match of edges to query image. The more similar the current odometry to the query image the better the match. According to the euclidean distance of the particles to their nearest match, the importance of the particle is measured. The particle filter loops over time and with each measurement update the particles move according to their motion update and conglomerate on the most likely position. Assuming that this approach can work in real time and with high accuracy, it is usable on its own with free accessible and contemporary geodata. For this purpose the vehicle tracks its driven path, for example by wheel odometry, and both, the track and the OpenStreetMap data, are evaluated during the runtime of the program to get the current position. 
We show that the particle filter compensates uncertainties of measurement on the basis of loose measuring by performing a robust sampling update. A novel feature of this approach is that we show that the type of odometry does not matter as the chamfer matching and due to the robustness of the particle filter this can overcome. We demonstrate the located position of the vehicle in comparing it to the GPS position of the vehicle to show the difference and accuracy. Also, we will compare the runtime efficiency of GPS to that of the combination of particle filter and chamfer matching approach.

    @MastersThesis{flick2014localisation,
    title = {Localisation Using Open Street Map Data},
    author = {Mareike Flick},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2014},
    type = {Bachelor Thesis},
    abstract = {The goal of this project is to build an online localisation system that localises a vehicle by using OpenStreetMap data and a record of the driven path. Since the Global Positioning Service (GPS) can only be used reliably when the satellite signal can be clearly received, which is when positioned outside of buildings and clear interfering signals, it is only accessible for a certain group of users. Furthermore, it can only be used to the conditions of the US government, as it is them who maintain the GPS-system. Our project develops an alternative for localisation by using independent data, such as OpenStreetMap data and measurements of the driven vehicle, the odometry. This approach uses a particle filter to localise a vehicle. It is a sampling approach that samples complex posterior densities over state spaces. Samples, called particles, are resampled according to the likelihood of the vehicle being at that position. To calculate this information, the position of a particle is weighted. A chamfer matching function performs this task by comparing the driven odometry to OpenStreetMap data and finding the best matches. Chamfer matching evaluates the match of edges to query image. The more similar the current odometry to the query image the better the match. According to the euclidean distance of the particles to their nearest match, the importance of the particle is measured. The particle filter loops over time and with each measurement update the particles move according to their motion update and conglomerate on the most likely position. Assuming that this approach can work in real time and with high accuracy, it is usable on its own with free accessible and contemporary geodata. For this purpose the vehicle tracks its driven path, for example by wheel odometry, and both, the track and the OpenStreetMap data, are evaluated during the runtime of the program to get the current position. 
We show that the particle filter compensates uncertainties of measurement on the basis of loose measuring by performing a robust sampling update. A novel feature of this approach is that we show that the type of odometry does not matter as the chamfer matching and due to the robustness of the particle filter this can overcome. We demonstrate the located position of the vehicle in comparing it to the GPS position of the vehicle to show the difference and accuracy. Also, we will compare the runtime efficiency of GPS to that of the combination of particle filter and chamfer matching approach.},
    timestamp = {2015.01.19},
    }

  • K. Franz, “Bestimmung der Trajektorie des ATV-4 bei der Separation von der Ariane-5 Oberstufe aus einer Stereo-Bildsequenz,” Bachelor Thesis, 2014.
    [BibTeX]

    The successfull launch of the spacecraft, ATV-4, on June 5, 2013 by the German Aerospace Center (abbr. DLR) and the European Space Agency (abbr. ESA) is a relevant issue in photogrammetric regard. For the first time, the seperation process and the first seconds of space flight of an automated transfer vehicle could be recorded, tracked and supervised by assembling a stereo camerasystem at the Ariane rocket. This monitoring task includes the reconstruction of the ATV’s trajectory in the stereo image sequence. As main goal of this bachelor thesis we developed a routine that derives this trajectory. Our approach is based on object tracking by implementing a KLT-Tracker. First, interesting points have to be detected in a region of interest and be tracked over time. The homologous points in the stereo image partner can be extracted by template matching with subpixel precision. Subsequently, the object coordinates can be calculated by spatial intersection. With the resulting 3D point clouds the motion can be computed. However, numerous analyses have shown, that the reconstruction of the ATV’s trajectory is very insufficient due to the mechanical constellations and also to a missing photogrammetric calibration. To overcome this and to get more suitable data, it was necessary to create a test scenario. This data allow a more realistic validation of our approach. The records of the test scenario are realized with a comparable stereo camerasystem to the DLR configurations. However, a photogrammetric calibration is performed for this system. In addition, long distances to the camerasystems are avoided, since such long distances cause problems in the DLR sequence. Here, the stereoscopical evaluation can fail soon. Nevertheless, the trajectory of the ATV is reconstructable. For this purpose, the stereo image sequences have to be shortened. Only about two thirds of the sequence can be used as input for this method. 
Because of the missing stochastical information the resulting uncertainties can not be adjusted. Especially the implementation on the test data revealed that our approach generates reasonable trajectories.

    @MastersThesis{franz2014,
    title = {Bestimmung der Trajektorie des {ATV-4} bei der Separation von der {Ariane-5} Oberstufe aus einer Stereo-Bildsequenz},
    author = {Katharina Franz},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2014},
    type = {Bachelor Thesis},
    abstract = {The successfull launch of the spacecraft, ATV-4, on June 5, 2013 by the German Aerospace Center (abbr. DLR) and the European Space Agency (abbr. ESA) is a relevant issue in photogrammetric regard. For the first time, the seperation process and the first seconds of space flight of an automated transfer vehicle could be recorded, tracked and supervised by assembling a stereo camerasystem at the Ariane rocket. This monitoring task includes the reconstruction of the ATV's trajectory in the stereo image sequence. As main goal of this bachelor thesis we developed a routine that derives this trajectory. Our approach is based on object tracking by implementing a KLT-Tracker. First, interesting points have to be detected in a region of interest and be tracked over time. The homologous points in the stereo image partner can be extracted by template matching with subpixel precision. Subsequently, the object coordinates can be calculated by spatial intersection. With the resulting 3D point clouds the motion can be computed. However, numerous analyses have shown, that the reconstruction of the ATV's trajectory is very insufficient due to the mechanical constellations and also to a missing photogrammetric calibration. To overcome this and to get more suitable data, it was necessary to create a test scenario. This data allow a more realistic validation of our approach. The records of the test scenario are realized with a comparable stereo camerasystem to the DLR configurations. However, a photogrammetric calibration is performed for this system. In addition, long distances to the camerasystems are avoided, since such long distances cause problems in the DLR sequence. Here, the stereoscopical evaluation can fail soon. Nevertheless, the trajectory of the ATV is reconstructable. For this purpose, the stereo image sequences have to be shortened. Only about two thirds of the sequence can be used as input for this method. 
Because of the missing stochastical information the resulting uncertainties can not be adjusted. Especially the implementation on the test data revealed that our approach generates reasonable trajectories.},
    timestamp = {2014.09.30},
    }

  • R. Hagensieker, R. Roscher, and B. Waske, “Texture-based classification of a tropical forest area using multi-temporal ASAR data,” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS), 2014.
    [BibTeX]
    [none]
    @InProceedings{hagensieker2014texture,
    title = {Texture-based classification of a tropical forest area using multi-temporal {ASAR} data},
    author = {Hagensieker, Ron and Roscher, Ribana and Waske, Bj{\"o}rn},
    booktitle = {IEEE International Geoscience and Remote Sensing Symposium (IGARSS)},
    year = {2014},
    owner = {ribana},
    timestamp = {2014.11.04},
    }

  • K. Herzog, R. Roscher, M. Wieland, A. Kicherer, T. Läbe, W. Förstner, H. Kuhlmann, and R. Töpfer, “Initial steps for high-throughput phenotyping in vineyards,” VITIS – Journal of Grapevine Research, vol. 53, iss. 1, p. 1–8, 2014.
    [BibTeX]

    The evaluation of phenotypic characters of grape- vines is required directly in the vineyard and is strongly limited by time, costs and the subjectivity of person in charge. Sensor-based techniques are prerequisite to al- low non-invasive phenotyping of individual plant traits, to increase the quantity of object records and to reduce error variation. Thus, a Prototype-Image-Acquisition- System (PIAS) was developed for semi-automated cap- ture of geo-referenced RGB images in an experimental vineyard. Different strategies were tested for image in- terpretation using Matlab. The interpretation of imag- es from the vineyard with the real background is more practice-oriented but requires the calculation of depth maps. Images were utilised to verify the phenotyping results of two semi-automated and one automated pro- totype image interpretation framework. The semi-auto- mated procedures enable contactless and non-invasive detection of bud burst and quantification of shoots at an early developmental stage (BBCH 10) and enable fast and accurate determination of the grapevine berry size at BBCH 89. Depending on the time of image ac- quisition at BBCH 10 up to 94 \% of green shoots were visible in images. The mean berry size (BBCH 89) was recorded non-invasively with a precision of 1 mm.

    @Article{herzog2014initial,
    title = {Initial steps for high-throughput phenotyping in vineyards},
    author = {Herzog, Katja and Roscher, Ribana and Wieland, Markus and Kicherer, Anna and L\"abe, Thomas and F\"orstner, Wolfgang and Kuhlmann, Heiner and T\"opfer, Reinhard},
    journal = {VITIS - Journal of Grapevine Research},
    year = {2014},
    month = jan,
    number = {1},
    pages = {1--8},
    volume = {53},
    abstract = {The evaluation of phenotypic characters of grape- vines is required directly in the vineyard and is strongly limited by time, costs and the subjectivity of person in charge. Sensor-based techniques are prerequisite to al- low non-invasive phenotyping of individual plant traits, to increase the quantity of object records and to reduce error variation. Thus, a Prototype-Image-Acquisition- System (PIAS) was developed for semi-automated cap- ture of geo-referenced RGB images in an experimental vineyard. Different strategies were tested for image in- terpretation using Matlab. The interpretation of imag- es from the vineyard with the real background is more practice-oriented but requires the calculation of depth maps. Images were utilised to verify the phenotyping results of two semi-automated and one automated pro- totype image interpretation framework. The semi-auto- mated procedures enable contactless and non-invasive detection of bud burst and quantification of shoots at an early developmental stage (BBCH 10) and enable fast and accurate determination of the grapevine berry size at BBCH 89. Depending on the time of image ac- quisition at BBCH 10 up to 94 \% of green shoots were visible in images. The mean berry size (BBCH 89) was recorded non-invasively with a precision of 1 mm.},
    }

  • S. Ito, F. Endres, M. Kuderer, G. D. Tipaldi, C. Stachniss, and W. Burgard, “W-RGB-D: Floor-Plan-Based Indoor Global Localization Using a Depth Camera and WiFi,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Hong Kong, China, 2014.
    [BibTeX] [PDF]
    [none]
    @InProceedings{ito2014,
    title = {{W-RGB-D}: Floor-Plan-Based Indoor Global Localization Using a Depth Camera and {WiFi}},
    author = {S. Ito and F. Endres and M. Kuderer and G.D. Tipaldi and C. Stachniss and W. Burgard},
    booktitle = icra,
    year = {2014},
    address = {Hong Kong, China},
    url = {https://www2.informatik.uni-freiburg.de/~tipaldi/papers/ito14icra.pdf},
    }

  • R. Kümmerle, M. Ruhnke, B. Steder, C. Stachniss, and W. Burgard, “Autonomous Robot Navigation in Highly Populated Pedestrian Zones,” Journal of Field Robotics, 2014. doi:10.1002/rob.21534
    [BibTeX] [PDF]
    [none]
    @Article{kummerle14jfr,
    title = {Autonomous Robot Navigation in Highly Populated Pedestrian Zones},
    author = {K{\"u}mmerle, Rainer and Ruhnke, Michael and Steder, Bastian and Stachniss, Cyrill and Burgard, Wolfram},
    journal = jfr,
    year = {2014},
    doi = {10.1002/rob.21534},
    timestamp = {2015.01.22},
    url = {https://ais.informatik.uni-freiburg.de/publications/papers/kuemmerle14jfr.pdf},
    }

  • A. Kicherer, R. Roscher, K. Herzog, W. Förstner, and R. Töpfer, “Image based Evaluation for the Detection of Cluster Parameters in Grapevine,” in Acta horticulturae, 2014.
    [BibTeX]
    @inproceedings{kicherer2014evaluation,
      author    = {Kicherer, A. and Roscher, R. and Herzog, K. and F\"orstner, W. and T\"opfer, R.},
      title     = {Image based Evaluation for the Detection of Cluster Parameters in Grapevine},
      booktitle = {Acta horticulturae},
      year      = {2014},
      owner     = {ribana},
      timestamp = {2016.06.20},
    }

  • L. Klingbeil, M. Nieuwenhuisen, J. Schneider, C. Eling, D. Droeschel, D. Holz, T. Läbe, W. Förstner, S. Behnke, and H. Kuhlmann, “Towards Autonomous Navigation of an UAV-based Mobile Mapping System,” in 4th International Conf. on Machine Control & Guidance, 2014, p. 136–147.
    [BibTeX] [PDF]

    For situations, where mapping is neither possible from high altitudes nor from the ground, we are developing an autonomous micro aerial vehicle able to fly at low altitudes in close vicinity of obstacles. This vehicle is based on a MikroKopterTM octocopter platform (maximum total weight: 5kg), and contains a dual frequency GPS board, an IMU, a compass, two stereo camera pairs with fisheye lenses, a rotating 3D laser scanner, 8 ultrasound sensors, a real-time processing unit, and a compact PC for on-board ego-motion estimation and obstacle detection for autonomous navigation. A high-resolution camera is used for the actual mapping task, where the environment is reconstructed in three dimensions from images, using a highly accurate bundle adjustment. In this contribution, we describe the sensor system setup and present results from the evaluation of several aspects of the different subsystems as well as initial results from flight tests.

    @InProceedings{klingbeil14mcg,
    title = {Towards Autonomous Navigation of an {UAV-based} Mobile Mapping System},
    author = {Klingbeil, Lasse and Nieuwenhuisen, Matthias and Schneider, Johannes and Eling, Christian and Droeschel, David and Holz, Dirk and L\"abe, Thomas and F\"orstner, Wolfgang and Behnke, Sven and Kuhlmann, Heiner},
    booktitle = {4th International Conf. on Machine Control \& Guidance},
    year = {2014},
    pages = {136--147},
    abstract = {For situations, where mapping is neither possible from high altitudes nor from the ground, we are developing an autonomous micro aerial vehicle able to fly at low altitudes in close vicinity of obstacles. This vehicle is based on a MikroKopterTM octocopter platform (maximum total weight: 5kg), and contains a dual frequency GPS board, an IMU, a compass, two stereo camera pairs with fisheye lenses, a rotating 3D laser scanner, 8 ultrasound sensors, a real-time processing unit, and a compact PC for on-board ego-motion estimation and obstacle detection for autonomous navigation. A high-resolution camera is used for the actual mapping task, where the environment is reconstructed in three dimensions from images, using a highly accurate bundle adjustment. In this contribution, we describe the sensor system setup and present results from the evaluation of several aspects of the different subsystems as well as initial results from flight tests.},
    url = {https://www.ipb.uni-bonn.de/pdfs/klingbeil14mcg.pdf},
    }

  • B. Mack, R. Roscher, and B. Waske, “Can I trust my one-class classification?,” Remote Sensing, vol. 6, iss. 9, pp. 8779–8802, 2014.
    [BibTeX] [PDF]

    Contrary to binary and multi-class classifiers, the purpose of a one-class classifier for remote sensing applications is to map only one specific land use/land cover class of interest. Training these classifiers exclusively requires reference data for the class of interest, while training data for other classes is not required. Thus, the acquisition of reference data can be significantly reduced. However, one-class classification is fraught with uncertainty and full automatization is difficult, due to the limited reference information that is available for classifier training. Thus, a user-oriented one-class classification strategy is proposed, which is based among others on the visualization and interpretation of the one-class classifier outcomes during the data processing. Careful interpretation of the diagnostic plots fosters the understanding of the classification outcome, e.g., the class separability and suitability of a particular threshold. In the absence of complete and representative validation data, which is the fact in the context of a real one-class classification application, such information is valuable for evaluation and improving the classification. The potential of the proposed strategy is demonstrated by classifying different crop types with hyperspectral data from Hyperion.

    @article{mack2014can,
      author    = {Mack, Benjamin and Roscher, Ribana and Waske, Bj{\"o}rn},
      title     = {Can {I} trust my one-class classification?},
      journal   = {Remote Sensing},
      year      = {2014},
      volume    = {6},
      number    = {9},
      pages     = {8779--8802},
      abstract  = {Contrary to binary and multi-class classifiers, the purpose of a one-class classifier for remote sensing applications is to map only one specific land use/land cover class of interest. Training these classifiers exclusively requires reference data for the class of interest, while training data for other classes is not required. Thus, the acquisition of reference data can be significantly reduced. However, one-class classification is fraught with uncertainty and full automatization is difficult, due to the limited reference information that is available for classifier training. Thus, a user-oriented one-class classification strategy is proposed, which is based among others on the visualization and interpretation of the one-class classifier outcomes during the data processing. Careful interpretation of the diagnostic plots fosters the understanding of the classification outcome, e.g., the class separability and suitability of a particular threshold. In the absence of complete and representative validation data, which is the fact in the context of a real one-class classification application, such information is valuable for evaluation and improving the classification. The potential of the proposed strategy is demonstrated by classifying different crop types with hyperspectral data from Hyperion.},
      owner     = {ribana},
      timestamp = {2014.11.04},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Mack2014Can.pdf},
    }

  • M. Mazuran, G. D. Tipaldi, L. Spinello, W. Burgard, and C. Stachniss, “A Statistical Measure for Map Consistency in SLAM,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Hong Kong, China, 2014.
    [BibTeX] [PDF]
    @inproceedings{mazuran2014icra,
      author    = {M. Mazuran and G.D. Tipaldi and L. Spinello and W. Burgard and C. Stachniss},
      title     = {A Statistical Measure for Map Consistency in SLAM},
      booktitle = icra,
      address   = {Hong Kong, China},
      year      = {2014},
      timestamp = {2014.04.24},
    }

  • T. Naseer, L. Spinello, W. Burgard, and C. Stachniss, “Robust Visual Robot Localization Across Seasons using Network Flows,” in Proc. of the National Conf. on Artificial Intelligence (AAAI), 2014.
    [BibTeX] [PDF]
    [none]
    @inproceedings{naseer2014aaai,
      author    = {Naseer, T. and Spinello, L. and Burgard, W. and Stachniss, C.},
      title     = {Robust Visual Robot Localization Across Seasons using Network Flows},
      booktitle = aaai,
      year      = {2014},
      timestamp = {2014.05.12},
    }

  • F. Nenci, L. Spinello, and C. Stachniss, “Effective Compression of Range Data Streams for Remote Robot Operations using H.264,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2014.
    [BibTeX] [PDF]
    @inproceedings{nenci2014iros,
      author    = {Fabrizio Nenci and Luciano Spinello and Cyrill Stachniss},
      title     = {Effective Compression of Range Data Streams for Remote Robot Operations using H.264},
      booktitle = iros,
      year      = {2014},
    }

  • S. Oßwald, H. Kretzschmar, W. Burgard, and C. Stachniss, “Learning to Give Route Directions from Human Demonstrations,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Hong Kong, China, 2014.
    [BibTeX] [PDF]
    @inproceedings{osswald2014icra,
      author    = {S. O{\ss}wald and H. Kretzschmar and W. Burgard and C. Stachniss},
      title     = {Learning to Give Route Directions from Human Demonstrations},
      booktitle = icra,
      address   = {Hong Kong, China},
      year      = {2014},
    }

  • R. Roscher, K. Herzog, A. Kunkel, A. Kicherer, R. Töpfer, and W. Förstner, “Automated image analysis framework for high-throughput determination of grapevine berry sizes using conditional random fields,” Computers and Electronics in Agriculture, vol. 100, p. 148–158, 2014. doi:10.1016/j.compag.2013.11.008
    [BibTeX]
    @article{roscher2014automated,
      author    = {Roscher, Ribana and Herzog, Katja and Kunkel, Annemarie and Kicherer, Anna and T{\"o}pfer, Reinhard and F{\"o}rstner, Wolfgang},
      title     = {Automated image analysis framework for high-throughput determination of grapevine berry sizes using conditional random fields},
      journal   = {Computers and Electronics in Agriculture},
      publisher = {Elsevier},
      year      = {2014},
      volume    = {100},
      pages     = {148--158},
      doi       = {10.1016/j.compag.2013.11.008},
    }

  • R. Roscher and B. Waske, “Shapelet-based sparse image representation for landcover classification of hyperspectral data,” in IAPR Workshop on Pattern Recognition in Remote Sensing, 2014, p. 1–6.
    [BibTeX] [PDF]

    This paper presents a novel sparse representation-based classifier for landcover mapping of hyperspectral image data. Each image patch is factorized into segmentation patterns, also called shapelets, and patch-specific spectral features. The combination of both is represented in a patch-specific spatial-spectral dictionary, which is used for a sparse coding procedure for the reconstruction and classification of image patches. Hereby, each image patch is sparsely represented by a linear combination of elements out of the dictionary. The set of shapelets is specifically learned for each image in an unsupervised way in order to capture the image structure. The spectral features are assumed to be the training data. The experiments show that the proposed approach shows superior results in comparison to sparse-representation based classifiers that use no or only limited spatial information and behaves competitive or better than state-of-the-art classifiers utilizing spatial information and kernelized sparse representation-based classifiers.

    @inproceedings{roscher2014shapelet,
      author    = {Roscher, Ribana and Waske, Bj{\"o}rn},
      title     = {Shapelet-based sparse image representation for landcover classification of hyperspectral data},
      booktitle = {IAPR Workshop on Pattern Recognition in Remote Sensing},
      year      = {2014},
      pages     = {1--6},
      abstract  = {This paper presents a novel sparse representation-based classifier for landcover mapping of hyperspectral image data. Each image patch is factorized into segmentation patterns, also called shapelets, and patch-specific spectral features. The combination of both is represented in a patch-specific spatial-spectral dictionary, which is used for a sparse coding procedure for the reconstruction and classification of image patches. Hereby, each image patch is sparsely represented by a linear combination of elements out of the dictionary. The set of shapelets is specifically learned for each image in an unsupervised way in order to capture the image structure. The spectral features are assumed to be the training data. The experiments show that the proposed approach shows superior results in comparison to sparse-representation based classifiers that use no or only limited spatial information and behaves competitive or better than state-of-the-art classifiers utilizing spatial information and kernelized sparse representation-based classifiers.},
      owner     = {ribana},
      timestamp = {2014.11.04},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Roscher2014Shapelet.pdf},
    }

  • R. Roscher and B. Waske, “Superpixel-based classification of hyperspectral data using sparse representation and conditional random fields,” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS), 2014.
    [BibTeX] [PDF]

    This paper presents a superpixel-based classifier for landcover mapping of hyperspectral image data. The approach relies on the sparse representation of each pixel by a weighted linear combination of the training data. Spatial information is incorporated by using a coarse patch-based neighborhood around each pixel as well as data-adapted superpixels. The classification is done via a hierarchical conditional random field, which utilizes the sparse-representation output and models spatial and hierarchical structures in the hyperspectral image. The experiments show that the proposed approach results in superior accuracies in comparison to sparse-representation based classifiers that solely use a patch-based neighborhood.

    @inproceedings{roscher2014superpixel,
      author    = {Roscher, Ribana and Waske, Bj{\"o}rn},
      title     = {Superpixel-based classification of hyperspectral data using sparse representation and conditional random fields},
      booktitle = {{IEEE} International Geoscience and Remote Sensing Symposium (IGARSS)},
      year      = {2014},
      abstract  = {This paper presents a superpixel-based classifier for landcover mapping of hyperspectral image data. The approach relies on the sparse representation of each pixel by a weighted linear combination of the training data. Spatial information is incorporated by using a coarse patch-based neighborhood around each pixel as well as data-adapted superpixels. The classification is done via a hierarchical conditional random field, which utilizes the sparse-representation output and models spatial and hierarchical structures in the hyperspectral image. The experiments show that the proposed approach results in superior accuracies in comparison to sparse-representation based classifiers that solely use a patch-based neighborhood.},
      owner     = {ribana},
      timestamp = {2014.11.04},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Roscher2014Superpixel.pdf},
    }

  • J. Schneider and W. Förstner, “Real-time Accurate Geo-localization of a MAV with Omnidirectional Visual Odometry and GPS,” in Computer Vision – ECCV 2014 Workshops, 2014, p. 271–282. doi:10.1007/978-3-319-16178-5_18
    [BibTeX] [PDF]

    This paper presents a system for direct geo-localization of a MAV in an unknown environment using visual odometry and precise real time kinematic (RTK) GPS information. Visual odometry is performed with a multi-camera system with four fisheye cameras that cover a wide field of view which leads to better constraints for localization due to long tracks and a better intersection geometry. Visual observations from the acquired image sequences are refined with a high accuracy on selected keyframes by an incremental bundle adjustment using the iSAM2 algorithm. The optional integration of GPS information yields long-time stability and provides a direct geo-referenced solution. Experiments show the high accuracy which is below 3 cm standard deviation in position.

    @inproceedings{schneider14eccv-ws,
      author    = {J. Schneider and W. F\"orstner},
      title     = {Real-time Accurate Geo-localization of a MAV with Omnidirectional Visual Odometry and GPS},
      booktitle = {Computer Vision - ECCV 2014 Workshops},
      year      = {2014},
      pages     = {271--282},
      doi       = {10.1007/978-3-319-16178-5_18},
      abstract  = {This paper presents a system for direct geo-localization of a MAV in an unknown environment using visual odometry and precise real time kinematic (RTK) GPS information. Visual odometry is performed with a multi-camera system with four fisheye cameras that cover a wide field of view which leads to better constraints for localization due to long tracks and a better intersection geometry. Visual observations from the acquired image sequences are refined with a high accuracy on selected keyframes by an incremental bundle adjustment using the iSAM2 algorithm. The optional integration of GPS information yields long-time stability and provides a direct geo-referenced solution. Experiments show the high accuracy which is below 3 cm standard deviation in position.},
      url       = {https://www.ipb.uni-bonn.de/pdfs/schneider14eccv-ws.pdf},
    }

  • J. Schneider, T. Läbe, and W. Förstner, “Real-Time Bundle Adjustment with an Omnidirectional Multi-Camera System and GPS,” in Proc. of the 4th International Conf. on Machine Control & Guidance, 2014, p. 98–103.
    [BibTeX] [PDF]

    In this paper we present our system for visual odometry that performs a fast incremental bundle adjustment for real-time structure and motion estimation in an unknown scene. It is applicable to image streams of a calibrated multi-camera system with omnidirectional cameras. In this paper we use an autonomously flying octocopter that is equipped for visual odometry and obstacle detection with four fisheye cameras, which provide a large field of view. For real-time ego-motion estimation the platform is equipped, besides the cameras, with a dual frequency GPS board, an IMU and a compass. In this paper we show how we apply our system for visual odometry using the synchronized video streams of the four fisheye cameras. The position and orientation information from the GPS-unit and the inertial sensors can optionally be integrated into our system. We will show the obtained accuracy of pure odometry and compare it with the solution from GPS/INS.

    @inproceedings{schneider14mcg,
      author    = {J. Schneider and T. L\"abe and W. F\"orstner},
      title     = {Real-Time Bundle Adjustment with an Omnidirectional Multi-Camera System and GPS},
      booktitle = {Proc. of the 4th International Conf. on Machine Control \& Guidance},
      year      = {2014},
      pages     = {98--103},
      address   = {Braunschweig},
      abstract  = {In this paper we present our system for visual odometry that performs a fast incremental bundle adjustment for real-time structure and motion estimation in an unknown scene. It is applicable to image streams of a calibrated multi-camera system with omnidirectional cameras. In this paper we use an autonomously flying octocopter that is equipped for visual odometry and obstacle detection with four fisheye cameras, which provide a large field of view. For real-time ego-motion estimation the platform is equipped, besides the cameras, with a dual frequency GPS board, an IMU and a compass. In this paper we show how we apply our system for visual odometry using the synchronized video streams of the four fisheye cameras. The position and orientation information from the GPS-unit and the inertial sensors can optionally be integrated into our system. We will show the obtained accuracy of pure odometry and compare it with the solution from GPS/INS.},
      url       = {https://www.ipb.uni-bonn.de/pdfs/schneider14mcg.pdf},
    }

  • C. Stachniss and W. Burgard, “Particle Filters for Robot Navigation,” Foundations and Trends in Robotics, vol. 3, iss. 4, pp. 211-282, 2014. doi:10.1561/2300000013
    [BibTeX] [PDF]
    [none]
    @article{stachniss2014,
      author    = {C. Stachniss and W. Burgard},
      title     = {Particle Filters for Robot Navigation},
      journal   = fntr,
      year      = {2014},
      volume    = {3},
      number    = {4},
      pages     = {211--282},
      doi       = {10.1561/2300000013},
      note      = {2012, published 2014},
      timestamp = {2014.04.24},
      url       = {https://www.nowpublishers.com/articles/foundations-and-trends-in-robotics/ROB-013},
    }

  • J. Stefanski, O. Chaskovskyy, and B. Waske, “Mapping and monitoring of land use changes in post-Soviet western Ukraine using remote sensing data,” Applied Geography, vol. 55, p. 155–164, 2014. doi:10.1016/j.apgeog.2014.08.003
    [BibTeX]

    While agriculture is expanded and intensified in many parts of the world, decreases in land use intensity and farmland abandonment take place in other parts. Eastern Europe experienced widespread changes of agricultural land use after the collapse of the Soviet Union in 1991, however, rates and patterns of these changes are still not well understood. Our objective was to map and analyze changes of land management regimes, including large-scale cropland, small-scale cropland, and abandoned farmland. Monitoring land management regimes is a promising avenue to better understand the temporal and spatial patterns of land use intensity changes. For mapping and change detection, we used an object-based approach with Superpixel segmentation for delineating objects and a Random Forest classifier. We applied this approach to Landsat and ERS SAR data for the years 1986, 1993, 1999, 2006, and 2010 to estimate change trajectories for this time period in western Ukraine. The first period during the 1990s was characterized by post-socialist transition processes including farmland abandonment and substantial subsistence agriculture. Later on, recultivation processes and the recurrence of industrial, large-scale farming were triggered by global food prices that have led to a growing interest in this region.

    @article{stefanski2014mapping2,
      author   = {Stefanski, Jan and Chaskovskyy, Oleh and Waske, Bj{\"o}rn},
      title    = {Mapping and monitoring of land use changes in post-Soviet western Ukraine using remote sensing data},
      journal  = {Applied Geography},
      year     = {2014},
      volume   = {55},
      pages    = {155--164},
      doi      = {10.1016/j.apgeog.2014.08.003},
      issn     = {0143-6228},
      abstract = {While agriculture is expanded and intensified in many parts of the world, decreases in land use intensity and farmland abandonment take place in other parts. Eastern Europe experienced widespread changes of agricultural land use after the collapse of the Soviet Union in 1991, however, rates and patterns of these changes are still not well understood. Our objective was to map and analyze changes of land management regimes, including large-scale cropland, small-scale cropland, and abandoned farmland. Monitoring land management regimes is a promising avenue to better understand the temporal and spatial patterns of land use intensity changes. For mapping and change detection, we used an object-based approach with Superpixel segmentation for delineating objects and a Random Forest classifier. We applied this approach to Landsat and ERS SAR data for the years 1986, 1993, 1999, 2006, and 2010 to estimate change trajectories for this time period in western Ukraine. The first period during the 1990s was characterized by post-socialist transition processes including farmland abandonment and substantial subsistence agriculture. Later on, recultivation processes and the recurrence of industrial, large-scale farming were triggered by global food prices that have led to a growing interest in this region.},
    }

  • J. Stefanski, T. Kuemmerle, O. Chaskovskyy, P. Griffiths, V. Havryluk, J. Knorn, N. Korol, A. Sieber, and B. Waske, “Mapping Land Management Regimes in Western Ukraine Using Optical and SAR Data,” Remote Sensing, vol. 6, iss. 6, p. 5279–5305, 2014. doi:10.3390/rs6065279
    [BibTeX]

    The global demand for agricultural products is surging due to population growth, more meat-based diets, and the increasing role of bioenergy. Three strategies can increase agricultural production: (1) expanding agriculture into natural ecosystems; (2) intensifying existing farmland; or (3) recultivating abandoned farmland. Because agricultural expansion entails substantial environmental trade-offs, intensification and recultivation are currently gaining increasing attention. Assessing where these strategies may be pursued, however, requires improved spatial information on land use intensity, including where farmland is active and fallow. We developed a framework to integrate optical and radar data in order to advance the mapping of three farmland management regimes: (1) large-scale, mechanized agriculture; (2) small-scale, subsistence agriculture; and (3) fallow or abandoned farmland. We applied this framework to our study area in western Ukraine, a region characterized by marked spatial heterogeneity in management intensity due to the legacies from Soviet land management, the breakdown of the Soviet Union in 1991, and the recent integration of this region into world markets. We mapped land management regimes using a hierarchical, object-based framework. Image segmentation for delineating objects was performed by using the Superpixel Contour algorithm. We then applied Random Forest classification to map land management regimes and validated our map using randomly sampled in-situ data, obtained during an extensive field campaign. Our results showed that farmland management regimes were mapped reliably, resulting in a final map with an overall accuracy of 83.4%. Comparing our land management regimes map with a soil map revealed that most fallow land occurred on soils marginally suited for agriculture, but some areas within our study region contained considerable potential for recultivation. 
Overall, our study highlights the potential for an improved, more nuanced mapping of agricultural land use by combining imagery of different sensors.

    @article{stefanski2014mapping,
      author   = {Stefanski, Jan and Kuemmerle, Tobias and Chaskovskyy, Oleh and Griffiths, Patrick and Havryluk, Vassiliy and Knorn, Jan and Korol, Nikolas and Sieber, Anika and Waske, Bj{\"o}rn},
      title    = {Mapping Land Management Regimes in Western Ukraine Using Optical and SAR Data},
      journal  = {Remote Sensing},
      year     = {2014},
      volume   = {6},
      number   = {6},
      pages    = {5279--5305},
      doi      = {10.3390/rs6065279},
      issn     = {2072-4292},
      owner    = {JanS},
      abstract = {The global demand for agricultural products is surging due to population growth, more meat-based diets, and the increasing role of bioenergy. Three strategies can increase agricultural production: (1) expanding agriculture into natural ecosystems; (2) intensifying existing farmland; or (3) recultivating abandoned farmland. Because agricultural expansion entails substantial environmental trade-offs, intensification and recultivation are currently gaining increasing attention. Assessing where these strategies may be pursued, however, requires improved spatial information on land use intensity, including where farmland is active and fallow. We developed a framework to integrate optical and radar data in order to advance the mapping of three farmland management regimes: (1) large-scale, mechanized agriculture; (2) small-scale, subsistence agriculture; and (3) fallow or abandoned farmland. We applied this framework to our study area in western Ukraine, a region characterized by marked spatial heterogeneity in management intensity due to the legacies from Soviet land management, the breakdown of the Soviet Union in 1991, and the recent integration of this region into world markets. We mapped land management regimes using a hierarchical, object-based framework. Image segmentation for delineating objects was performed by using the Superpixel Contour algorithm. We then applied Random Forest classification to map land management regimes and validated our map using randomly sampled in-situ data, obtained during an extensive field campaign. Our results showed that farmland management regimes were mapped reliably, resulting in a final map with an overall accuracy of 83.4\%. Comparing our land management regimes map with a soil map revealed that most fallow land occurred on soils marginally suited for agriculture, but some areas within our study region contained considerable potential for recultivation. Overall, our study highlights the potential for an improved, more nuanced mapping of agricultural land use by combining imagery of different sensors.},
    }

  • O. Vysotska, B. Frank, I. Ulbert, O. Paul, P. Ruther, C. Stachniss, and W. Burgard, “Automatic Channel Selection and Neural Signal Estimation across Channels of Neural Probes,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), Chicago, USA, 2014.
    [BibTeX] [PDF]
    @inproceedings{vysotska2014iros,
      author    = {O. Vysotska and B. Frank and I. Ulbert and O. Paul and P. Ruther and C. Stachniss and W. Burgard},
      title     = {Automatic Channel Selection and Neural Signal Estimation across Channels of Neural Probes},
      booktitle = iros,
      address   = {Chicago, USA},
      year      = {2014},
    }

  • V. A. Ziparo, G. Castelli, L. Van Gool, G. Grisetti, B. Leibe, M. Proesmans, and C. Stachniss, “The ROVINA Project. Robots for Exploration, Digital Preservation and Visualization of Archeological sites,” in Proc. of the 18th ICOMOS General Assembly and Scientific Symposium “Heritage and Landscape as Human Values”, 2014.
    [BibTeX]
    [none]
    @inproceedings{ziparo14icomosga,
      author    = {Ziparo, V.A. and Castelli, G. and Van Gool, L. and Grisetti, G. and Leibe, B. and Proesmans, M. and Stachniss, C.},
      title     = {The ROVINA Project. Robots for Exploration, Digital Preservation and Visualization of Archeological sites},
      booktitle = {Proc. of the 18th ICOMOS General Assembly and Scientific Symposium ``Heritage and Landscape as Human Values''},
      year      = {2014},
      timestamp = {2015.03.02},
    }

2013

  • N. Abdo, H. Kretzschmar, L. Spinello, and C. Stachniss, “Learning Manipulation Actions from a Few Demonstrations,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Karlsruhe, Germany, 2013.
    [BibTeX] [PDF]
    [none]
    @inproceedings{abdo2013,
      author    = {N. Abdo and H. Kretzschmar and L. Spinello and C. Stachniss},
      title     = {Learning Manipulation Actions from a Few Demonstrations},
      booktitle = icra,
      address   = {Karlsruhe, Germany},
      year      = {2013},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/abdo13icra.pdf},
    }

  • P. Agarwal, G. D. Tipaldi, L. Spinello, C. Stachniss, and W. Burgard, “Dynamic Covariance Scaling for Robust Robotic Mapping,” in ICRA Workshop on Robust and Multimodal Inference in Factor Graphs, Karlsruhe, Germany, 2013.
    [BibTeX] [PDF]
    [none]
    @inproceedings{agarwal2013,
      author    = {P. Agarwal and G.D. Tipaldi and L. Spinello and C. Stachniss and W. Burgard},
      title     = {Dynamic Covariance Scaling for Robust Robotic Mapping},
      booktitle = {ICRA Workshop on Robust and Multimodal Inference in Factor Graphs},
      address   = {Karlsruhe, Germany},
      year      = {2013},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/agarwal13icraws.pdf},
    }

  • P. Agarwal, G. D. Tipaldi, L. Spinello, C. Stachniss, and W. Burgard, “Robust Map Optimization using Dynamic Covariance Scaling,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Karlsruhe, Germany, 2013.
    [BibTeX] [PDF]
    [none]
    @inproceedings{agarwal2013a,
      author    = {P. Agarwal and G.D. Tipaldi and L. Spinello and C. Stachniss and W. Burgard},
      title     = {Robust Map Optimization using Dynamic Covariance Scaling},
      booktitle = icra,
      address   = {Karlsruhe, Germany},
      year      = {2013},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/agarwal13icra.pdf},
    }

  • K. Böhm, “Tiefenbildsegmentierung mit Hilfe geodätischer Distanztransformation,” bachelor thesis, Institute of Photogrammetry, University of Bonn, 2013.
    [BibTeX]
    [none]
    @mastersthesis{bohm2013,
      author    = {B{\"o}hm, Karsten},
      title     = {Tiefenbildsegmentierung mit Hilfe geod{\"a}tischer Distanztransformation},
      school    = {Institute of Photogrammetry, University of Bonn},
      year      = {2013},
      type      = {Bachelor Thesis},
      timestamp = {2014.01.20},
    }

  • A. Barth, J. Siegemund, and J. Schwehr, “Fast and precise localization at stop intersections,” in Intelligent Vehicles Symposium Workshops (IV Workshops), Gold Coast, Australia, 2013, p. 75–80.
    [BibTeX] [PDF]

    This article presents a practical solution for fast and precise localization of a vehicle’s position and orientation with respect to stop sign controlled intersections based on video sequences and mapped data. It consists of two steps. First, an intersection map is generated offline based on street-level imagery and GPS data, collected by a vehicle driving through an intersection from different directions. The map contains both landmarks for localization and information about stop line positions. This information is used in the second step to precisely and efficiently derive a vehicle’s pose in real-time when approaching a mapped intersection. At this point, we only need coarse GPS information to be able to load the proper map data.

    @inproceedings{barth2013fast,
      author    = {Barth, Alexander and Siegemund, Jan and Schwehr, Julian},
      title     = {Fast and precise localization at stop intersections},
      booktitle = {Intelligent Vehicles Symposium Workshops (IV Workshops)},
      publisher = {IEEE},
      address   = {Gold Coast, Australia},
      year      = {2013},
      pages     = {75--80},
      abstract  = {This article presents a practical solution for fast and precise localization of a vehicle's position and orientation with respect to stop sign controlled intersections based on video sequences and mapped data. It consists of two steps. First, an intersection map is generated offline based on street-level imagery and GPS data, collected by a vehicle driving through an intersection from different directions. The map contains both landmarks for localization and information about stop line positions. This information is used in the second step to precisely and efficiently derive a vehicle's pose in real-time when approaching a mapped intersection. At this point, we only need coarse GPS information to be able to load the proper map data.},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Barth2013Fast.pdf},
    }

  • I. Bogoslavskyi, O. Vysotska, J. Serafin, G. Grisetti, and C. Stachniss, “Efficient Traversability Analysis for Mobile Robots using the Kinect Sensor,” in Proc. of the European Conf. on Mobile Robots (ECMR), Barcelona, Spain, 2013.
    [BibTeX] [PDF]
    [none]
    @inproceedings{bogoslavskyi2013,
      author    = {I. Bogoslavskyi and O. Vysotska and J. Serafin and G. Grisetti and C. Stachniss},
      title     = {Efficient Traversability Analysis for Mobile Robots using the Kinect Sensor},
      booktitle = ecmr,
      address   = {Barcelona, Spain},
      year      = {2013},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/bogoslavskyi13ecmr.pdf},
    }

  • W. Burgard and C. Stachniss, “Gestatten, Obelix!,” Forschung – Das Magazin der Deutschen Forschungsgemeinschaft, vol. 1, 2013.
    [BibTeX] [PDF]
    [none]
    @article{burgard2013,
      author    = {W. Burgard and C. Stachniss},
      title     = {Gestatten, Obelix!},
      journal   = {Forschung -- Das Magazin der Deutschen Forschungsgemeinschaft},
      year      = {2013},
      volume    = {1},
      note      = {In German, invited},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/forschung_2013_01-pg4-9.pdf},
    }

  • D. Chai, W. Förstner, and F. Lafarge, “Recovering Line-Networks in Images by Junction-Point Processes,” in Proc. of the IEEE Conf. on Computer Vision and Pattern Recognition, 2013, pp. 1894-1901. doi:10.1109/CVPR.2013.247
    [BibTeX] [PDF]
    [none]
    @inproceedings{chai13recovering,
      author    = {D. Chai and W. F\"orstner and F. Lafarge},
      title     = {Recovering Line-Networks in Images by Junction-Point Processes},
      booktitle = {Proc. of the IEEE Conf. on Computer Vision and Pattern Recognition},
      year      = {2013},
      pages     = {1894--1901},
      doi       = {10.1109/CVPR.2013.247},
      timestamp = {2015.07.14},
      url       = {https://www.ipb.uni-bonn.de/pdfs/chai13recovering.pdf},
    }

  • T. Dickscheid and W. Förstner, “A Trainable Markov Random Field for Low-Level Image Feature Matching with Spatial Relationships,” Photogrammetrie, Fernerkundung, Geoinformation (PFG), vol. 4, p. 269–284, 2013. doi:10.1127/1432-8364/2013/0176
    [BibTeX]

    Many vision applications rely on local features for image analysis, notably in the areas of object recognition, image registration and camera calibration. One important example in photogrammetry are fully automatic algorithms for relative image orientation. Such applications rely on a matching algorithm to extract a sufficient number of correct feature correspondences at acceptable outlier rates, which is most often based on the similarity of feature descriptions. When the number of detected features is low, it is advisable to use multiple feature detectors with complementary properties. When feature similarity is not sufficient for matching, spatial feature relationships provide valuable information. In this work, a highly generic matching algorithm is proposed which is based on a trainable Markov random field (MRF). It is able to incorporate almost arbitrary combinations of features, similarity measures and pairwise spatial relationships, and has a clear statistical interpretation. A major novelty is its ability to compensate for weaknesses in one information cue by implicitely exploiting the strengths of others.

    @article{dickscheid2013trainable,
      title    = {A Trainable {Markov} Random Field for Low-Level Image Feature Matching with Spatial Relationships},
      author   = {Dickscheid, Timo and F\"orstner, Wolfgang},
      journal  = {Photogrammetrie, Fernerkundung, Geoinformation (PFG)},
      year     = {2013},
      volume   = {4},
      pages    = {269--284},
      doi      = {10.1127/1432-8364/2013/0176},
      abstract = {Many vision applications rely on local features for image analysis, notably in the areas of object recognition, image registration and camera calibration. One important example in photogrammetry are fully automatic algorithms for relative image orientation. Such applications rely on a matching algorithm to extract a sufficient number of correct feature correspondences at acceptable outlier rates, which is most often based on the similarity of feature descriptions. When the number of detected features is low, it is advisable to use multiple feature detectors with complementary properties. When feature similarity is not sufficient for matching, spatial feature relationships provide valuable information. In this work, a highly generic matching algorithm is proposed which is based on a trainable Markov random field (MRF). It is able to incorporate almost arbitrary combinations of features, similarity measures and pairwise spatial relationships, and has a clear statistical interpretation. A major novelty is its ability to compensate for weaknesses in one information cue by implicitly exploiting the strengths of others.},
    }

  • W. Förstner, “Graphical Models in Geodesy and Photogrammetry,” Photogrammetrie, Fernerkundung, Geoinformation (PFG), vol. 4, p. 255–268, 2013. doi:10.1127/1432-8364/2013/0175
    [BibTeX]

    The paper gives an introduction into graphical models and their use in specifying stochastic models in geodesy and photogrammetry. Basic task in adjustment theory can intuitively be described and analysed using graphical models. The paper shows that geodetic networks and bundle adjustments can be interpreted as graphical models, both as Bayesian networks or as conditional random fields. Especially hidden Markov random fields and conditional random fields are demonstrated to be versatile models for parameter estimation and classification.

    @article{foerstner2013graphical,
      title    = {Graphical Models in Geodesy and Photogrammetry},
      author   = {F\"orstner, Wolfgang},
      journal  = {Photogrammetrie, Fernerkundung, Geoinformation (PFG)},
      year     = {2013},
      volume   = {4},
      pages    = {255--268},
      doi      = {10.1127/1432-8364/2013/0175},
      abstract = {The paper gives an introduction into graphical models and their use in specifying stochastic models in geodesy and photogrammetry. Basic task in adjustment theory can intuitively be described and analysed using graphical models. The paper shows that geodetic networks and bundle adjustments can be interpreted as graphical models, both as Bayesian networks or as conditional random fields. Especially hidden Markov random fields and conditional random fields are demonstrated to be versatile models for parameter estimation and classification.},
    }

  • W. Förstner, “Photogrammetrische Forschung – Eine Zwischenbilanz aus Bonner Sicht,” Photogrammetrie, Fernerkundung, Geoinformation (PFG), vol. 4, p. 251–254, 2013. doi:10.1127/1432-8364/2013/0186
    [BibTeX]

    Photogrammetrische Forschung – Eine Zwischenbilanz aus Bonner Sicht

    @article{foerstner2013photogrammetrische,
      title    = {Photogrammetrische Forschung -- Eine Zwischenbilanz aus Bonner Sicht},
      author   = {F\"orstner, Wolfgang},
      journal  = {Photogrammetrie, Fernerkundung, Geoinformation (PFG)},
      year     = {2013},
      volume   = {4},
      pages    = {251--254},
      doi      = {10.1127/1432-8364/2013/0186},
      abstract = {Photogrammetrische Forschung -- Eine Zwischenbilanz aus Bonner Sicht},
    }

  • A. Hornung, K. M. Wurm, M. Bennewitz, C. Stachniss, and W. Burgard, “OctoMap: An Efficient Probabilistic 3D Mapping Framework Based on Octrees,” Autonomous Robots, vol. 34, pp. 189-206, 2013.
    [BibTeX] [PDF]
    [none]
    @article{hornung2013,
      title     = {{OctoMap}: An Efficient Probabilistic {3D} Mapping Framework Based on Octrees},
      author    = {A. Hornung and K. M. Wurm and M. Bennewitz and C. Stachniss and W. Burgard},
      journal   = auro,
      year      = {2013},
      volume    = {34},
      number    = {3},
      pages     = {189--206},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/hornung13auro.pdf},
    }

  • R. Kümmerle, M. Ruhnke, B. Steder, C. Stachniss, and W. Burgard, “A Navigation System for Robots Operating in Crowded Urban Environments,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Karlsruhe, Germany, 2013.
    [BibTeX] [PDF]
    [none]
    @inproceedings{kummerle2013,
      title     = {A Navigation System for Robots Operating in Crowded Urban Environments},
      author    = {R. K\"ummerle and M. Ruhnke and B. Steder and C. Stachniss and W. Burgard},
      booktitle = icra,
      year      = {2013},
      address   = {Karlsruhe, Germany},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/kuemmerle13icra.pdf},
    }

  • A. Kicherer, R. Roscher, K. Herzog, S. Šimon, W. Förstner, and R. Töpfer, “BAT (Berry Analysis Tool): A high-throughput image interpretation tool to acquire the number, diameter, and volume of grapevine berries,” Vitis, vol. 52, iss. 3, pp. 129-135, 2013.
    [BibTeX]

    QTL-analysis (quantitative trait loci) and marker development rely on efficient phenotyping techniques. Objectivity and precision of a phenotypic data evaluation is crucial but time consuming. In the present study a high-throughput image interpretation tool was developed to acquire automatically number, size, and volume of grape berries from RGB (red-green-blue) images. Individual berries of one cluster were placed on a defined construction to take a RGB image from the top. The image interpretation of one dataset with an arbitrary number of images occurs automatically by starting the BAT (Berry-Analysis-Tool) developed in MATLAB. For validation of results, the number of berries was counted and their size was measured using a digital calliper. A measuring cylinder was used to determine reliably the berry volume by displacement of water. All placed berries could be counted by BAT 100 % correctly. Manual ratings compared with BAT ratings showed strong correlation of r = 0.964 for mean berry diameter/image and r = 0.984 for berry volume.

    @article{kicherer2013,
      title     = {BAT (Berry Analysis Tool): A high-throughput image interpretation tool to acquire the number, diameter, and volume of grapevine berries},
      author    = {Kicherer, A. and Roscher, R. and Herzog, K. and {\vS}imon, S. and F\"orstner, W. and T\"opfer, R.},
      journal   = {Vitis},
      year      = {2013},
      volume    = {52},
      number    = {3},
      pages     = {129--135},
      abstract  = {QTL-analysis (quantitative trait loci) and marker development rely on efficient phenotyping techniques. Objectivity and precision of a phenotypic data evaluation is crucial but time consuming. In the present study a high-throughput image interpretation tool was developed to acquire automatically number, size, and volume of grape berries from RGB (red-green-blue) images. Individual berries of one cluster were placed on a defined construction to take a RGB image from the top. The image interpretation of one dataset with an arbitrary number of images occurs automatically by starting the BAT (Berry-Analysis-Tool) developed in MATLAB. For validation of results, the number of berries was counted and their size was measured using a digital calliper. A measuring cylinder was used to determine reliably the berry volume by displacement of water. All placed berries could be counted by BAT 100~\% correctly. Manual ratings compared with BAT ratings showed strong correlation of r~=~0.964 for mean berry diameter/image and r~=~0.984 for berry volume.},
      timestamp = {2013.08.14},
    }

  • D. Maier, C. Stachniss, and M. Bennewitz, “Vision-Based Humanoid Navigation Using Self-Supervised Obstacle Detection,” The Intl. Journal of Humanoid Robotics (IJHR), vol. 10, 2013.
    [BibTeX] [PDF]
    [none]
    @article{maier2013,
      title     = {Vision-Based Humanoid Navigation Using Self-Supervised Obstacle Detection},
      author    = {D. Maier and C. Stachniss and M. Bennewitz},
      journal   = ijhr,
      year      = {2013},
      volume    = {10},
      number    = {2},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/maier13ijhr.pdf},
    }

  • M. Nieuwenhuisen, D. Droeschel, J. Schneider, D. Holz, T. Läbe, and S. Behnke, “Multimodal Obstacle Detection and Collision Avoidance for Micro Aerial Vehicles,” in Proc. of the 6th European Conf. on Mobile Robots (ECMR), 2013. doi:10.1109/ECMR.2013.6698812
    [BibTeX] [PDF]

    Reliably perceiving obstacles and avoiding collisions is key for the fully autonomous application of micro aerial vehicles (MAVs). Limiting factors for increasing autonomy and complexity of MAVs (without external sensing and control) are limited onboard sensing and limited onboard processing power. In this paper, we propose a complete system with a multimodal sensor setup for omnidirectional obstacle perception. We developed a lightweight 3D laser scanner setup and visual obstacle detection using wide-angle stereo cameras. Together with our fast reactive collision avoidance approach based on local egocentric grid maps of the environment we aim at safe operation in the vicinity of structures like buildings or vegetation.

    @inproceedings{nieuwenhuisen13ecmr,
      title     = {Multimodal Obstacle Detection and Collision Avoidance for Micro Aerial Vehicles},
      author    = {Nieuwenhuisen, Matthias and Droeschel, David and Schneider, Johannes and Holz, Dirk and L\"abe, Thomas and Behnke, Sven},
      booktitle = {Proc. of the 6th European Conf. on Mobile Robots (ECMR)},
      year      = {2013},
      address   = {Barcelona, Spain},
      abstract  = {Reliably perceiving obstacles and avoiding collisions is key for the fully autonomous application of micro aerial vehicles (MAVs). Limiting factors for increasing autonomy and complexity of MAVs (without external sensing and control) are limited onboard sensing and limited onboard processing power. In this paper, we propose a complete system with a multimodal sensor setup for omnidirectional obstacle perception. We developed a lightweight 3D laser scanner setup and visual obstacle detection using wide-angle stereo cameras. Together with our fast reactive collision avoidance approach based on local egocentric grid maps of the environment we aim at safe operation in the vicinity of structures like buildings or vegetation.},
      doi       = {10.1109/ECMR.2013.6698812},
      url       = {https://www.ais.uni-bonn.de/papers/ECMR_2013_Nieuwenhuisen_Multimodal_Obstacle_Avoidance.pdf},
    }

  • J. C. Rose, “Automatische Lokalisierung einer Drohne in einer Karte,” Master Thesis, 2013.
    [BibTeX]

    \textbf{Summary} The number of scientific contributions dealing with automatic vision based localization of mobile robots is significant. For a long time contributions have focused on mobile ground robots almost solely but with the new availability of civilly useable UAVs (Unmanned Aerial Vehicles) an interest in adapting the known methods for airworthy vehicles has risen. This work deals with developing a program system called LOCALIZE for determining a full 6DOF (Degree of Freedom) position of an UAV in a metric map using vision whereby the metric map is constituted landmarks derived of SIFT-points. Position determination is reached over solving the correspondence problem between SIFT-points detected in the current image of the vision sensor and the landmarks. The potential of LOCALIZE concerning precision and accuracy of the determined position is evaluated in empirical studies using two vision sensors. Experiments demonstrate a dependency of the precision from the quality of the vision sensor. When using a high quality sensor a point error in position determination of about 1-3 cm and an accuracy of 1-7 cm can be reached. \textbf{Zusammenfassung} Die Anzahl wissenschaftlicher Beiträge zur automatischen Lokalisierung mobiler Roboter mittels Bildsensoren ist beträchtlich. Viele Beiträge fokussierten sich dabei lange auf die Untersuchung bodenbeschränkter Roboter. Im Laufe der letzten Jahre wuchs jedoch die Bedeutung der UAVs (Unmanned Aerial Vehicle) auch für zivile Anwendungen und damit das Interesse an einer Adaption der bisherigen Methoden an flugfähigen Robotern. In dieser Arbeit wird ein Programmsystem LOCALIZE für die 3D-Positionsbestimmung (mit allen 6 Freiheitsgraden) eines UAV mittels eines optischen Systems entworfen und sein Potential in empirischen Testszenarien evaluiert. 
Die Positionierung des Roboters geschieht dabei innerhalb einer a priori erstellten metrischen Karte eines Innenraums, die sich aus über den SIFT-Algorithmus abgeleiteten Landmarken konstituiert. Die Lokalisierung geschieht über Korrespondenzfindung zwischen den im aktuellen Bild des Roboters extrahierten und den Landmarken. Anhand der korrespondierenden Punkte in beiden Systemen wird ein iterativer Räumlicher Rückwärtsschnitt zur Positionsbestimmung verwendet. LOCALIZE wird anhand zweier Bildsensoren hinsichtlich potentieller Präzision und Richtigkeit der Positionsbestimmung untersucht. Die Experimente demonstrieren eine Abhängigkeit der Präzision von der Qualität des Bildsensors. Bei Verwendung eines hochwertigen Bildsensors kann ein Punktfehler der Positionierung von rund 1-3 cm und eine Richtigkeit von 1-7 cm erreicht werden.

    @mastersthesis{rose2013automatische,
      title    = {Automatische Lokalisierung einer Drohne in einer Karte},
      author   = {Rose, Johann Christian},
      school   = {University of Bonn},
      year     = {2013},
      address  = {Bonn},
      note     = {Betreuung: Prof. Dr. Bj\"orn Waske, Johannes Schneider},
      abstract = {\textbf{Summary} The number of scientific contributions dealing with automatic vision based localization of mobile robots is significant. For a long time contributions have focused on mobile ground robots almost solely but with the new availability of civilly useable UAVs (Unmanned Aerial Vehicles) an interest in adapting the known methods for airworthy vehicles has risen. This work deals with developing a program system called LOCALIZE for determining a full 6DOF (Degree of Freedom) position of an UAV in a metric map using vision whereby the metric map is constituted landmarks derived of SIFT-points. Position determination is reached over solving the correspondence problem between SIFT-points detected in the current image of the vision sensor and the landmarks. The potential of LOCALIZE concerning precision and accuracy of the determined position is evaluated in empirical studies using two vision sensors. Experiments demonstrate a dependency of the precision from the quality of the vision sensor. When using a high quality sensor a point error in position determination of about 1-3 cm and an accuracy of 1-7 cm can be reached. \textbf{Zusammenfassung} Die Anzahl wissenschaftlicher Beitr\"age zur automatischen Lokalisierung mobiler Roboter mittels Bildsensoren ist betr\"achtlich. Viele Beitr\"age fokussierten sich dabei lange auf die Untersuchung bodenbeschr\"ankter Roboter. Im Laufe der letzten Jahre wuchs jedoch die Bedeutung der UAVs (Unmanned Aerial Vehicle) auch f\"ur zivile Anwendungen und damit das Interesse an einer Adaption der bisherigen Methoden an flugf\"ahigen Robotern. In dieser Arbeit wird ein Programmsystem LOCALIZE f\"ur die 3D-Positionsbestimmung (mit allen 6 Freiheitsgraden) eines UAV mittels eines optischen Systems entworfen und sein Potential in empirischen Testszenarien evaluiert. Die Positionierung des Roboters geschieht dabei innerhalb einer a priori erstellten metrischen Karte eines Innenraums, die sich aus \"uber den SIFT-Algorithmus abgeleiteten Landmarken konstituiert. Die Lokalisierung geschieht \"uber Korrespondenzfindung zwischen den im aktuellen Bild des Roboters extrahierten und den Landmarken. Anhand der korrespondierenden Punkte in beiden Systemen wird ein iterativer R\"aumlicher R\"uckw\"artsschnitt zur Positionsbestimmung verwendet. LOCALIZE wird anhand zweier Bildsensoren hinsichtlich potentieller Pr\"azision und Richtigkeit der Positionsbestimmung untersucht. Die Experimente demonstrieren eine Abh\"angigkeit der Pr\"azision von der Qualit\"at des Bildsensors. Bei Verwendung eines hochwertigen Bildsensors kann ein Punktfehler der Positionierung von rund 1-3 cm und eine Richtigkeit von 1-7 cm erreicht werden.},
    }

  • S. Schallenberg, “Erfassung des Landbedeckungswandels im Rheinischen Braunkohlerevier mittels Landsat-Satellitendaten,” Bachelor Thesis, 2013.
    [BibTeX]
    [none]
    @mastersthesis{schallenberg2013,
      title     = {Erfassung des Landbedeckungswandels im Rheinischen Braunkohlerevier mittels Landsat-Satellitendaten},
      author    = {Schallenberg, Sebastian},
      school    = {Institute of Photogrammetry, University of Bonn},
      year      = {2013},
      type      = {Bachelor Thesis},
      note      = {Betreuung: Prof. Dr. Bj\"orn Waske, M.Sc. Jan Stefanski},
      timestamp = {2014.01.20},
    }

  • F. Schindler, “Man-Made Surface Structures from Triangulated Point-Clouds,” PhD Thesis, 2013.
    [BibTeX] [PDF]

    Photogrammetry aims at reconstructing shape and dimensions of objects captured with cameras, 3D laser scanners or other spatial acquisition systems. While many acquisition techniques deliver triangulated point clouds with millions of vertices within seconds, the interpretation is usually left to the user. Especially when reconstructing man-made objects, one is interested in the underlying surface structure, which is not inherently present in the data. This includes the geometric shape of the object, e.g. cubical or cylindrical, as well as corresponding surface parameters, e.g. width, height and radius. Applications are manifold and range from industrial production control to architectural on-site measurements to large-scale city models. The goal of this thesis is to automatically derive such surface structures from triangulated 3D point clouds of man-made objects. They are defined as a compound of planar or curved geometric primitives. Model knowledge about typical primitives and relations between adjacent pairs of them should affect the reconstruction positively. After formulating a parametrized model for man-made surface structures, we develop a reconstruction framework with three processing steps: During a fast pre-segmentation exploiting local surface properties we divide the given surface mesh into planar regions. Making use of a model selection scheme based on minimizing the description length, this surface segmentation is free of control parameters and automatically yields an optimal number of segments. A subsequent refinement introduces a set of planar or curved geometric primitives and hierarchically merges adjacent regions based on their joint description length. A global classification and constraint parameter estimation combines the data-driven segmentation with high-level model knowledge. 
Therefore, we represent the surface structure with a graphical model and formulate factors based on likelihood as well as prior knowledge about parameter distributions and class probabilities. We infer the most probable setting of surface and relation classes with belief propagation and estimate an optimal surface parametrization with constraints induced by inter-regional relations. The process is specifically designed to work on noisy data with outliers and a few exceptional freeform regions not describable with geometric primitives. It yields full 3D surface structures with watertightly connected surface primitives of different types. The performance of the proposed framework is experimentally evaluated on various data sets. On small synthetically generated meshes we analyze the accuracy of the estimated surface parameters, the sensitivity w.r.t. various properties of the input data and w.r.t. model assumptions as well as the computational complexity. Additionally we demonstrate the flexibility w.r.t. different acquisition techniques on real data sets. The proposed method turns out to be accurate, reasonably fast and little sensitive to defects in the data or imprecise model assumptions.

    @phdthesis{schindler2013:man-made,
      title     = {Man-Made Surface Structures from Triangulated Point-Clouds},
      author    = {Schindler, Falko},
      school    = {Department of Photogrammetry, University of Bonn},
      year      = {2013},
      abstract  = {Photogrammetry aims at reconstructing shape and dimensions of objects captured with cameras, 3D laser scanners or other spatial acquisition systems. While many acquisition techniques deliver triangulated point clouds with millions of vertices within seconds, the interpretation is usually left to the user. Especially when reconstructing man-made objects, one is interested in the underlying surface structure, which is not inherently present in the data. This includes the geometric shape of the object, e.g. cubical or cylindrical, as well as corresponding surface parameters, e.g. width, height and radius. Applications are manifold and range from industrial production control to architectural on-site measurements to large-scale city models. The goal of this thesis is to automatically derive such surface structures from triangulated 3D point clouds of man-made objects. They are defined as a compound of planar or curved geometric primitives. Model knowledge about typical primitives and relations between adjacent pairs of them should affect the reconstruction positively. After formulating a parametrized model for man-made surface structures, we develop a reconstruction framework with three processing steps: During a fast pre-segmentation exploiting local surface properties we divide the given surface mesh into planar regions. Making use of a model selection scheme based on minimizing the description length, this surface segmentation is free of control parameters and automatically yields an optimal number of segments. A subsequent refinement introduces a set of planar or curved geometric primitives and hierarchically merges adjacent regions based on their joint description length. A global classification and constraint parameter estimation combines the data-driven segmentation with high-level model knowledge. Therefore, we represent the surface structure with a graphical model and formulate factors based on likelihood as well as prior knowledge about parameter distributions and class probabilities. We infer the most probable setting of surface and relation classes with belief propagation and estimate an optimal surface parametrization with constraints induced by inter-regional relations. The process is specifically designed to work on noisy data with outliers and a few exceptional freeform regions not describable with geometric primitives. It yields full 3D surface structures with watertightly connected surface primitives of different types. The performance of the proposed framework is experimentally evaluated on various data sets. On small synthetically generated meshes we analyze the accuracy of the estimated surface parameters, the sensitivity w.r.t. various properties of the input data and w.r.t. model assumptions as well as the computational complexity. Additionally we demonstrate the flexibility w.r.t. different acquisition techniques on real data sets. The proposed method turns out to be accurate, reasonably fast and little sensitive to defects in the data or imprecise model assumptions.},
      timestamp = {2013.11.26},
      url       = {https://hss.ulb.uni-bonn.de/2013/3435/3435.htm},
    }

  • F. Schindler, Ein LaTeX-Kochbuch, 2013.
    [BibTeX] [PDF]

    Dieses Dokument fasst die wichtigsten LaTeX-Befehle und -Konstrukte zusammen, die man für das Verfassen von wissenschaftlichen Arbeiten benötigt. Auf aktuelle und umfangreichere Dokumentationen wird verwiesen. Auf die Installation von LaTeX und einem Editor (Empfehlung: TeX-Maker) sowie den grundsätzlichen Kompiliervorgang wird nicht weiter eingegangen. Alle Beispiele sind vollständig aufgeführt und dem Dokument als TEX-Datei angehängt (Aufruf über Büroklammer-Symbol am Seitenrand). Sie sollten sich problemlos übersetzen lassen und liefern das daneben oder darunter abgebildete Ergebnis. Lediglich die Seitenränder wurden aus Platzgründen mehr oder weniger großzügig abgeschnitten.

    @misc{schindler2013latex,
      title    = {Ein {LaTeX}-Kochbuch},
      author   = {Falko Schindler},
      month    = mar,
      year     = {2013},
      abstract = {Dieses Dokument fasst die wichtigsten LaTeX-Befehle und -Konstrukte zusammen, die man f\"ur das Verfassen von wissenschaftlichen Arbeiten ben\"otigt. Auf aktuelle und umfangreichere Dokumentationen wird verwiesen. Auf die Installation von LaTeX und einem Editor (Empfehlung: TeX-Maker) sowie den grunds\"atzlichen Kompiliervorgang wird nicht weiter eingegangen. Alle Beispiele sind vollst\"andig aufgef\"uhrt und dem Dokument als TEX-Datei angeh\"angt (Aufruf \"uber B\"uroklammer-Symbol am Seitenrand). Sie sollten sich problemlos \"ubersetzen lassen und liefern das daneben oder darunter abgebildete Ergebnis. Lediglich die Seitenr\"ander wurden aus Platzgr\"unden mehr oder weniger gro{\ss}z\"ugig abgeschnitten.},
      url      = {https://www.ipb.uni-bonn.de/pdfs/Schindler2013Latex.pdf},
    }

  • F. Schindler and W. Förstner, “DijkstraFPS: Graph Partitioning in Geometry and Image Processing,” Photogrammetrie, Fernerkundung, Geoinformation (PFG), vol. 4, p. 285–296, 2013. doi:10.1127/1432-8364/2013/0177
    [BibTeX]

    Data partitioning is a common problem in the field of point cloud and image processing applicable to segmentation and clustering. The general principle is to have high similarity of two data points, e.g. pixels or 3D points, within one region and low similarity among regions. This pair-wise similarity between data points can be represented in an attributed graph. In this article we propose a novel graph partitioning algorithm. It integrates a sampling strategy known as farthest point sampling with Dijkstra’s algorithm for deriving a distance transform on a general graph, which does not need to be embedded in some space. According to the pair-wise attributes a Voronoi diagram on the graph is generated yielding the desired segmentation. We demonstrate our approach on various applications such as surface triangulation, surface segmentation, clustering and image segmentation.

    @article{schindler2013dijkstrafps,
      title    = {{DijkstraFPS}: Graph Partitioning in Geometry and Image Processing},
      author   = {Schindler, Falko and F\"orstner, Wolfgang},
      journal  = {Photogrammetrie, Fernerkundung, Geoinformation (PFG)},
      year     = {2013},
      volume   = {4},
      pages    = {285--296},
      doi      = {10.1127/1432-8364/2013/0177},
      abstract = {Data partitioning is a common problem in the field of point cloud and image processing applicable to segmentation and clustering. The general principle is to have high similarity of two data points, e.g. pixels or 3D points, within one region and low similarity among regions. This pair-wise similarity between data points can be represented in an attributed graph. In this article we propose a novel graph partitioning algorithm. It integrates a sampling strategy known as farthest point sampling with Dijkstra's algorithm for deriving a distance transform on a general graph, which does not need to be embedded in some space. According to the pair-wise attributes a Voronoi diagram on the graph is generated yielding the desired segmentation. We demonstrate our approach on various applications such as surface triangulation, surface segmentation, clustering and image segmentation.},
    }

  • J. Schneider and W. Förstner, “Bundle Adjustment and System Calibration with Points at Infinity for Omnidirectional Camera Systems,” Z. f. Photogrammetrie, Fernerkundung und Geoinformation, vol. 4, p. 309–321, 2013. doi:10.1127/1432-8364/2013/0179
    [BibTeX] [PDF]

    We present a calibration method for multi-view cameras that provides a rigorous maximum likelihood estimation of the mutual orientation of the cameras within a rigid multi-camera system. No calibration targets are needed, just a movement of the multi-camera system taking synchronized images of a highly textured and static scene. Multi-camera systems with non-overlapping views have to be rotated within the scene so that corresponding points are visible in different cameras at different times of exposure. By using an extended version of the projective collinearity equation all estimates can be optimized in one bundle adjustment where we constrain the relative poses of the cameras to be fixed. For stabilizing camera orientations – especially rotations – one should generally use points at the horizon within the bundle adjustment, which classical bundle adjustment programs are not capable of. We use a minimal representation of homogeneous coordinates for image and scene points which allows us to use images of omnidirectional cameras with single viewpoint like fisheye cameras and scene points at a large distance from the camera or even at infinity. We show results of our calibration method on (1) the omnidirectional multi-camera system Ladybug 3 from Point Grey, (2) a camera-rig with five cameras used for the acquisition of complex 3D structures and (3) a camera-rig mounted on a UAV consisting of four fisheye cameras which provide a large field of view and which is used for visual odometry and obstacle detection in the project MoD (DFG-Project FOR 1505 “Mapping on Demand”).

    @article{schneider13pfg,
      title    = {Bundle Adjustment and System Calibration with Points at Infinity for Omnidirectional Camera Systems},
      author   = {J. Schneider and W. F\"orstner},
      journal  = {Photogrammetrie, Fernerkundung, Geoinformation (PFG)},
      year     = {2013},
      volume   = {4},
      pages    = {309--321},
      doi      = {10.1127/1432-8364/2013/0179},
      abstract = {We present a calibration method for multi-view cameras that provides a rigorous maximum likelihood estimation of the mutual orientation of the cameras within a rigid multi-camera system. No calibration targets are needed, just a movement of the multi-camera system taking synchronized images of a highly textured and static scene. Multi-camera systems with non-overlapping views have to be rotated within the scene so that corresponding points are visible in different cameras at different times of exposure. By using an extended version of the projective collinearity equation all estimates can be optimized in one bundle adjustment where we constrain the relative poses of the cameras to be fixed. For stabilizing camera orientations - especially rotations - one should generally use points at the horizon within the bundle adjustment, which classical bundle adjustment programs are not capable of. We use a minimal representation of homogeneous coordinates for image and scene points which allows us to use images of omnidirectional cameras with single viewpoint like fisheye cameras and scene points at a large distance from the camera or even at infinity. We show results of our calibration method on (1) the omnidirectional multi-camera system Ladybug 3 from Point Grey, (2) a camera-rig with five cameras used for the acquisition of complex 3D structures and (3) a camera-rig mounted on a UAV consisting of four fisheye cameras which provide a large field of view and which is used for visual odometry and obstacle detection in the project MoD (DFG-Project FOR 1505 "Mapping on Demand").},
      url      = {https://www.dgpf.de/pfg/2013/pfg2013_4_schneider.pdf},
    }

  • J. Schneider, T. Läbe, and W. Förstner, “Incremental Real-time Bundle Adjustment for Multi-camera Systems with Points at Infinity,” in ISPRS Archives of Photogrammetry, Remote Sensing and Spatial Information Sciences, 2013, pp. 355-360. doi:10.5194/isprsarchives-XL-1-W2-355-2013
    [BibTeX] [PDF]

    This paper presents a concept and first experiments on a keyframe-based incremental bundle adjustment for real-time structure and motion estimation in an unknown scene. In order to avoid periodic batch steps, we use the software iSAM2 for sparse nonlinear incremental optimization, which is highly efficient through incremental variable reordering and fluid relinearization. We adapted the software to allow for (1) multi-view cameras by taking the rigid transformation between the cameras into account, (2) omni-directional cameras as it can handle arbitrary bundles of rays and (3) scene points at infinity, which improve the estimation of the camera orientation as points at the horizon can be observed over long periods of time. The real-time bundle adjustment refers to sets of keyframes, consisting of frames, one per camera, taken in a synchronized way, that are initiated if a minimal geometric distance to the last keyframe set is exceeded. It uses interest points in the keyframes as observations, which are tracked in the synchronized video streams of the individual cameras and matched across the cameras, if possible. First experiments show the potential of the incremental bundle adjustment \wrt time requirements. Our experiments are based on a multi-camera system with four fisheye cameras, which are mounted on a UAV as two stereo pairs, one looking ahead and one looking backwards, providing a large field of view.

    @InProceedings{schneider13isprs,
    title = {Incremental Real-time Bundle Adjustment for Multi-camera Systems with Points at Infinity},
    author = {J. Schneider and T. L{\"a}be and W. F{\"o}rstner},
    booktitle = {ISPRS Archives of Photogrammetry, Remote Sensing and Spatial Information Sciences},
    year = {2013},
    pages = {355--360},
    volume = {XL-1/W2},
    abstract = {This paper presents a concept and first experiments on a keyframe-based incremental bundle adjustment for real-time structure and motion estimation in an unknown scene. In order to avoid periodic batch steps, we use the software iSAM2 for sparse nonlinear incremental optimization, which is highly efficient through incremental variable reordering and fluid relinearization. We adapted the software to allow for (1) multi-view cameras by taking the rigid transformation between the cameras into account, (2) omni-directional cameras as it can handle arbitrary bundles of rays and (3) scene points at infinity, which improve the estimation of the camera orientation as points at the horizon can be observed over long periods of time. The real-time bundle adjustment refers to sets of keyframes, consisting of frames, one per camera, taken in a synchronized way, that are initiated if a minimal geometric distance to the last keyframe set is exceeded. It uses interest points in the keyframes as observations, which are tracked in the synchronized video streams of the individual cameras and matched across the cameras, if possible. First experiments show the potential of the incremental bundle adjustment w.r.t. time requirements. Our experiments are based on a multi-camera system with four fisheye cameras, which are mounted on a UAV as two stereo pairs, one looking ahead and one looking backwards, providing a large field of view.},
    doi = {10.5194/isprsarchives-XL-1-W2-355-2013},
    url = {https://www.int-arch-photogramm-remote-sens-spatial-inf-sci.net/XL-1-W2/355/2013/isprsarchives-XL-1-W2-355-2013.pdf},
    }

  • J. Siegemund, “Street Surfaces and Boundaries from Depth Image Sequences using Probabilistic Models,” PhD Thesis, 2013.
    [BibTeX] [PDF]

    This thesis presents an approach for the detection and reconstruction of street surfaces and boundaries from depth image sequences. Active driver assistance systems which monitor and interpret the environment based on vehicle mounted sensors to support the driver embody a current research focus of the automotive industry. An essential task of these systems is the modeling of the vehicle’s static environment. This comprises the determination of the vertical slope and curvature characteristics of the street surface as well as the robust detection of obstacles and, thus, the free drivable space (alias free-space). In this regard, obstacles of low height, e.g. curbs, are of special interest since they often embody the first geometric delimiter of the free-space. The usage of depth images acquired from stereo camera systems becomes more important in this context due to the high data rate and affordable price of the sensor. However, recent approaches for object detection are often limited to the detection of objects which are distinctive in height, such as cars and guardrails, or explicitly address the detection of particular object classes. These approaches are usually based on extremely restrictive assumptions, such as planar street surfaces, in order to deal with the high measurement noise. The main contribution of this thesis is the development, analysis and evaluation of an approach which detects the free-space in the immediate maneuvering area in front of the vehicle and explicitly models the free-space boundary by means of a spline curve. The approach considers in particular obstacles of low height (higher than 10 cm) without limitation on particular object classes. Furthermore, the approach has the ability to cope with various slope and curvature characteristics of the observed street surface and is able to reconstruct this surface by means of a flexible spline model. 
In order to allow for robust results despite the flexibility of the model and the high measurement noise, the approach employs probabilistic models for the preprocessing of the depth map data as well as for the detection of the drivable free-space. An elevation model is computed from the depth map considering the paths of the optical rays and the uncertainty of the depth measurements. Based on this elevation model, an iterative two step approach is performed which determines the drivable free-space by means of a Markov Random Field and estimates the spline parameters of the free-space boundary curve and the street surface. Outliers in the elevation data are explicitly modeled. The performance of the overall approach and the influence of key components are systematically evaluated within experiments on synthetic and real world test scenarios. The results demonstrate the ability of the approach to accurately model the boundary of the drivable free-space as well as the street surface even in complex scenarios with multiple obstacles or strong curvature of the street surface. The experiments further reveal the limitations of the approach, which are discussed in detail. Zusammenfassung Sch\E4tzung von Stra\DFenoberfl\E4chen und -begrenzungen aus Sequenzen von Tiefenkarten unter Verwendung probabilistischer Modelle Diese Arbeit pr\E4sentiert ein Verfahren zur Detektion und Rekonstruktion von Stra\DFenoberfl\E4chen und -begrenzungen auf der Basis von Tiefenkarten. Aktive Fahrerassistenzsysteme, welche mit der im Fahrzeug verbauten Sensorik die Umgebung erfassen, interpretieren und den Fahrer unterst\FCtzen, sind ein aktueller Forschungsschwerpunkt der Fahrzeugindustrie. Eine wesentliche Aufgabe dieser Systeme ist die Modellierung der statischen Fahrzeugumgebung. Dies beinhaltet die Bestimmung der vertikalen Neigungs- und Kr\FCmmungseigenschaften der Fahrbahn, sowie die robuste Detektion von Hindernissen und somit des befahrbaren Freiraumes. 
Hindernisse von geringer H\F6he, wie z.B. Bordsteine, sind in diesem Zusammenhang von besonderem Interesse, da sie h\E4ufig die erste geometrische Begrenzung des Fahrbahnbereiches darstellen. In diesem Kontext gewinnt die Verwendung von Tiefenkarten aus Stereo-Kamera-Systemen wegen der hohen Datenrate und relativ geringen Kosten des Sensors zunehmend an Bedeutung. Aufgrund des starken Messrauschens beschr\E4nken sich herk\F6mmliche Verfahren zur Hinderniserkennung jedoch meist auf erhabene Objekte wie Fahrzeuge oder Leitplanken, oder aber adressieren einzelne Objektklassen wie Bordsteine explizit. Dazu werden h\E4ufig extrem restriktive Annahmen verwendet wie z.B. planare Stra \DFenoberfl\E4chen. Der Hauptbeitrag dieser Arbeit besteht in der Entwicklung, Analyse und Evaluation eines Verfahrens, welches den befahrbaren Freiraum im Nahbereich des Fahrzeugs detektiert und dessen Begrenzung mit Hilfe einer Spline-Kurve explizit modelliert. Das Verfahren ber\FCcksichtigt insbesondere Hindernisse geringer H\F6he (gr\F6\DFer als 10 cm) ohne Beschr\E4nkung auf bestimmte Objektklassen. Weiterhin ist das Verfahren in der Lage, mit verschiedenartigen Neigungs- und Kr\FCmmungseigenschaften der vor dem Fahrzeug liegenden Fahrbahnoberfl\E4che umzugehen und diese durch Verwendung eines flexiblen Spline-Modells zu rekonstruieren. Um trotz der hohen Flexibilit\E4t des Modells und des hohen Messrauschens robuste Ergebnisse zu erzielen, verwendet das Verfahren probabilistische Modelle zur Vorverarbeitung der Eingabedaten und zur Detektion des befahrbaren Freiraumes. Aus den Tiefenkarten wird unter Ber\FCcksichtigung der Strahleng\E4nge und Unsicherheiten der Tiefenmessungen ein H\F6henmodell berechnet. In einem iterativen Zwei-Schritt-Verfahren werden anhand dieses H\F6henmodells der befahrbare Freiraum mit Hilfe eines Markov-Zufallsfeldes bestimmt sowie die Parameter der begrenzenden Spline-Kurve und Stra \DFenoberfl\E4che gesch\E4tzt. 
Ausreißer in den Höhendaten werden dabei explizit modelliert. Die Leistungsfähigkeit des Gesamtverfahrens sowie der Einfluss zentraler Komponenten wird im Rahmen von Experimenten auf synthetischen und realen Testszenen systematisch analysiert. Die Ergebnisse demonstrieren die Fähigkeit des Verfahrens, die Begrenzung des befahrbaren Freiraumes sowie die Fahrbahnoberfläche selbst in komplexen Szenarien mit multiplen Hindernissen oder starker Fahrbahnkrümmung akkurat zu modellieren. Weiterhin werden die Grenzen des Verfahrens aufgezeigt und detailliert untersucht.

    @PhDThesis{siegemund2013,
    title = {Street Surfaces and Boundaries from Depth Image Sequences using Probabilistic Models},
    author = {Siegemund, Jan},
    school = {Department of Photogrammetry, University of Bonn},
    year = {2013},
    abstract = {This thesis presents an approach for the detection and reconstruction of street surfaces and boundaries from depth image sequences. Active driver assistance systems which monitor and interpret the environment based on vehicle mounted sensors to support the driver embody a current research focus of the automotive industry. An essential task of these systems is the modeling of the vehicle's static environment. This comprises the determination of the vertical slope and curvature characteristics of the street surface as well as the robust detection of obstacles and, thus, the free drivable space (alias free-space). In this regard, obstacles of low height, e.g. curbs, are of special interest since they often embody the first geometric delimiter of the free-space. The usage of depth images acquired from stereo camera systems becomes more important in this context due to the high data rate and affordable price of the sensor. However, recent approaches for object detection are often limited to the detection of objects which are distinctive in height, such as cars and guardrails, or explicitly address the detection of particular object classes. These approaches are usually based on extremely restrictive assumptions, such as planar street surfaces, in order to deal with the high measurement noise. The main contribution of this thesis is the development, analysis and evaluation of an approach which detects the free-space in the immediate maneuvering area in front of the vehicle and explicitly models the free-space boundary by means of a spline curve. The approach considers in particular obstacles of low height (higher than 10 cm) without limitation on particular object classes. Furthermore, the approach has the ability to cope with various slope and curvature characteristics of the observed street surface and is able to reconstruct this surface by means of a flexible spline model. In order to allow for robust results despite the flexibility of the model and the high measurement noise, the approach employs probabilistic models for the preprocessing of the depth map data as well as for the detection of the drivable free-space. An elevation model is computed from the depth map considering the paths of the optical rays and the uncertainty of the depth measurements. Based on this elevation model, an iterative two step approach is performed which determines the drivable free-space by means of a Markov Random Field and estimates the spline parameters of the free-space boundary curve and the street surface. Outliers in the elevation data are explicitly modeled. The performance of the overall approach and the influence of key components are systematically evaluated within experiments on synthetic and real world test scenarios. The results demonstrate the ability of the approach to accurately model the boundary of the drivable free-space as well as the street surface even in complex scenarios with multiple obstacles or strong curvature of the
    street surface. The experiments further reveal the limitations of the approach, which are discussed in detail. Zusammenfassung: Sch{\"a}tzung von Stra{\ss}enoberfl{\"a}chen und -begrenzungen aus Sequenzen von Tiefenkarten unter Verwendung probabilistischer Modelle. Diese Arbeit pr{\"a}sentiert ein Verfahren zur Detektion und Rekonstruktion von Stra{\ss}enoberfl{\"a}chen und -begrenzungen auf der Basis von Tiefenkarten. Aktive Fahrerassistenzsysteme, welche mit der im Fahrzeug verbauten Sensorik die Umgebung erfassen, interpretieren und den Fahrer unterst{\"u}tzen, sind ein aktueller Forschungsschwerpunkt der Fahrzeugindustrie. Eine wesentliche Aufgabe dieser Systeme ist die Modellierung der statischen Fahrzeugumgebung. Dies beinhaltet die Bestimmung der vertikalen Neigungs- und Kr{\"u}mmungseigenschaften der Fahrbahn, sowie die robuste Detektion von Hindernissen und somit des befahrbaren Freiraumes. Hindernisse von geringer H{\"o}he, wie z.B. Bordsteine, sind in diesem Zusammenhang von besonderem Interesse, da sie h{\"a}ufig die erste geometrische Begrenzung des Fahrbahnbereiches darstellen. In diesem Kontext gewinnt die Verwendung von Tiefenkarten aus Stereo-Kamera-Systemen wegen der hohen Datenrate und relativ geringen Kosten des Sensors zunehmend an Bedeutung. Aufgrund des starken Messrauschens beschr{\"a}nken sich herk{\"o}mmliche Verfahren zur Hinderniserkennung jedoch meist auf erhabene Objekte wie Fahrzeuge oder Leitplanken, oder aber adressieren einzelne Objektklassen wie Bordsteine explizit. Dazu werden h{\"a}ufig extrem restriktive Annahmen verwendet wie z.B. planare Stra{\ss}enoberfl{\"a}chen. Der Hauptbeitrag dieser Arbeit besteht in der Entwicklung, Analyse und Evaluation eines Verfahrens, welches den befahrbaren Freiraum im Nahbereich des Fahrzeugs detektiert und dessen Begrenzung mit Hilfe einer Spline-Kurve explizit modelliert. Das Verfahren ber{\"u}cksichtigt insbesondere Hindernisse geringer H{\"o}he (gr{\"o}{\ss}er als 10 cm) ohne Beschr{\"a}nkung auf bestimmte Objektklassen.
    Weiterhin ist das Verfahren in der Lage, mit verschiedenartigen Neigungs- und Kr{\"u}mmungseigenschaften der vor dem Fahrzeug liegenden Fahrbahnoberfl{\"a}che umzugehen und diese durch Verwendung eines flexiblen Spline-Modells zu rekonstruieren. Um trotz der hohen Flexibilit{\"a}t des Modells und des hohen Messrauschens robuste Ergebnisse zu erzielen, verwendet das Verfahren probabilistische Modelle zur Vorverarbeitung der Eingabedaten und zur Detektion des befahrbaren Freiraumes. Aus den Tiefenkarten wird unter Ber{\"u}cksichtigung der Strahleng{\"a}nge und Unsicherheiten der Tiefenmessungen ein H{\"o}henmodell berechnet. In einem iterativen Zwei-Schritt-Verfahren werden anhand dieses H{\"o}henmodells der befahrbare Freiraum mit Hilfe eines Markov-Zufallsfeldes bestimmt sowie die Parameter der begrenzenden Spline-Kurve und Stra{\ss}enoberfl{\"a}che gesch{\"a}tzt. Ausrei{\ss}er in den H{\"o}hendaten werden dabei explizit modelliert. Die Leistungsf{\"a}higkeit des Gesamtverfahrens sowie der Einfluss
    zentraler Komponenten, wird im Rahmen von Experimenten auf synthetischen und realen Testszenen systematisch analysiert. Die Ergebnisse demonstrieren die F{\"a}higkeit des Verfahrens, die Begrenzung des befahrbaren Freiraumes sowie die Fahrbahnoberfl{\"a}che selbst in komplexen Szenarien mit multiplen Hindernissen oder starker Fahrbahnkr{\"u}mmung akkurat zu modellieren. Weiterhin werden die Grenzen des Verfahrens aufgezeigt und detailliert untersucht.},
    timestamp = {2013.10.07},
    url = {https://hss.ulb.uni-bonn.de/2013/3436/3436.htm},
    }

  • J. Stefanski, B. Mack, and B. Waske, “Optimization of object-based image analysis with Random Forests for land cover mapping,” IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing, vol. 6, iss. 6, p. 2492–2504, 2013. doi:10.1109/JSTARS.2013.2253089
    [BibTeX]

    A prerequisite for object-based image analysis is the generation of adequate segments. However, the parameters for the image segmentation algorithms are often manually defined. Therefore, the generation of an ideal segmentation level is usually costly and user-depended. In this paper a strategy for a semi-automatic optimization of object-based classification of multitemporal data is introduced by using Random Forests (RF) and a novel segmentation algorithm. The Superpixel Contour (SPc) algorithm is used to generate a set of different levels of segmentation, using various combinations of parameters in a user-defined range. Finally, the best parameter combination is selected based on the cross-validation-like out-of-bag (OOB) error that is provided by RF. Therefore, the quality of the parameters and the corresponding segmentation level can be assessed in terms of the classification accuracy, without providing additional independent test data. To evaluate the potential of the proposed concept, we focus on land cover classification of two study areas, using multitemporal RapidEye and SPOT 5 images. A classification that is based on eCognition’s widely used Multiresolution Segmentation algorithm (MRS) is used for comparison. Experimental results underline that the two segmentation algorithms SPc and MRS perform similar in terms of accuracy and visual interpretation. The proposed strategy that uses the OOB error for the selection of the ideal segmentation level provides similar classification accuracies, when compared to the results achieved by manual-based image segmentation. Overall, the proposed strategy is operational and easy to handle and thus economizes the findings of optimal segmentation parameters for the Superpixel Contour algorithm.

    @article{stefanski2013optimization,
      author    = {Stefanski, Jan and Mack, Benjamin and Waske, Bj\"orn},
      title     = {Optimization of object-based image analysis with Random Forests for land cover mapping},
      journal   = {IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing},
      year      = {2013},
      volume    = {6},
      number    = {6},
      pages     = {2492--2504},
      issn      = {1939-1404},
      doi       = {10.1109/JSTARS.2013.2253089},
      abstract  = {A prerequisite for object-based image analysis is the generation of adequate segments. However, the parameters for the image segmentation algorithms are often manually defined. Therefore, the generation of an ideal segmentation level is usually costly and user-depended. In this paper a strategy for a semi-automatic optimization of object-based classification of multitemporal data is introduced by using Random Forests (RF) and a novel segmentation algorithm. The Superpixel Contour (SPc) algorithm is used to generate a set of different levels of segmentation, using various combinations of parameters in a user-defined range. Finally, the best parameter combination is selected based on the cross-validation-like out-of-bag (OOB) error that is provided by RF. Therefore, the quality of the parameters and the corresponding segmentation level can be assessed in terms of the classification accuracy, without providing additional independent test data. To evaluate the potential of the proposed concept, we focus on land cover classification of two study areas, using multitemporal RapidEye and SPOT 5 images. A classification that is based on eCognition's widely used Multiresolution Segmentation algorithm (MRS) is used for comparison. Experimental results underline that the two segmentation algorithms SPc and MRS perform similar in terms of accuracy and visual interpretation. The proposed strategy that uses the OOB error for the selection of the ideal segmentation level provides similar classification accuracies, when compared to the results achieved by manual-based image segmentation. Overall, the proposed strategy is operational and easy to handle and thus economizes the findings of optimal segmentation parameters for the Superpixel Contour algorithm.},
      owner     = {JanS},
      timestamp = {2013.03.14},
    }

  • S. Wenzel and W. Förstner, “Finding Poly-Curves of Straight Line and Ellipse Segments in Images,” Photogrammetrie, Fernerkundung, Geoinformation (PFG), vol. 4, p. 297–308, 2013. doi:10.1127/1432-8364/2013/0178
    [BibTeX]

    Simplification of given polygons has attracted many researchers. Especially, finding circular and elliptical structures in images is relevant in many applications. Given pixel chains from edge detection, this paper proposes a method to segment them into straight line and ellipse segments. We propose an adaption of Douglas-Peucker’s polygon simplification algorithm using circle segments instead of straight line segments and partition the sequence of points instead the sequence of edges. It is robust and decreases the complexity of given polygons better than the original algorithm. In a second step, we further simplify the poly-curve by merging neighbouring segments to straight line and ellipse segments. Merging is based on the evaluation of variation of entropy for proposed geometric models, which turns out as a combination of hypothesis testing and model selection. We demonstrate the results of {\tt circlePeucker} as well as merging on several images of scenes with significant circular structures and compare them with the method of {\sc Patraucean} et al. (2012).

    @article{wenzel2013finding,
      author   = {Wenzel, Susanne and F\"orstner, Wolfgang},
      title    = {Finding Poly-Curves of Straight Line and Ellipse Segments in Images},
      journal  = {Photogrammetrie, Fernerkundung, Geoinformation (PFG)},
      year     = {2013},
      volume   = {4},
      pages    = {297--308},
      doi      = {10.1127/1432-8364/2013/0178},
      abstract = {Simplification of given polygons has attracted many researchers. Especially, finding circular and elliptical structures in images is relevant in many applications. Given pixel chains from edge detection, this paper proposes a method to segment them into straight line and ellipse segments. We propose an adaption of Douglas-Peucker's polygon simplification algorithm using circle segments instead of straight line segments and partition the sequence of points instead the sequence of edges. It is robust and decreases the complexity of given polygons better than the original algorithm. In a second step, we further simplify the poly-curve by merging neighbouring segments to straight line and ellipse segments. Merging is based on the evaluation of variation of entropy for proposed geometric models, which turns out as a combination of hypothesis testing and model selection. We demonstrate the results of {\tt circlePeucker} as well as merging on several images of scenes with significant circular structures and compare them with the method of {\sc Patraucean} et al. (2012).},
      file     = {Technical Report:Wenzel2013Finding.pdf},
    }

  • S. Wenzel and W. Förstner, “Finding Poly-Curves of Straight Line and Ellipse Segments in Images,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2013-02, 2013.
    [BibTeX] [PDF]

    Simplification of given polygons has attracted many researchers. Especially, finding circular and elliptical structures in images is relevant in many applications. Given pixel chains from edge detection, this paper proposes a method to segment them into straight line and ellipse segments. We propose an adaption of Douglas-Peucker’s polygon simplification algorithm using circle segments instead of straight line segments and partition the sequence of points instead the sequence of edges. It is robust and decreases the complexity of given polygons better than the original algorithm. In a second step, we further simplify the poly-curve by merging neighbouring segments to straight line and ellipse segments. Merging is based on the evaluation of variation of entropy for proposed geometric models, which turns out as a combination of hypothesis testing and model selection. We demonstrate the results of {\tt circlePeucker} as well as merging on several images of scenes with significant circular structures and compare them with the method of {\sc Patraucean} et al. (2012).

    @TechReport{wenzel2013findingtr,
    title = {Finding Poly-Curves of Straight Line and Ellipse Segments in Images},
    author = {Wenzel, Susanne and F{\"o}rstner, Wolfgang},
    institution = {Department of Photogrammetry, University of Bonn},
    year = {2013},
    month = jul,
    number = {TR-IGG-P-2013-02},
    abstract = {Simplification of given polygons has attracted many researchers. Especially, finding circular and elliptical structures in images is relevant in many applications. Given pixel chains from edge detection, this paper proposes a method to segment them into straight line and ellipse segments. We propose an adaption of Douglas-Peucker's polygon simplification algorithm using circle segments instead of straight line segments and partition the sequence of points instead the sequence of edges. It is robust and decreases the complexity of given polygons better than the original algorithm. In a second step, we further simplify the poly-curve by merging neighbouring segments to straight line and ellipse segments. Merging is based on the evaluation of variation of entropy for proposed geometric models, which turns out as a combination of hypothesis testing and model selection. We demonstrate the results of {\tt circlePeucker} as well as merging on several images of scenes with significant circular structures and compare them with the method of {\sc Patraucean} et al. (2012).},
    url = {https://www.ipb.uni-bonn.de/pdfs/Wenzel2013Finding.pdf},
    }

  • K. M. Wurm, C. Dornhege, B. Nebel, W. Burgard, and C. Stachniss, “Coordinating Heterogeneous Teams of Robots using Temporal Symbolic Planning,” Autonomous Robots, vol. 34, 2013.
    [BibTeX] [PDF]
    [none]
    @Article{wurm2013,
    title = {Coordinating Heterogeneous Teams of Robots using Temporal Symbolic Planning},
    author = {K.M. Wurm and C. Dornhege and B. Nebel and W. Burgard and C. Stachniss},
    journal = auro,
    year = {2013},
    volume = {34},
    number = {4},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/wurm13auro.pdf},
    }

  • K. M. Wurm, H. Kretzschmar, R. Kümmerle, C. Stachniss, and W. Burgard, “Identifying Vegetation from Laser Data in Structured Outdoor Environments,” Robotics and Autonomous Systems, 2013.
    [BibTeX] [PDF]
    [none]
    @article{wurm2013a,
      author    = {K.M. Wurm and H. Kretzschmar and R. K{\"u}mmerle and C. Stachniss and W. Burgard},
      title     = {Identifying Vegetation from Laser Data in Structured Outdoor Environments},
      journal   = jras,
      year      = {2013},
      note      = {In press},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/wurm13ras.pdf},
    }

2012

  • N. Abdo, H. Kretzschmar, and C. Stachniss, “From Low-Level Trajectory Demonstrations to Symbolic Actions for Planning,” in Proc. of the ICAPS Workshop on Combining Task and Motion Planning for Real-World Applications (TAMPRA), 2012.
    [BibTeX] [PDF]
    [none]
    @inproceedings{abdo2012,
      author    = {N. Abdo and H. Kretzschmar and C. Stachniss},
      title     = {From Low-Level Trajectory Demonstrations to Symbolic Actions for Planning},
      booktitle = {Proc. of the ICAPS Workshop on Combining Task and Motion Planning for Real-World Applications (TAMPRA)},
      year      = {2012},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/abdo12tampra.pdf},
    }

  • P. A. Becker, “3D Rekonstruktion symmetrischer Objekte aus Tiefenbildern,” Bachelor Thesis, 2012.
    [BibTeX]

    none

    @MastersThesis{becker2012rekonstruktion,
    title = {{3D} Rekonstruktion symmetrischer Objekte aus Tiefenbildern},
    author = {Becker, Philip Alexander},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2012},
    type = {Bachelor Thesis},
    timestamp = {2013.04.16},
    }

  • D. Chai, W. Förstner, and M. Ying Yang, “Combine Markov Random Fields and Marked Point Processes to extract Building from Remotely Sensed Images,” in ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences, 2012. doi:10.5194/isprsannals-I-3-365-2012
    [BibTeX] [PDF]

    Automatic building extraction from remotely sensed images is a research topic much more significant than ever. One of the key issues is object and image representation. Markov random fields usually referring to the pixel level can not represent high-level knowledge well. On the contrary, marked point processes can not represent low-level information well even though they are a powerful model at object level. We propose to combine Markov random fields and marked point processes to represent both low-level information and high-level knowledge, and present a combined framework of modelling and estimation for building extraction from single remotely sensed image. At high level, rectangles are used to represent buildings, and a marked point process is constructed to represent the buildings on ground scene. Interactions between buildings are introduced into the the model to represent their relationships. At the low level, a MRF is used to represent the statistics of the image appearance. Histograms of colours are adopted to represent the building’s appearance. The high-level model and the low-level model are combined by establishing correspondences between marked points and nodes of the MRF. We adopt reversible jump Markov Chain Monte Carlo (RJMCMC) techniques to explore the configuration space at the high level, and adopt a Graph Cut algorithm to optimize configuration at the low level. We propose a top-down schema to use results from high level to guide the optimization at low level, and propose a bottom-up schema to use results from low level to drive the sampling at high level. Experimental results demonstrate that better results can be achieved by adopting such hybrid representation.

    @InProceedings{chai*12:combine,
    title = {Combine Markov Random Fields and Marked Point Processes to extract Building from Remotely Sensed Images},
    author = {Chai, D. and F{\"o}rstner, W. and Ying Yang, M.},
    booktitle = {ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences},
    year = {2012},
    abstract = {Automatic building extraction from remotely sensed images is a research topic much more significant than ever. One of the key issues is object and image representation. Markov random fields usually referring to the pixel level can not represent high-level knowledge well. On the contrary, marked point processes can not represent low-level information well even though they are a powerful model at object level. We propose to combine Markov random fields and marked point processes to represent both low-level information and high-level knowledge, and present a combined framework of modelling and estimation for building extraction from single remotely sensed image. At high level, rectangles are used to represent buildings, and a marked point process is constructed to represent the buildings on ground scene. Interactions between buildings are introduced into the model to represent their relationships. At the low level, a MRF is used to represent the statistics of the image appearance. Histograms of colours are adopted to represent the building's appearance. The high-level model and the low-level model are combined by establishing correspondences between marked points and nodes of the MRF. We adopt reversible jump Markov Chain Monte Carlo (RJMCMC) techniques to explore the configuration space at the high level, and adopt a Graph Cut algorithm to optimize configuration at the low level. We propose a top-down schema to use results from high level to guide the optimization at low level, and propose a bottom-up schema to use results from low level to drive the sampling at high level. Experimental results demonstrate that better results can be achieved by adopting such hybrid representation.},
    doi = {10.5194/isprsannals-I-3-365-2012},
    timestamp = {2015.07.09},
    url = {https://www.ipb.uni-bonn.de/pdfs/isprsannals-I-3-365-2012.pdf},
    }

  • W. Förstner, “Minimal Representations for Testing and Estimation in Projective Spaces,” Z. f. Photogrammetrie, Fernerkundung und Geoinformation, vol. 3, p. 209–220, 2012. doi:10.1127/1432-8364/2012/0112
    [BibTeX]

    Testing and estimation using homogeneous coordinates and matrices has to cope with obstacles such as singularities of covariance matrices and redundant parametrizations. The paper proposes a representation of the uncertainty of all types of geometric entities which (1) only requires the minimum number of parameters, (2) is free of singularities, (3) enables to exploit the simplicity of homogeneous coordinates to represent geometric constraints and (4) allows to handle geometric entities which are at infinity or at least very far away. We develop the concept, discuss its usefulness for bundle adjustment and demonstrate its applicability for determining 3D lines from observed image line segments in a multi view setup.

    @article{forstner2012minimal,
      author    = {F\"orstner, Wolfgang},
      title     = {Minimal Representations for Testing and Estimation in Projective Spaces},
      journal   = {Z. f. Photogrammetrie, Fernerkundung und Geoinformation},
      year      = {2012},
      volume    = {3},
      pages     = {209--220},
      doi       = {10.1127/1432-8364/2012/0112},
      abstract  = {Testing and estimation using homogeneous coordinates and matrices has to cope with obstacles such as singularities of covariance matrices and redundant parametrizations. The paper proposes a representation of the uncertainty of all types of geometric entities which (1) only requires the minimum number of parameters, (2) is free of singularities, (3) enables to exploit the simplicity of homogeneous coordinates to represent geometric constraints and (4) allows to handle geometric entities which are at infinity or at least very far away. We develop the concept, discuss its usefulness for bundle adjustment and demonstrate its applicability for determining 3D lines from observed image line segments in a multi view setup.},
      file      = {Technical Report:Forstner2012Minimal.pdf},
      timestamp = {2013.01.09},
    }

  • W. Förstner, “Minimal Representations for Testing and Estimation in Projective Spaces,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2012-03, 2012.
    [BibTeX] [PDF]

    Testing and estimation using homogeneous coordinates and matrices has to cope with obstacles such as singularities of covariance matrices and redundant parametrizations. The paper proposes a representation of the uncertainty of all types of geometric entities which (1) only requires the minimum number of parameters, (2) is free of singularities, (3) enables to exploit the simplicity of homogeneous coordinates to represent geometric constraints and (4) allows to handle geometric entities which are at infinity or at least very far away. We develop the concept, discuss its usefulness for bundle adjustment and demonstrate its applicability for determining 3D lines from observed image line segments in a multi view setup.

    @techreport{forstner2012minimalreport,
      author      = {F\"orstner, Wolfgang},
      title       = {Minimal Representations for Testing and Estimation in Projective Spaces},
      institution = {Department of Photogrammetry, University of Bonn},
      year        = {2012},
      number      = {TR-IGG-P-2012-03},
      abstract    = {Testing and estimation using homogeneous coordinates and matrices has to cope with obstacles such as singularities of covariance matrices and redundant parametrizations. The paper proposes a representation of the uncertainty of all types of geometric entities which (1) only requires the minimum number of parameters, (2) is free of singularities, (3) enables to exploit the simplicity of homogeneous coordinates to represent geometric constraints and (4) allows to handle geometric entities which are at infinity or at least very far away. We develop the concept, discuss its usefulness for bundle adjustment and demonstrate its applicability for determining 3D lines from observed image line segments in a multi view setup.},
      url         = {https://www.ipb.uni-bonn.de/pdfs/Forstner2012Minimal.pdf},
    }

  • S. Gehrig, A. Barth, N. Schneider, and J. Siegemund, “A Multi-Cue Approach for Stereo-Based Object Confidence Estimation,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), Vilamoura, Portugal, 2012, p. 3055 – 3060. doi:10.1109/IROS.2012.6385455
    [BibTeX]

    In this contribution we present an approach to compute object confidences for stereo-vision-based object tracking schemes. Meaningful object confidences help to reduce false alarm rates of safety systems and improve the downstream system performance for modules such as sensor fusion and situation analysis. Several cues from stereo vision and from the tracking process are fused in a Bayesian manner. An evaluation on a 38,000 frames urban drive shows the effectiveness of the approach compared to the same object tracking scheme with simple heuristics for the object confidence. Within the evaluation, also the relevance of occurring phantoms is considered by computing the collision risk. The proposed confidence measures reduce the number of predicted imminent collisions from 86 to 0 maintaining almost the same system availability.

    @InProceedings{gehrig2012multi,
    title = {A Multi-Cue Approach for Stereo-Based Object Confidence Estimation},
    author = {Gehrig, Stefan and Barth, Alexander and Schneider, Nicolai and Siegemund, Jan},
    booktitle = iros,
    year = {2012},
    address = {Vilamoura, Portugal},
    pages = {3055--3060},
    abstract = {In this contribution we present an approach to compute object confidences for stereo-vision-based object tracking schemes. Meaningful object confidences help to reduce false alarm rates of safety systems and improve the downstream system performance for modules such as sensor fusion and situation analysis. Several cues from stereo vision and from the tracking process are fused in a Bayesian manner. An evaluation on a 38,000 frames urban drive shows the effectiveness of the approach compared to the same object tracking scheme with simple heuristics for the object confidence. Within the evaluation, also the relevance of occurring phantoms is considered by computing the collision risk. The proposed confidence measures reduce the number of predicted imminent collisions from 86 to 0 maintaining almost the same system availability.},
    doi = {10.1109/IROS.2012.6385455},
    }

  • G. Grisetti, L. Iocchi, B. Leibe, V. A. Ziparo, and C. Stachniss, “Digitization of Inaccessible Archeological Sites with Autonomous Mobile Robots,” in Conf. on Robotics Innovation for Cultural Heritage, 2012.
    [BibTeX]
    [none]
    @InProceedings{grisetti2012,
    title = {Digitization of Inaccessible Archeological Sites with Autonomous Mobile Robots},
    author = {G. Grisetti and L. Iocchi and B. Leibe and V.A. Ziparo and C. Stachniss},
    booktitle = {Conf. on Robotics Innovation for Cultural Heritage},
    year = {2012},
    abstract = {[none]},
    note = {Extended abstract},
    timestamp = {2014.04.24},
    }

  • M. Hans, “Die Verbesserung einer Bildsegmentierung unter Verwendung von 3D Merkmalen,” Bachelor Thesis, Institute of Photogrammetry, University of Bonn, 2012.
    [BibTeX] [PDF]

    Ziel einer partionellen Bildsegmentierung ist die Einteilung eines Bildes in Regionen. Dabei wird jedes Pixel zu je einer Region zugeordnet. Liegen ungünstige Beleuchtungsverhältnisse im Bild vor, ist eine Segmentierung einzig basierend auf Bilddaten nicht ausreichend, da aneinandergrenzende Objekteile mit ähnlichen Farbwerten nicht unterschieden werden können. Mit Hilfe von 3D-Merkmalen können wir solche Bildsegmentierungen verbessern. Dabei liegt der Fokus der Arbeit auf segmentierten Luftbildern mit Dachflächen. Mit der Annahme, dass sich die Dächer aus Flächen erster Ordnung zusammensetzen, werden in den vorsegmentierten Bildregionen zunächst zwei Ebenen in den zugeordneten Punkten einer 3D-Punktwolke geschätzt. Hierzu wird der random sample consensus (RANSAC, Fischler and Bolles (1981)) verwendet. Wir beschränken uns auf die Trennkante zweier Dachflächen, die in einem bekannten Winkel $\varphi$ zueinander stehen und die gleiche Neigung haben. Die Berechnung der Ebenenparameter ist somit bereits mit vier geeigneten Punkten der Objektkoordinaten möglich. Mit den geschätzten Ebenen in der Punktwolke segmentierte Bildregion kann diese aufgesplittet werden. Hierzu wenden wir ein lineares diskriminatives Modell an, um eine lineare Kante als Trennung in der Bildsegmentierung einzeichnen zu können. Eine visuelle Evaluierung der Ergebnisse zeigt, dass die hier vorgestellten Verfahren eine Trennung der Dachregionen an einer sinnvollen Stelle ermöglichen. Dabei werden die Verfahren an Bildern mit unterschiedlichen Dachformen getestet. Die Leistungsfähigkeit der Verfahren hängt vor Allem von der Punktkonfiguration der von RANSAC ausgewählten Punkte ab. Diese Arbeit beschreibt uns somit Verfahren, die eine regionenbasierende Segmentierung von Dachflächen auf Luftbildern unter der Verwendung von 3D Merkmalen verbessern.

    @mastersthesis{hans2010die,
      author   = {Hans, Mathias},
      title    = {Die Verbesserung einer Bildsegmentierung unter Verwendung von 3D Merkmalen},
      school   = {Institute of Photogrammetry, University of Bonn},
      year     = {2012},
      type     = {bachelor thesis},
      note     = {Betreuung: Prof. Dr.-Ing Wolfgang F\"orstner, Dipl.-Ing. Ribana Roscher},
      city     = {Bonn},
      abstract = {Ziel einer partionellen Bildsegmentierung ist die Einteilung eines Bildes in Regionen. Dabei wird jedes Pixel zu je einer Region zugeordnet. Liegen ung\"unstige Beleuchtungsverh\"altnisse im Bild vor, ist eine Segmentierung einzig basierend auf Bilddaten nicht ausreichend, da aneinandergrenzende Objekteile mit \"ahnlichen Farbwerten nicht unterschieden werden k\"onnen. Mit Hilfe von 3D-Merkmalen k\"onnen wir solche Bildsegmentierungen verbessern. Dabei liegt der Fokus der Arbeit auf segmentierten Luftbildern mit Dachfl\"achen. Mit der Annahme, dass sich die D\"acher aus Fl\"achen erster Ordnung zusammensetzen, werden in den vorsegmentierten Bildregionen zun\"achst zwei Ebenen in den zugeordneten Punkten einer 3D-Punktwolke gesch\"atzt. Hierzu wird der random sample consensus (RANSAC, Fischler and Bolles (1981)) verwendet. Wir beschr\"anken uns auf die Trennkante zweier Dachfl\"achen, die in einem bekannten Winkel $\varphi$ zueinander stehen und die gleiche Neigung haben. Die Berechnung der Ebenenparameter ist somit bereits mit vier geeigneten Punkten der Objektkoordinaten m\"oglich. Mit den gesch\"atzten Ebenen in der Punktwolke segmentierte Bildregion kann diese aufgesplittet werden. Hierzu wenden wir ein lineares diskriminatives Modell an, um eine lineare Kante als Trennung in der Bildsegmentierung einzeichnen zu k\"onnen. Eine visuelle Evaluierung der Ergebnisse zeigt, dass die hier vorgestellten Verfahren eine Trennung der Dachregionen an einer sinnvollen Stelle erm\"oglichen. Dabei werden die Verfahren an Bildern mit unterschiedlichen Dachformen getestet. Die Leistungsf\"ahigkeit der Verfahren h\"angt vor Allem von der Punktkonfiguration der von RANSAC ausgew\"ahlten Punkte ab. Diese Arbeit beschreibt uns somit Verfahren, die eine regionenbasierende Segmentierung von Dachfl\"achen auf Luftbildern unter der Verwendung von 3D Merkmalen verbessern.},
      url      = {https://www.ipb.uni-bonn.de/pdfs/Hans2010Die.pdf},
    }

  • D. Joho, G. D. Tipaldi, N. Engelhard, C. Stachniss, and W. Burgard, “Nonparametric Bayesian Models for Unsupervised Scene Analysis and Reconstruction,” in Proc. of Robotics: Science and Systems (RSS), 2012.
    [BibTeX] [PDF]
    [none]
    @inproceedings{joho2012,
      author    = {D. Joho and G.D. Tipaldi and N. Engelhard and C. Stachniss and W. Burgard},
      title     = {Nonparametric {B}ayesian Models for Unsupervised Scene Analysis and Reconstruction},
      booktitle = rss,
      year      = {2012},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/joho12rss.pdf},
    }

  • S. Klemenjak, B. Waske, S. Valero, and J. Chanussot, “Automatic Detection of Rivers in High-Resolution SAR Data,” IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing, vol. 5, iss. 5, p. 1364–1372, 2012. doi:10.1109/JSTARS.2012.2189099
    [BibTeX]

    Remote sensing plays a major role in supporting decision-making and surveying compliance of several multilateral environmental treaties. In this paper, we present an approach for supporting monitoring compliance of river networks in context of the European Water Framework Directive. Only a few approaches have been developed for extracting river networks from satellite data and usually they require manual input, which seems not feasible for automatic and operational application. We propose a method for the automatic extraction of river structures in TerraSAR-X data. The method is based on mathematical morphology and supervised image classification, using automatically selected training samples. The method is applied on TerraSAR-X images from two different study sites. In addition, the results are compared to an alternative method, which requires manual user interaction. The detailed accuracy assessment shows that the proposed method achieves accurate results (Kappa ~ 0.7) and performs almost similar in terms of accuracy, when compared to the alternative approach. Moreover, the proposed method can be applied on various datasets (e.g., multitemporal, multisensoral and multipolarized) and does not require any additional user input. Thus, the highly flexible approach is interesting in terms of operational monitoring systems and large scale applications.

    @Article{klemenjak2012automatic,
    title = {Automatic Detection of Rivers in High-Resolution SAR Data},
    author = {Klemenjak, Sascha and Waske, Bj\"orn and Valero, Silvia and Chanussot, Jocelyn},
    journal = {IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing},
    year = {2012},
    month = oct,
    number = {5},
    pages = {1364--1372},
    volume = {5},
    abstract = {Remote sensing plays a major role in supporting decision-making and surveying compliance of several multilateral environmental treaties. In this paper, we present an approach for supporting monitoring compliance of river networks in context of the European Water Framework Directive. Only a few approaches have been developed for extracting river networks from satellite data and usually they require manual input, which seems not feasible for automatic and operational application. We propose a method for the automatic extraction of river structures in TerraSAR-X data. The method is based on mathematical morphology and supervised image classification, using automatically selected training samples. The method is applied on TerraSAR-X images from two different study sites. In addition, the results are compared to an alternative method, which requires manual user interaction. The detailed accuracy assessment shows that the proposed method achieves accurate results (Kappa $\sim$ 0.7) and performs almost similar in terms of accuracy, when compared to the alternative approach. Moreover, the proposed method can be applied on various datasets (e.g., multitemporal, multisensoral and multipolarized) and does not require any additional user input. Thus, the highly flexible approach is interesting in terms of operational monitoring systems and large scale applications.},
    doi = {10.1109/JSTARS.2012.2189099},
    issn = {1939-1404},
    owner = {waske},
    timestamp = {2012.09.06},
    }

  • F. Korč, “Tractable Learning for a Class of Global Discriminative Models for Context Sensitive Image Interpretation,” PhD Thesis, 2012.
    [BibTeX] [PDF]
    [none]
    @PhDThesis{korvc2012tractable,
    title = {Tractable Learning for a Class of Global Discriminative Models for Context Sensitive Image Interpretation},
    author = {Kor{\v{c}}, Filip},
    school = {Department of Photogrammetry, University of Bonn},
    year = {2012},
    abstract = {[none]},
    url = {https://hss.ulb.uni-bonn.de/2012/3010/3010.htm},
    }

  • H. Kretzschmar and C. Stachniss, “Information-Theoretic Pose Graph Compression for Laser-based SLAM,” The Intl. Journal of Robotics Research, vol. 31, p. 1219–1230, 2012.
    [BibTeX] [PDF]
    [none]
    @Article{kretzschmar2012,
    title = {Information-Theoretic Pose Graph Compression for Laser-based {SLAM}},
    author = {H. Kretzschmar and C. Stachniss},
    journal = ijrr,
    year = {2012},
    volume = {31},
    number = {11},
    pages = {1219--1230},
    abstract = {[none]},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/kretzschmar12ijrr.pdf},
    }

  • J. Roewekaemper, C. Sprunk, G. D. Tipaldi, C. Stachniss, P. Pfaff, and W. Burgard, “On the Position Accuracy of Mobile Robot Localization based on Particle Filters combined with Scan Matching,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2012.
    [BibTeX] [PDF]
    [none]
    @inproceedings{roewekaemper2012,
      author    = {J. Roewekaemper and C. Sprunk and G.D. Tipaldi and C. Stachniss and P. Pfaff and W. Burgard},
      title     = {On the Position Accuracy of Mobile Robot Localization based on Particle Filters combined with Scan Matching},
      booktitle = iros,
      year      = {2012},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://ais.informatik.uni-freiburg.de/publications/papers/roewekaemper12iros.pdf},
    }

  • R. Roscher, “Sequential Learning using Incremental Import Vector Machines for Semantic Segmentation,” PhD Thesis, 2012.
    [BibTeX] [PDF]

    We propose an innovative machine learning algorithm called incremental import vector machines that is used for classification purposes. The classifier is specifically designed for the task of sequential learning, in which the data samples are successively presented to the classifier. The motivation for our work comes from the effort to formulate a classifier that can manage the major challenges of sequential learning problems, while being a powerful classifier in terms of classification accuracy, efficiency and meaningful output. One challenge of sequential learning is that data samples are not completely available to the learner at a given point of time and generally, waiting for a representative number of data is undesirable and impractical. Thus, in order to allow for a classification of given data samples at any time, the learning phase of the classifier model needs to start immediately, even if not all training samples are available. Another challenge is that the number of sequential arriving data samples can be very large or even infinite and thus, not all samples can be stored. Furthermore, the distribution of the sample can vary over time and the classifier model needs to remain stable and unchanged to irrelevant samples while being plastic to new, important samples. Therefore our key contribution is to develop, analyze and evaluate a powerful incremental learner for sequential learning which we call incremental import vector machines (I2VMs). The classifier is based on the batch machine learning algorithm import vector machines, which was developed by Zhu and Hastie (2005). I2VM is a kernel-based, discriminative classifier and thus, is able to deal with complex data distributions. Additionally ,the learner is sparse for an efficient training and testing and has a probabilistic output. A key achievement of this thesis is the verification and analysis of the discriminative and reconstructive model components of IVM and I2VM. 
While discriminative classifiers try to separate the classes as well as possible, classifiers with a reconstructive component aspire to have a high information content in order to approximate the distribution of the data samples. Both properties are necessary for a powerful incremental classifier. A further key achievement is the formulation of the incremental learning strategy of I2VM. The strategy deals with adding and removing data samples and the update of the current set of model parameters. Furthermore, also new classes and features can be incorporated. The learning strategy adapts the model continuously, while keeping it stable and efficient. In our experiments we use I2VM for the semantic segmentation of images from an image database, for large area land cover classification of overlapping remote sensing images and for object tracking in image sequences. We show that I2VM results in superior or competitive classification accuracies to comparable classifiers. A substantial achievement of the thesis is that I2VM’s performance is independent of the ordering of the data samples and a reconsidering of already encountered samples for learning is not necessary. A further achievement is that I2VM is able to deal with very long data streams without a loss in the efficiency. Furthermore, as another achievement, we show that I2VM provide reliable posterior probabilities since samples with high class probabilities are accurately classified, whereas relatively low class probabilities are more likely referred to misclassified samples.

    @PhDThesis{roscher2012sequential,
    title = {Sequential Learning using Incremental Import Vector Machines for Semantic Segmentation},
    author = {Roscher, Ribana},
    school = {Department of Photogrammetry, University of Bonn},
    year = {2012},
    abstract = {We propose an innovative machine learning algorithm called incremental import vector machines that is used for classification purposes. The classifier is specifically designed for the task of sequential learning, in which the data samples are successively presented to the classifier. The motivation for our work comes from the effort to formulate a classifier that can manage the major challenges of sequential learning problems, while being a powerful classifier in terms of classification accuracy, efficiency and meaningful output. One challenge of sequential learning is that data samples are not completely available to the learner at a given point of time and generally, waiting for a representative number of data is undesirable and impractical. Thus, in order to allow for a classification of given data samples at any time, the learning phase of the classifier model needs to start immediately, even if not all training samples are available. Another challenge is that the number of sequential arriving data samples can be very large or even infinite and thus, not all samples can be stored. Furthermore, the distribution of the sample can vary over time and the classifier model needs to remain stable and unchanged to irrelevant samples while being plastic to new, important samples. Therefore our key contribution is to develop, analyze and evaluate a powerful incremental learner for sequential learning which we call incremental import vector machines (I2VMs). The classifier is based on the batch machine learning algorithm import vector machines, which was developed by Zhu and Hastie (2005). I2VM is a kernel-based, discriminative classifier and thus, is able to deal with complex data distributions. Additionally, the learner is sparse for an efficient training and testing and has a probabilistic output. A key achievement of this thesis is the verification and analysis of the discriminative and reconstructive model components of IVM and I2VM. While discriminative classifiers try to separate the classes as well as possible, classifiers with a reconstructive component aspire to have a high information content in order to approximate the distribution of the data samples. Both properties are necessary for a powerful incremental classifier. A further key achievement is the formulation of the incremental learning strategy of I2VM. The strategy deals with adding and removing data samples and the update of the current set of model parameters. Furthermore, also new classes and features can be incorporated. The learning strategy adapts the model continuously, while keeping it stable and efficient. In our experiments we use I2VM for the semantic segmentation of images from an image database, for large area land cover classification of overlapping remote sensing images and for object tracking in image sequences. We show that I2VM results in superior or competitive classification accuracies to comparable classifiers. A substantial achievement of the thesis is that I2VM's performance is independent of the ordering of the data samples and a reconsidering of already encountered samples for learning is not necessary. A further achievement is that I2VM is able to deal with very long data streams without a loss in the efficiency. Furthermore, as another achievement, we show that I2VM provide reliable posterior probabilities since samples with high class probabilities are accurately classified, whereas relatively low class probabilities are more likely referred to misclassified samples.},
    city = {Bonn},
    url = {https://hss.ulb.uni-bonn.de/2012/3009/3009.htm},
    }

  • R. Roscher, W. Förstner, and B. Waske, “I²VM: Incremental import vector machines,” Image and Vision Computing, vol. 30, iss. 4-5, p. 263–278, 2012. doi:10.1016/j.imavis.2012.04.004
    [BibTeX]

    We introduce an innovative incremental learner called incremental import vector machines (I²VM). The kernel-based discriminative approach is able to deal with complex data distributions. Additionally, the learner is sparse for an efficient training and testing and has a probabilistic output. We particularly investigate the reconstructive component of import vector machines, in order to use it for robust incremental learning. By performing incremental update steps, we are able to add and remove data samples, as well as update the current set of model parameters for incremental learning. By using various standard benchmarks, we demonstrate how I²VM is competitive or superior to other incremental methods. It is also shown that our approach is capable of managing concept-drifts in the data distributions. (C) 2012 Elsevier B.V. All rights reserved.

    @Article{roscher2012i2vm,
    title = {{I$^2$VM}: Incremental import vector machines},
    author = {Roscher, Ribana and F\"orstner, Wolfgang and Waske, Bj\"orn},
    journal = {Image and Vision Computing},
    year = {2012},
    month = may,
    number = {4-5},
    pages = {263--278},
    volume = {30},
    abstract = {We introduce an innovative incremental learner called incremental import vector machines (I$^2$VM). The kernel-based discriminative approach is able to deal with complex data distributions. Additionally, the learner is sparse for an efficient training and testing and has a probabilistic output. We particularly investigate the reconstructive component of import vector machines, in order to use it for robust incremental learning. By performing incremental update steps, we are able to add and remove data samples, as well as update the current set of model parameters for incremental learning. By using various standard benchmarks, we demonstrate how I$^2$VM is competitive or superior to other incremental methods. It is also shown that our approach is capable of managing concept-drifts in the data distributions. (C) 2012 Elsevier B.V. All rights reserved.},
    doi = {10.1016/j.imavis.2012.04.004},
    issn = {0262-8856},
    owner = {waske},
    timestamp = {2012.09.04},
    }

  • R. Roscher, J. Siegemund, F. Schindler, and W. Förstner, “Object Tracking by Segmentation Using Incremental Import Vector Machines,” Department of Photogrammetry, University of Bonn 2012.
    [BibTeX] [PDF]

    We propose a framework for object tracking in image sequences, following the concept of tracking-by-segmentation. The separation of object and background is achieved by a consecutive semantic superpixel segmentation of the images, yielding tight object boundaries. I.e., in the first image a model of the object’s characteristics is learned from an initial, incomplete annotation. This model is used to classify the superpixels of subsequent images to object and background employing graph-cut. We assume the object boundaries to be tight-fitting and the object motion within the image to be affine. To adapt the model to radiometric and geometric changes we utilize an incremental learner in a co-training scheme. We evaluate our tracking framework qualitatively and quantitatively on several image sequences.

    @techreport{roscher2012object,
      author      = {Roscher, Ribana and Siegemund, Jan and Schindler, Falko and F\"orstner, Wolfgang},
      title       = {Object Tracking by Segmentation Using Incremental Import Vector Machines},
      institution = {Department of Photogrammetry, University of Bonn},
      year        = {2012},
      city        = {Bonn},
      abstract    = {We propose a framework for object tracking in image sequences, following the concept of tracking-by-segmentation. The separation of object and background is achieved by a consecutive semantic superpixel segmentation of the images, yielding tight object boundaries. I.e., in the first image a model of the object's characteristics is learned from an initial, incomplete annotation. This model is used to classify the superpixels of subsequent images to object and background employing graph-cut. We assume the object boundaries to be tight-fitting and the object motion within the image to be affine. To adapt the model to radiometric and geometric changes we utilize an incremental learner in a co-training scheme. We evaluate our tracking framework qualitatively and quantitatively on several image sequences.},
      url         = {https://www.ipb.uni-bonn.de/pdfs/Roscher2012Object.pdf},
    }

  • R. Roscher, B. Waske, and W. Förstner, “Evaluation of Import Vector Machines for Classifying Hyperspectral Data,” Department of Photogrammetry, University of Bonn 2012.
    [BibTeX] [PDF]

    We evaluate the performance of Import Vector Machines (IVM), a sparse Kernel Logistic Regression approach, for the classification of hyperspectral data. The IVM classifier is applied on two different data sets, using different number of training samples. The performance of IVM to Support Vector Machines (SVM) is compared in terms of accuracy and sparsity. Moreover, the impact of the training sample set on the accuracy and stability of IVM was investigated. The results underline that the IVM perform similar when compared to the popular SVM in terms of accuracy. Moreover, the number of import vectors from the IVM is significantly lower when compared to the number of support vectors from the SVM. Thus, the classification process of the IVM is faster. These findings are independent from the study site, the number of training samples and specific classes. Consequently, the proposed IVM approach is a promising classification method for hyperspectral imagery.

    @TechReport{roscher2012evaluation,
    title = {Evaluation of Import Vector Machines for Classifying Hyperspectral Data},
    author = {Roscher, Ribana and Waske, Bj\"orn and F\"orstner, Wolfgang},
    institution = {Department of Photogrammetry, University of Bonn},
    year = {2012},
    abstract = {We evaluate the performance of Import Vector Machines (IVM), a sparse Kernel Logistic Regression approach, for the classification of hyperspectral data. The IVM classifier is applied on two different data sets, using different number of training samples. The performance of IVM to Support Vector Machines (SVM) is compared in terms of accuracy and sparsity. Moreover, the impact of the training sample set on the accuracy and stability of IVM was investigated. The results underline that the IVM perform similar when compared to the popular SVM in terms of accuracy. Moreover, the number of import vectors from the IVM is significantly lower when compared to the number of support vectors from the SVM. Thus, the classification process of the IVM is faster. These findings are independent from the study site, the number of training samples and specific classes. Consequently, the proposed IVM approach is a promising classification method for hyperspectral imagery.},
    city = {Bonn},
    url = {https://www.ipb.uni-bonn.de/pdfs/Roscher2012Evaluation.pdf},
    }

  • R. Roscher, B. Waske, and W. Förstner, “Incremental Import Vector Machines for Classifying Hyperspectral Data,” IEEE Transactions on Geoscience and Remote Sensing, vol. 50, iss. 9, p. 3463–3473, 2012. doi:10.1109/TGRS.2012.2184292
    [BibTeX]

    In this paper, we propose an incremental learning strategy for import vector machines (IVM), which is a sparse kernel logistic regression approach. We use the procedure for the concept of self-training for sequential classification of hyperspectral data. The strategy comprises the inclusion of new training samples to increase the classification accuracy and the deletion of noninformative samples to be memory and runtime efficient. Moreover, we update the parameters in the incremental IVM model without retraining from scratch. Therefore, the incremental classifier is able to deal with large data sets. The performance of the IVM in comparison to support vector machines (SVM) is evaluated in terms of accuracy, and experiments are conducted to assess the potential of the probabilistic outputs of the IVM. Experimental results demonstrate that the IVM and SVM perform similar in terms of classification accuracy. However, the number of import vectors is significantly lower when compared to the number of support vectors, and thus, the computation time during classification can be decreased. Moreover, the probabilities provided by IVM are more reliable, when compared to the probabilistic information, derived from an SVM’s output. In addition, the proposed self-training strategy can increase the classification accuracy. Overall, the IVM and its incremental version is worthwhile for the classification of hyperspectral data.

    @article{roscher2012incremental,
    author = {Roscher, Ribana and Waske, Bj\"orn and F\"orstner, Wolfgang},
    title = {Incremental Import Vector Machines for Classifying Hyperspectral Data},
    journal = {IEEE Transactions on Geoscience and Remote Sensing},
    year = {2012},
    month = sep,
    volume = {50},
    number = {9},
    pages = {3463--3473},
    issn = {0196-2892},
    doi = {10.1109/TGRS.2012.2184292},
    abstract = {In this paper, we propose an incremental learning strategy for import vector machines (IVM), which is a sparse kernel logistic regression approach. We use the procedure for the concept of self-training for sequential classification of hyperspectral data. The strategy comprises the inclusion of new training samples to increase the classification accuracy and the deletion of noninformative samples to be memory and runtime efficient. Moreover, we update the parameters in the incremental IVM model without retraining from scratch. Therefore, the incremental classifier is able to deal with large data sets. The performance of the IVM in comparison to support vector machines (SVM) is evaluated in terms of accuracy, and experiments are conducted to assess the potential of the probabilistic outputs of the IVM. Experimental results demonstrate that the IVM and SVM perform similar in terms of classification accuracy. However, the number of import vectors is significantly lower when compared to the number of support vectors, and thus, the computation time during classification can be decreased. Moreover, the probabilities provided by IVM are more reliable, when compared to the probabilistic information, derived from an SVM's output. In addition, the proposed self-training strategy can increase the classification accuracy. Overall, the IVM and its incremental version is worthwhile for the classification of hyperspectral data.},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • F. Schindler and W. Förstner, “Real-time Camera Guidance for 3D Scene Reconstruction,” in ISPRS Annals of the Photogrammetry, Remote Sensing and Spatial Information Sciences, 2012.
    [BibTeX] [PDF]

    We propose a framework for multi-view stereo reconstruction exploiting the possibility to interactively guiding the operator during the image acquisition process. Multi-view stereo is a commonly used method to reconstruct both camera trajectory and 3D object shape. After determining an initial solution, a globally optimal reconstruction is usually obtained by executing a bundle adjustment involving all images. Acquiring suitable images, however, still requires an experienced operator to ensure accuracy and completeness of the final solution. We propose an interactive framework for guiding unexperienced users or possibly an autonomous robot. Using approximate camera orientations and object points we estimate point uncertainties within a sliding bundle adjustment and suggest appropriate camera movements. A visual feedback system communicates the decisions to the user in an intuitive way. We demonstrate the suitability of our system with a virtual image acquisition simulation as well as in real-world scenarios. We show that following the camera movements suggested by our system the final scene reconstruction with the automatically extracted key frames is both more complete and more accurate. Possible applications are non-professional 3D acquisition systems on low-cost platforms like mobile phones, autonomously navigating robots as well as online flight planning of unmanned aerial vehicles.

    @InProceedings{schindler2012real,
    title = {Real-time Camera Guidance for {3D} Scene Reconstruction},
    author = {Falko Schindler and Wolfgang F\"orstner},
    booktitle = {ISPRS Annals of the Photogrammetry, Remote Sensing and Spatial Information Sciences},
    year = {2012},
    volume = {I-3},
    abstract = {We propose a framework for multi-view stereo reconstruction exploiting the possibility to interactively guiding the operator during the image acquisition process. Multi-view stereo is a commonly used method to reconstruct both camera trajectory and 3D object shape. After determining an initial solution, a globally optimal reconstruction is usually obtained by executing a bundle adjustment involving all images. Acquiring suitable images, however, still requires an experienced operator to ensure accuracy and completeness of the final solution. We propose an interactive framework for guiding unexperienced users or possibly an autonomous robot. Using approximate camera orientations and object points we estimate point uncertainties within a sliding bundle adjustment and suggest appropriate camera movements. A visual feedback system communicates the decisions to the user in an intuitive way. We demonstrate the suitability of our system with a virtual image acquisition simulation as well as in real-world scenarios. We show that following the camera movements suggested by our system the final scene reconstruction with the automatically extracted key frames is both more complete and more accurate. Possible applications are non-professional 3D acquisition systems on low-cost platforms like mobile phones, autonomously navigating robots as well as online flight planning of unmanned aerial vehicles.},
    keywords = {Three-dimensional Reconstruction, Bundle Adjustment, Camera Orientation, Real-time Planning},
    doi = {10.5194/isprsannals-I-3-69-2012},
    url = {https://www.isprs-ann-photogramm-remote-sens-spatial-inf-sci.net/I-3/69/2012/isprsannals-I-3-69-2012.pdf},
    }

  • J. Schneider, F. Schindler, T. Läbe, and W. Förstner, “Bundle Adjustment for Multi-camera Systems with Points at Infinity,” in ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences, 2012, p. 75–80. doi:10.5194/isprsannals-I-3-75-2012
    [BibTeX] [PDF]

    We present a novel approach for a rigorous bundle adjustment for omnidirectional and multi-view cameras, which enables an efficient maximum-likelihood estimation with image and scene points at infinity. Multi-camera systems are used to increase the resolution, to combine cameras with different spectral sensitivities (Z/I DMC, Vexcel Ultracam) or – like omnidirectional cameras – to augment the effective aperture angle (Blom Pictometry, Rollei Panoscan Mark III). Additionally multi-camera systems gain in importance for the acquisition of complex 3D structures. For stabilizing camera orientations – especially rotations – one should generally use points at the horizon over long periods of time within the bundle adjustment that classical bundle adjustment programs are not capable of. We use a minimal representation of homogeneous coordinates for image and scene points. Instead of eliminating the scale factor of the homogeneous vectors by Euclidean normalization, we normalize the homogeneous coordinates spherically. This way we can use images of omnidirectional cameras with single-view point like fisheye cameras and scene points, which are far away or at infinity. We demonstrate the feasibility and the potential of our approach on real data taken with a single camera, the stereo camera FinePix Real 3D W3 from Fujifilm and the multi-camera system Ladybug3 from Point Grey.

    @InProceedings{schneider12isprs,
    title = {Bundle Adjustment for Multi-camera Systems with Points at Infinity},
    author = {J. Schneider and F. Schindler and T. L\"abe and W. F\"orstner},
    booktitle = {ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences},
    year = {2012},
    pages = {75--80},
    volume = {I-3},
    abstract = {We present a novel approach for a rigorous bundle adjustment for omnidirectional and multi-view cameras, which enables an efficient maximum-likelihood estimation with image and scene points at infinity. Multi-camera systems are used to increase the resolution, to combine cameras with different spectral sensitivities (Z/I DMC, Vexcel Ultracam) or - like omnidirectional cameras - to augment the effective aperture angle (Blom Pictometry, Rollei Panoscan Mark III). Additionally multi-camera systems gain in importance for the acquisition of complex 3D structures. For stabilizing camera orientations - especially rotations - one should generally use points at the horizon over long periods of time within the bundle adjustment that classical bundle adjustment programs are not capable of. We use a minimal representation of homogeneous coordinates for image and scene points. Instead of eliminating the scale factor of the homogeneous vectors by Euclidean normalization, we normalize the homogeneous coordinates spherically. This way we can use images of omnidirectional cameras with single-view point like fisheye cameras and scene points, which are far away or at infinity. We demonstrate the feasibility and the potential of our approach on real data taken with a single camera, the stereo camera FinePix Real 3D W3 from Fujifilm and the multi-camera system Ladybug3 from Point Grey.},
    address = {Melbourne},
    doi = {10.5194/isprsannals-I-3-75-2012},
    url = {https://www.isprs-ann-photogramm-remote-sens-spatial-inf-sci.net/I-3/75/2012/isprsannals-I-3-75-2012.pdf},
    }

  • L. Spinello, C. Stachniss, and W. Burgard, “Scene in the Loop: Towards Adaptation-by-Tracking in RGB-D Data,” in Proc. of the RSS Workshop RGB-D: Advanced Reasoning with Depth Cameras, 2012.
    [BibTeX] [PDF]
    [none]
    @inproceedings{spinello2012,
    author = {L. Spinello and C. Stachniss and W. Burgard},
    title = {Scene in the Loop: Towards Adaptation-by-Tracking in RGB-D Data},
    booktitle = {Proc. of the RSS Workshop RGB-D: Advanced Reasoning with Depth Cameras},
    year = {2012},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/spinello12rssws.pdf},
    abstract = {[none]},
    timestamp = {2014.04.24},
    }

  • T. Stroth, “Kartierung landwirtschaftlicher Kulturarten mittels multitemporaler RapidEye und TerraSAR-X Daten,” Bachelor Thesis, 2012.
    [BibTeX]

    none

    @MastersThesis{stroth2012kartierung,
    title = {Kartierung landwirtschaftlicher Kulturarten mittels multitemporaler RapidEye und TerraSAR-X Daten},
    author = {Stroth, Tobias},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2012},
    type = {Bachelor Thesis},
    abstract = {none},
    timestamp = {2013.04.15},
    }

  • B. Waske, J. Benediktsson, and J. Sveinsson, “Signal and Image Processing for Remote Sensing,” in Signal and Image Processing for Remote Sensing, Second Edition, 2nd ed., C. H. Chen, Ed., CRC Press, 2012, p. 365–374. doi:10.1201/b11656-21
    [BibTeX]

    Land cover classifications are perhaps the widest used application in context of remote sensing. The recent development of remote sensing systems, including numerous bands, high spatial resolution and increased repetition rates as well as the availability of more diverse remote sensing imagery increase the potential of remote sensing based land cover classifications. Nevertheless, recent data sets demand more sophisticated classifiers and the development of adequate methods in an ongoing research topic in the field of remote sensing. In this context the potential of the ensemble technique Random Forest (RF) for classifying hyperspectral and multisensor remote sensing data is demonstrated. The classification is done on two different data sets, comprising of (i) multispectral and SAR data and (ii) hyperspectral imagery. The results are compared to well known algorithms (e.g. Maximum Likelihood Classifier, Spectral Angle Mapper) as well as recent developments such as Support Vector Machines (SVM). Overall the results demonstrate that RF can be considered desirable for classification of hyperspectral as well as multisensor data sets. RF, significantly outperforms common methods in terms of accuracy and is comparable to SVM. RF achieve high accuracies, even with small training sample, and is simple to handle, because it mainly depends on two user-defined values.

    @InCollection{waske2012signal,
    title = {Random Forest Classification of Remote Sensing Data},
    author = {Waske, Bj\"orn and Benediktsson, Jon and Sveinsson, Johannes},
    booktitle = {Signal and Image Processing for Remote Sensing, Second Edition},
    editor = {Chen, Chi Hau},
    pages = {365--374},
    publisher = {CRC Press},
    year = {2012},
    edition = {Second},
    month = feb,
    abstract = {Land cover classifications are perhaps the widest used application in context of remote sensing. The recent development of remote sensing systems, including numerous bands, high spatial resolution and increased repetition rates as well as the availability of more diverse remote sensing imagery increase the potential of remote sensing based land cover classifications. Nevertheless, recent data sets demand more sophisticated classifiers and the development of adequate methods in an ongoing research topic in the field of remote sensing. In this context the potential of the ensemble technique Random Forest (RF) for classifying hyperspectral and multisensor remote sensing data is demonstrated. The classification is done on two different data sets, comprising of (i) multispectral and SAR data and (ii) hyperspectral imagery. The results are compared to well known algorithms (e.g. Maximum Likelihood Classifier, Spectral Angle Mapper) as well as recent developments such as Support Vector Machines (SVM). Overall the results demonstrate that RF can be considered desirable for classification of hyperspectral as well as multisensor data sets. RF, significantly outperforms common methods in terms of accuracy and is comparable to SVM. RF achieve high accuracies, even with small training sample, and is simple to handle, because it mainly depends on two user-defined values.},
    doi = {10.1201/b11656-21},
    isbn = {978-1-4398-5596-6},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • B. Waske, S. van der Linden, C. Oldenburg, B. Jakimow, A. Rabe, and P. Hostert, “imageRF – A user-oriented implementation for remote sensing image analysis with Random Forests,” Environmental Modelling & Software, vol. 35, p. 192–193, 2012. doi:10.1016/j.envsoft.2012.01.014
    [BibTeX]

    An IDL implementation for the classification and regression analysis of remote sensing images with Random Forests is introduced. The tool, called imageRF, is platform and license independent and uses generic image file formats. It works well with default parameterization, yet all relevant parameters can be defined in intuitive GUIs. This makes it a user-friendly image processing tool, which is implemented as an add-on in the free EnMAP-Box and may be used in the commercial IDL/ENVI software. (C) 2012 Elsevier Ltd. All rights reserved.

    @Article{waske2012imagerf,
    title = {imageRF -- A user-oriented implementation for remote sensing image analysis with Random Forests},
    author = {Waske, Bj\"orn and van der Linden, Sebastian and Oldenburg, Carsten and Jakimow, Benjamin and Rabe, Andreas and Hostert, Patrick},
    journal = {Environmental Modelling \& Software},
    year = {2012},
    month = jul,
    pages = {192--193},
    volume = {35},
    abstract = {An IDL implementation for the classification and regression analysis of remote sensing images with Random Forests is introduced. The tool, called imageRF, is platform and license independent and uses generic image file formats. It works well with default parameterization, yet all relevant parameters can be defined in intuitive GUIs. This makes it a user-friendly image processing tool, which is implemented as an add-on in the free EnMAP-Box and may be used in the commercial IDL/ENVI software. (C) 2012 Elsevier Ltd. All rights reserved.},
    doi = {10.1016/j.envsoft.2012.01.014},
    issn = {1364-8152},
    owner = {waske},
    timestamp = {2012.09.04},
    }

  • S. Wenzel and W. Förstner, “Learning a compositional representation for facade object categorization,” in ISPRS Annals of Photogrammetry, Remote Sensing and the Spatial Information Sciences; Proc. of 22nd Congress of the International Society for Photogrammetry and Remote Sensing (ISPRS), 2012, p. 197–202. doi:10.5194/isprsannals-I-3-197-2012
    [BibTeX] [PDF]

    Our objective is the categorization of the most dominant objects in facade images, like windows, entrances and balconies. In order to execute an image interpretation of complex scenes we need an interaction between low level bottom-up feature detection and highlevel inference from top-down. A top-down approach would use results of a bottom-up detection step as evidence for some high-level inference of scene interpretation. We present a statistically founded object categorization procedure that is suited for bottom-up object detection. Instead of choosing a bag of features in advance and learning models based on these features, it is more natural to learn which features best describe the target object classes. Therefore we learn increasingly complex aggregates of line junctions in image sections from man-made scenes. We present a method for the classification of image sections by using the histogram of diverse types of line aggregates.

    @InProceedings{wenzel2012learning,
    title = {Learning a compositional representation for facade object categorization},
    author = {Wenzel, Susanne and F\"orstner, Wolfgang},
    booktitle = {ISPRS Annals of Photogrammetry, Remote Sensing and the Spatial Information Sciences; Proc. of 22nd Congress of the International Society for Photogrammetry and Remote Sensing (ISPRS)},
    year = {2012},
    pages = {197--202},
    volume = {I-3},
    abstract = {Our objective is the categorization of the most dominant objects in facade images, like windows, entrances and balconies. In order to execute an image interpretation of complex scenes we need an interaction between low level bottom-up feature detection and highlevel inference from top-down. A top-down approach would use results of a bottom-up detection step as evidence for some high-level inference of scene interpretation. We present a statistically founded object categorization procedure that is suited for bottom-up object detection. Instead of choosing a bag of features in advance and learning models based on these features, it is more natural to learn which features best describe the target object classes. Therefore we learn increasingly complex aggregates of line junctions in image sections from man-made scenes. We present a method for the classification of image sections by using the histogram of diverse types of line aggregates.},
    address = {Melbourne},
    doi = {10.5194/isprsannals-I-3-197-2012},
    url = {https://www.ipb.uni-bonn.de/pdfs/Wenzel2012Learning.pdf},
    }

  • Spatial Cognition VIII, C. Stachniss, K. Schill, and D. Uttal, Eds., Springer, 2012.
    [BibTeX]
    [none]
    @Book{stachniss2012a,
    title = {Spatial Cognition VIII},
    editor = {C. Stachniss and K. Schill and D. Uttal},
    publisher = {Springer},
    year = {2012},
    month = aug,
    abstract = {[none]},
    timestamp = {2014.04.24},
    }

2011

  • S. Asadi, M. Reggente, C. Stachniss, C. Plagemann, and A. J. Lilienthal, “Intelligent Systems for Machine Olfaction: Tools and Methodologies,” , E. L. Hines and M. S. Leeson, Eds., IGI Global, 2011, pp. 153-179.
    [BibTeX]
    [none]
    @InCollection{asadi2011,
    title = {Statistical Gas Distribution Modelling using Kernel Methods},
    author = {S. Asadi and M. Reggente and C. Stachniss and C. Plagemann and A.J. Lilienthal},
    booktitle = {Intelligent Systems for Machine Olfaction: Tools and Methodologies},
    editor = {E.L. Hines and M.S. Leeson},
    pages = {153--179},
    publisher = {IGI Global},
    year = {2011},
    abstract = {[none]},
    timestamp = {2014.04.24},
    }

  • S. D. Bauer, F. Korč, and W. Förstner, “The potential of automatic methods of classification to identify leaf diseases from multispectral images,” Precision Agriculture, vol. 12, iss. 3, p. 361–377, 2011. doi:10.1007/s11119-011-9217-6
    [BibTeX] [PDF]

    Three methods of automatic classification of leaf diseases are described based on high-resolution multispectral stereo images. Leaf diseases are economically important as they can cause a loss of yield. Early and reliable detection of leaf diseases has important practical relevance, especially in the context of precision agriculture for localized treatment with fungicides. We took stereo images of single sugar beet leaves with two cameras (RGB and multispectral) in a laboratory under well controlled illumination conditions. The leaves were either healthy or infected with the leaf spot pathogen Cercospora beticola or the rust fungus Uromyces betae. To fuse information from the two sensors, we generated 3-D models of the leaves. We discuss the potential of two pixelwise methods of classification: k-nearest neighbour and an adaptive Bayes classification with minimum risk assuming a Gaussian mixture model. The medians of pixelwise classification rates achieved in our experiments are 91% for Cercospora beticola and 86% for Uromyces betae. In addition, we investigated the potential of contextual classification with the so called conditional random field method, which seemed to eliminate the typical errors of pixelwise classification.

    @article{bauer2011potential,
    author = {Bauer, Sabine Daniela and Kor{\vc}, Filip and F\"orstner, Wolfgang},
    title = {The potential of automatic methods of classification to identify leaf diseases from multispectral images},
    journal = {Precision Agriculture},
    year = {2011},
    volume = {12},
    number = {3},
    pages = {361--377},
    doi = {10.1007/s11119-011-9217-6},
    url = {https://www.ipb.uni-bonn.de/pdfs/Bauer2011potential.pdf},
    abstract = {Three methods of automatic classification of leaf diseases are described based on high-resolution multispectral stereo images. Leaf diseases are economically important as they can cause a loss of yield. Early and reliable detection of leaf diseases has important practical relevance, especially in the context of precision agriculture for localized treatment with fungicides. We took stereo images of single sugar beet leaves with two cameras (RGB and multispectral) in a laboratory under well controlled illumination conditions. The leaves were either healthy or infected with the leaf spot pathogen Cercospora beticola or the rust fungus Uromyces betae. To fuse information from the two sensors, we generated 3-D models of the leaves. We discuss the potential of two pixelwise methods of classification: k-nearest neighbour and an adaptive Bayes classification with minimum risk assuming a Gaussian mixture model. The medians of pixelwise classification rates achieved in our experiments are 91% for Cercospora beticola and 86% for Uromyces betae. In addition, we investigated the potential of contextual classification with the so called conditional random field method, which seemed to eliminate the typical errors of pixelwise classification.},
    }

  • J. Becker, C. Bersch, D. Pangercic, B. Pitzer, T. Rühr, B. Sankaran, J. Sturm, C. Stachniss, M. Beetz, and W. Burgard, “Mobile Manipulation of Kitchen Containers,” in Proc. of the IROS’11 Workshop on Results, Challenges and Lessons Learned in Advancing Robots with a Common Platform, San Francisco, CA, USA, 2011.
    [BibTeX] [PDF]
    [none]
    @inproceedings{becker2011,
    author = {J. Becker and C. Bersch and D. Pangercic and B. Pitzer and T. R\"uhr and B. Sankaran and J. Sturm and C. Stachniss and M. Beetz and W. Burgard},
    title = {Mobile Manipulation of Kitchen Containers},
    booktitle = {Proc. of the IROS'11 Workshop on Results, Challenges and Lessons Learned in Advancing Robots with a Common Platform},
    address = {San Francisco, CA, USA},
    year = {2011},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/becker11irosws.pdf},
    abstract = {[none]},
    timestamp = {2014.04.24},
    }

  • M. Bennewitz, D. Maier, A. Hornung, and C. Stachniss, “Integrated Perception and Navigation in Complex Indoor Environments,” in Proc. of the IEEE-RAS Int. Conf. on Humanoid Robots (HUMANOIDS), 2011.
    [BibTeX]
    [none]
    @inproceedings{bennewitz2011,
    author = {M. Bennewitz and D. Maier and A. Hornung and C. Stachniss},
    title = {Integrated Perception and Navigation in Complex Indoor Environments},
    booktitle = {Proc. of the IEEE-RAS Int. Conf. on Humanoid Robots (HUMANOIDS)},
    year = {2011},
    abstract = {[none]},
    note = {Invited presentation at the workshop on Humanoid service robot navigation in crowded and dynamic environments},
    timestamp = {2014.04.24},
    }

  • T. Dickscheid, F. Schindler, and W. Förstner, “Coding Images with Local Features,” International Journal of Computer Vision, vol. 94, iss. 2, p. 154–174, 2011. doi:10.1007/s11263-010-0340-z
    [BibTeX] [PDF]

    We present a scheme for measuring completeness of local feature extraction in terms of image coding. Completeness is here considered as good coverage of relevant image information by the features. As each feature requires a certain number of bits which are representative for a certain subregion of the image, we interpret the coverage as a sparse coding scheme. The measure is therefore based on a comparison of two densities over the image domain: An entropy density p_H(x) based on local image statistics, and a feature coding density p_c(x) which is directly computed from each particular set of local features. Motivated by the coding scheme in JPEG, the entropy distribution is derived from the power spectrum of local patches around each pixel position in a statistically sound manner. As the total number of bits for coding the image and for representing it with local features may be different, we measure incompleteness by the Hellinger distance between p_H(x) and p_c(x). We will derive a procedure for measuring incompleteness of possibly mixed sets of local features and show results on standard datasets using some of the most popular region and keypoint detectors, including Lowe, MSER and the recently published SFOP detectors. Furthermore, we will draw some interesting conclusions about the complementarity of detectors.

    @Article{dickscheid2011coding,
    title = {Coding Images with Local Features},
    author = {Dickscheid, Timo and Schindler, Falko and F\"orstner, Wolfgang},
    journal = {International Journal of Computer Vision},
    year = {2011},
    number = {2},
    pages = {154--174},
    volume = {94},
    abstract = {We present a scheme for measuring completeness of local feature extraction in terms of image coding. Completeness is here considered as good coverage of relevant image information by the features. As each feature requires a certain number of bits which are representative for a certain subregion of the image, we interpret the coverage as a sparse coding scheme. The measure is therefore based on a comparison of two densities over the image domain: An entropy density p_H(x) based on local image statistics, and a feature coding density p_c(x) which is directly computed from each particular set of local features. Motivated by the coding scheme in JPEG, the entropy distribution is derived from the power spectrum of local patches around each pixel position in a statistically sound manner. As the total number of bits for coding the image and for representing it with local features may be different, we measure incompleteness by the Hellinger distance between p_H(x) and p_c(x). We will derive a procedure for measuring incompleteness of possibly mixed sets of local features and show results on standard datasets using some of the most popular region and keypoint detectors, including Lowe, MSER and the recently published SFOP detectors. Furthermore, we will draw some interesting conclusions about the complementarity of detectors.},
    doi = {10.1007/s11263-010-0340-z},
    issn = {0920-5691},
    publisher = {Springer Netherlands},
    url = {https://www.ipb.uni-bonn.de/pdfs/Dickscheid2011Coding.pdf},
    }

  • T. F. Dominicus, “Vergleich von Verfahren zur Rekonstruktion von Oberflächen,” Bachelor Thesis, 2011.
    [BibTeX]

    Summary: There is a growing demand for digital 3D-models in various disciplines. Dense point clouds are often the basis for these. These point clouds can be generated by a variety of different methods. One possible method is Stereo matching. There are different approaches to this. In this thesis, we examine three different Stereo matching Algorithms and compare their qualities with respect to accuracy, point density and point distribution. The used Algorithms are the Patch-based Multi-view stereo Software, the Semi-global Matching and the 3-Image Semi-global matching. In order to test these methods, we conduct two experiments. Each method is used to create a dense point cloud, which we then compare to a reference cloud. The reference clouds are predetermined in the first Experiment and gathered with a Laser triangulation scanner in the second. The resulting point cloud is then analyzed. We predicted, that both SGM Algorithms perform better than the PMVS in all examined characteristics. However, our experiments show that this is only true under certain conditions. While the point density and distribution is considerably higher in the first experiment, the accuracy is slightly lower compared to the PMVS. Both SGM methods show even worse results in the second experiment. Here, the density of the results of the SGM is lower and the distribution is slightly better. The accuracy of the SGM is on the same level as the PMVS. The 3-Image SGM produced only a very sparse point cloud with a high number of outliers. We could not calculate an accuracy rating for this method. However, we assume that these findings are due to poor camera orientation in the second experiment. Zusammenfassung: Der Bedarf an digitalen 3D-Modellen in verschiedenen Disziplinen nimmt stetig zu. Grundlage dafür sind oft dichte Punktwolken. Diese Punktwolken können mit Hilfe verschiedener Verfahren erstellt werden. Eine Möglichkeit ist das Stereomatching. 
Dabei gibt es verschiedene Ansätze. In dieser Arbeit untersuchen wir drei verschiedene Stereomatching Algorithmen und vergleichen deren Eigenschaften in Bezug auf Genauigkeit, Punktdichte und Punktverteilung. Die verwendeten Verfahren sind die Multi-view stereo Software, das Semi-global Matching und das 3-Bild Semi-global matching. Um diese Verfahren zu untersuchen haben wir zwei Experimente durchgeführt. Wir verwenden jede dieser Methoden um eine dichte Punktwolke aus mehreren Bildern einer Szene zu erstellen. Diese Punktwolken vergleichen wir dann mit einer Referenzpunktwolke. Im ersten Experiment ist diese Referenz vorgegeben. Im zweiten Experiment erstellen wir diese Referenz, indem wir die Szene mit einem Lasertriangulationsscanner erfassen. Wir hatten erwartet, dass die beiden SGM Algorithmen in allen drei Eigenschaften dem PMVS überlegen sind. Unsere Experimente zeigen jedoch, dass dies nur unter bestimmten Bedingungen der Fall ist. Während die Punktdichte im ersten Experiment beim SGM deutlich höher und die Punktverteilung besser ist, ist die Genauigkeit etwas geringer als die des PMVS. Beide SGM Verfahren bringen im zweiten Experiment noch schlechtere Ergebnisse. Die Punktdichte in den Punktwolken des SGM ist geringer und die Punktverteilung leicht besser. Die Genauigkeit des SGM ist leicht schlechter als die des PMVS. Das 3-Bild SGM berechnet hier nur eine sehr dünne Punktwolke mit einer hohen Zahl an Ausreißern. Wir konnten keine Punktwolke erstellen, bei der die Berechnung der Genauigkeit sinnvoll gewesen wäre. Wir vermuten jedoch, dass dies nicht am Algorithmus, sondern an einer schlechten Orientierung der Kameras im zweiten Experiment liegt.

    @MastersThesis{dominicus2011vergleich,
    title = {Vergleich von Verfahren zur Rekonstruktion von Oberfl\"achen},
    author = {Dominicus, Tim Florian},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2011},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Inf. Jan Siegemund},
    type = {bachelor thesis},
    abstract = {\textbf{Summary} There is a growing demand for digital 3D-models in various disciplines. Dense point clouds are often the basis for these. These point clouds can be generated by a variety of different methods. One possible method is Stereo matching. There are different approaches to this. In this thesis, we examine three different Stereo matching Algorithms and compare their qualities with respect to accuracy, point density and point distribution. The used Algorithms are the Patch-based Multi-view stereo Software, the Semi-global Matching and the 3-Image Semi-global matching. In order to test these methods, we conduct two experiments. Each method is used to create a dense point cloud, which we then compare to a reference cloud. The reference clouds are predetermined in the first Experiment and gathered with a Laser triangulation scanner in the second. The resulting point cloud is then analyzed. We predicted, that both SGM Algorithms perform better than the PMVS in all examined characteristics. However, our experiments show that this is only true under certain conditions. While the point density and distribution is considerably higher in the first experiment, the accuracy is slightly lower compared to the PMVS. Both SGM methods show even worse results in the second experiment. Here, the density of the results of the SGM is lower and the distribution is slightly better. The accuracy of the SGM is on the same level as the PMVS. The 3-Image SGM produced only a very sparse point cloud with a high number of outliers. We could not calculate an accuracy rating for this method. However, we assume that these findings are due to poor camera orientation in the second experiment. \textbf{Zusammenfassung} Der Bedarf an digitalen 3D-Modellen in verschiedenen Disziplinen nimmt stetig zu. Grundlage daf\"ur sind oft dichte Punktwolken. Diese Punktwolken k\"onnen mit Hilfe verschiedener Verfahren erstellt werden. Eine M\"oglichkeit ist das Stereomatching.
Dabei gibt es verschiedene Ans\"atze. In dieser Arbeit untersuchen wir drei verschiedene Stereomatching Algorithmen und vergleichen deren Eigenschaften in Bezug auf Genauigkeit, Punktdichte und Punktverteilung. Die verwendeten Verfahren sind die Multi-view stereo Software, das Semi-global Matching und das 3-Bild Semi-global matching. Um diese Verfahren zu untersuchen haben wir zwei Experimente durchgef\"uhrt. Wir verwenden jede dieser Methoden um eine dichte Punktwolke aus mehreren Bildern einer Szene zu erstellen. Diese Punktwolken vergleichen wir dann mit einer Referenzpunktwolke. Im ersten Experiment ist diese Referenz vorgegeben. Im zweiten Experiment erstellen wir diese Referenz, indem wir die Szene mit einem Lasertriangulationsscanner erfassen. Wir hatten erwartet, dass die beiden SGM Algorithmen in allen drei Eigenschaften dem PMVS \"uberlegen sind. Unsere Experimente zeigen jedoch, dass dies nur unter bestimmten Bedingungen der Fall ist. W\"ahrend die Punktdichte im ersten Experiment
    beim SGM deutlich h\"oher und die Punktverteilung besser ist, ist die Genauigkeit etwas geringer als die des PMVS. Beide SGM Verfahren bringen im zweiten Experiment noch schlechtere Ergebnisse. Die Punktdichte in den Punktwolken des SGM ist geringer und die Punktverteilung leicht besser. Die Genauigkeit des SGM ist leicht schlechter als die des PMVS. Das 3-Bild SGM berechnet hier nur eine sehr d\"unne Punktwolke mit einer hohen Zahl an Ausrei{\ss}ern. Wir konnten keine Punktwolke erstellen, bei der die Berechnung der Genauigkeit sinnvoll gewesen w\"are. Wir vermuten jedoch, dass dies nicht am Algorithmus, sondern an einer schlechten Orientierung der Kameras im zweiten Experiment liegt.},
    city = {Bonn},
    }

  • B. Frank, C. Stachniss, N. Abdo, and W. Burgard, “Using Gaussian Process Regression for Efficient Motion Planning in Environments with Deformable Objects,” in Proc. of the AAAI-11 Workshop on Automated Action Planning for Autonomous Mobile Robots (PAMR), San Francisco, CA, USA, 2011.
    [BibTeX] [PDF]
    [none]
    @InProceedings{frank2011,
    title = {Using {Gaussian} Process Regression for Efficient Motion Planning in Environments with Deformable Objects},
    author = {B. Frank and C. Stachniss and N. Abdo and W. Burgard},
    booktitle = {Proc. of the AAAI-11 Workshop on Automated Action Planning for Autonomous Mobile Robots (PAMR)},
    year = {2011},
    address = {San Francisco, CA, USA},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/frank11pamr.pdf},
    }

  • B. Frank, C. Stachniss, N. Abdo, and W. Burgard, “Efficient Motion Planning for Manipulation Robots in Environments with Deformable Objects,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), San Francisco, CA, USA, 2011.
    [BibTeX] [PDF]
    [none]
    @InProceedings{frank2011a,
    title = {Efficient Motion Planning for Manipulation Robots in Environments with Deformable Objects},
    author = {B. Frank and C. Stachniss and N. Abdo and W. Burgard},
    booktitle = iros,
    year = {2011},
    address = {San Francisco, CA, USA},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/frank11iros.pdf},
    }

  • M. Hans and R. Roscher, “Zuordnen radiometrischer Informationen zu Laserscandaten von Weintrauben,” Department of Photogrammetry, University of Bonn, 2011.
    [BibTeX] [PDF]

    In diesem Report stellen wir zwei Verfahren vor, die radiometrische Informationen 3D-Scandaten zuordnen. Radiometrische Informationen unterstützen und verbessern die Anwendungen der Merkmalserfassung von Objekten, da sie weitere Kenntnisse über das gescannte Objekt liefern.

    @TechReport{hans2011zuordnen,
    author      = {Hans, Mathias and Roscher, Ribana},
    title       = {Zuordnen radiometrischer Informationen zu Laserscandaten von Weintrauben},
    institution = {Department of Photogrammetry, University of Bonn},
    year        = {2011},
    city        = {Bonn},
    url         = {https://www.ipb.uni-bonn.de/pdfs/Hans2011Zuordnen.pdf},
    abstract    = {In diesem Report stellen wir zwei Verfahren vor, die radiometrische Informationen 3D-Scandaten zuordnen. Radiometrische Informationen unterst\"utzen und verbessern die Anwendungen der Merkmalserfassung von Objekten, da sie weitere Kenntnisse \"uber das gescannte Objekt liefern.},
    }

  • R. Kümmerle, G. Grisetti, C. Stachniss, and W. Burgard, “Simultaneous Parameter Calibration, Localization, and Mapping for Robust Service Robotics,” in Proc. of the IEEE Workshop on Advanced Robotics and its Social Impacts, Half-Moon Bay, CA, USA, 2011.
    [BibTeX] [PDF]
    [none]
    @InProceedings{kummerle2011,
    title = {Simultaneous Parameter Calibration, Localization, and Mapping for Robust Service Robotics},
    author = {R. K\"ummerle and G. Grisetti and C. Stachniss and W. Burgard},
    booktitle = {Proc. of the IEEE Workshop on Advanced Robotics and its Social Impacts},
    year = {2011},
    address = {Half-Moon Bay, CA, USA},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/kuemmerle11arso.pdf},
    }

  • S. Klemenjak and B. Waske, “Classifying Multilevel Segmented TerraSAR-X Data, using Support Vector Machines,” in 4th TerraSAR-X Science Team Meeting, 2011.
    [BibTeX] [PDF]

    To segment a image with strongly varying object sizes results generally in under-segmentation of small structures or over-segmentation of big ones, which consequences poor classification accuracies. A strategy to produce multiple segmentations of one image and classification with support vector machines (SVM) of this segmentation stack afterwards is shown.

    @InProceedings{klemenjak2011classifying,
    title = {Classifying Multilevel Segmented {TerraSAR-X} Data, using Support Vector Machines},
    author = {Klemenjak, Sascha and Waske, Bj{\"o}rn},
    booktitle = {4th TerraSAR-X Science Team Meeting},
    year = {2011},
    abstract = {To segment a image with strongly varying object sizes results generally in under-segmentation of small structures or over-segmentation of big ones, which consequences poor classification accuracies. A strategy to produce multiple segmentations of one image and classification with support vector machines (SVM) of this segmentation stack afterwards is shown.},
    owner = {waske},
    timestamp = {2012.09.05},
    url = {https://www.ipb.uni-bonn.de/pdfs/Klemenjak2011Classifying.pdf},
    }

  • H. Kretzschmar and C. Stachniss, “Pose Graph Compression for Laser-based SLAM,” in Proc. of the Intl. Symposium of Robotics Research (ISRR), Flagstaff, AZ, USA, 2011.
    [BibTeX] [PDF]
    [none]
    @InProceedings{kretzschmar2011a,
    title = {Pose Graph Compression for Laser-based {SLAM}},
    author = {H. Kretzschmar and C. Stachniss},
    booktitle = isrr,
    year = {2011},
    address = {Flagstaff, AZ, USA},
    note = {Invited presentation},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/stachniss11isrr.pdf},
    }

  • H. Kretzschmar, C. Stachniss, and G. Grisetti, “Efficient Information-Theoretic Graph Pruning for Graph-Based SLAM with Laser Range Finders,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), San Francisco, CA, USA, 2011.
    [BibTeX] [PDF]
    [none]
    @InProceedings{kretzschmar2011,
    title = {Efficient Information-Theoretic Graph Pruning for Graph-Based {SLAM} with Laser Range Finders},
    author = {H. Kretzschmar and C. Stachniss and G. Grisetti},
    booktitle = iros,
    year = {2011},
    address = {San Francisco, CA, USA},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/kretzschmar11iros.pdf},
    }

  • B. Mack and B. Waske, “Optimizing support vector data description by automatically generated outliers,” in 7th Works. of the EARSeL Special Interest Group Imaging Spectroscopy, 2011.
    [BibTeX]
    [none]
    @InProceedings{mack2011optimizing,
    title = {Optimizing support vector data description by automatically generated outliers},
    author = {Mack, Benjamin and Waske, Bj{\"o}rn},
    booktitle = {7th Works. of the EARSeL Special Interest Group Imaging Spectroscopy},
    year = {2011},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • D. Maier, M. Bennewitz, and C. Stachniss, “Self-supervised Obstacle Detection for Humanoid Navigation Using Monocular Vision and Sparse Laser Data,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Shanghai, China, 2011.
    [BibTeX] [PDF]
    [none]
    @InProceedings{maier2011,
    title = {Self-supervised Obstacle Detection for Humanoid Navigation Using Monocular Vision and Sparse Laser Data},
    author = {D. Maier and M. Bennewitz and C. Stachniss},
    booktitle = icra,
    year = {2011},
    address = {Shanghai, China},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/maier11icra.pdf},
    }

  • R. Roscher, F. Schindler, and W. Förstner, “What would you look like in Springfield? Linear Transformations between High-Dimensional Spaces,” Department of Photogrammetry, University of Bonn, 2011.
    [BibTeX] [PDF]

    High-dimensional data structures occur in many fields of computer vision and machine learning. Transformation between two high-dimensional spaces usually involves the determination of a large amount of parameters and requires much labeled data to be given. There is much interest in reducing dimensionality if a lower-dimensional structure is underlying the data points. We present a procedure to enable the determination of a low-dimensional, projective transformation between two data sets, making use of state-of-the-art dimensional reduction algorithms. We evaluate multiple algorithms during several experiments with different objectives. We demonstrate the use of this procedure for applications like classification and assignments between two given data sets. Our procedure is semi-supervised due to the fact that all labeled and unlabeled points are used for the dimensionality reduction, but only few of them have to be labeled. Using test data we evaluate the quantitative and qualitative performance of different algorithms with respect to the classification and assignment task. We show that with these algorithms and our transformation approach high-dimensional data sets can be related to each other. Finally we can use this procedure to match real world facial images with cartoon images from Springfield, home town of the famous Simpsons.

    @TechReport{roscher2011what,
    title = {What would you look like in {Springfield}? Linear Transformations between High-Dimensional Spaces},
    author = {Roscher, Ribana and Schindler, Falko and F{\"o}rstner, Wolfgang},
    institution = {Department of Photogrammetry, University of Bonn},
    year = {2011},
    abstract = {High-dimensional data structures occur in many fields of computer vision and machine learning. Transformation between two high-dimensional spaces usually involves the determination of a large amount of parameters and requires much labeled data to be given. There is much interest in reducing dimensionality if a lower-dimensional structure is underlying the data points. We present a procedure to enable the determination of a low-dimensional, projective transformation between two data sets, making use of state-of-the-art dimensional reduction algorithms. We evaluate multiple algorithms during several experiments with different objectives. We demonstrate the use of this procedure for applications like classification and assignments between two given data sets. Our procedure is semi-supervised due to the fact that all labeled and unlabeled points are used for the dimensionality reduction, but only few of them have to be labeled. Using test data we evaluate the quantitative and qualitative performance of different algorithms with respect to the classification and assignment task. We show that with these algorithms and our transformation approach high-dimensional data sets can be related to each other. Finally we can use this procedure to match real world facial images with cartoon images from Springfield, home town of the famous Simpsons.},
    city = {Bonn},
    url = {https://www.ipb.uni-bonn.de/pdfs/Roscher2011What.pdf},
    }

  • R. Roscher, B. Waske, and W. Förstner, “Incremental import vector machines for large area land cover classification,” in IEEE International Conf. on Computer Vision Workshops (ICCV Workshops), 2011. doi:10.1109/ICCVW.2011.6130249
    [BibTeX]

    The classification of large areas consisting of multiple scenes is challenging regarding the handling of large and therefore mostly inhomogeneous data sets. Moreover, large data sets demand for computational efficient methods. We propose a method, which enables the efficient multi-class classification of large neighboring Landsat scenes. We use an incremental realization of the import vector machines, called I2VM, in combination with self-training to update an initial learned classifier with new training data acquired in the overlapping areas between neighboring Landsat scenes. We show in our experiments, that I2VM is a suitable classifier for large area land cover classification.

    @InProceedings{roscher2011incremental,
    title = {Incremental import vector machines for large area land cover classification},
    author = {Roscher, Ribana and Waske, Bj{\"o}rn and F{\"o}rstner, Wolfgang},
    booktitle = {{IEEE} International Conf. on Computer Vision Workshops (ICCV Workshops)},
    year = {2011},
    abstract = {The classification of large areas consisting of multiple scenes is challenging regarding the handling of large and therefore mostly inhomogeneous data sets. Moreover, large data sets demand for computational efficient methods. We propose a method, which enables the efficient multi-class classification of large neighboring Landsat scenes. We use an incremental realization of the import vector machines, called I2VM, in combination with self-training to update an initial learned classifier with new training data acquired in the overlapping areas between neighboring Landsat scenes. We show in our experiments, that I2VM is a suitable classifier for large area land cover classification.},
    doi = {10.1109/ICCVW.2011.6130249},
    keywords = {incremental import vector machines;inhomogeneous data sets;land cover classification;neighboring Landsat scenes;scenes classification;training data acquisition;data acquisition;geophysical image processing;image classification;natural scenes;support vector machines;terrain mapping;},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • H. Sardemann, “Registrierung von Bildern mit 3D-Punktwolken,” Bachelor Thesis, 2011.
    [BibTeX]
    [none]
    @MastersThesis{sardemann2011registrierung,
    title = {Registrierung von Bildern mit 3D-Punktwolken},
    author = {Sardemann, Hannes},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2011},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Falko Schindler},
    type = {bachelor thesis},
    city = {Bonn},
    }

  • F. Schindler and W. Förstner, “Fast Marching for Robust Surface Segmentation,” in LNCS, Photogrammetric Image Analysis, Munich, 2011, p. 147–158. doi:10.1007/978-3-642-24393-6
    [BibTeX] [PDF]

    We propose a surface segmentation method based on Fast Marching Farthest Point Sampling designed for noisy, visually reconstructed point clouds or laser range data. Adjusting the distance metric between neighboring vertices we obtain robust, edge-preserving segmentations based on local curvature. We formulate a cost function given a segmentation in terms of a description length to be minimized. An incremental-decremental segmentation procedure approximates a global optimum of the cost function and prevents from under- as well as strong over-segmentation. We demonstrate the proposed method on various synthetic and real-world data sets.

    @InProceedings{schindler2011fast,
    title = {Fast Marching for Robust Surface Segmentation},
    author = {Schindler, Falko and F{\"o}rstner, Wolfgang},
    editor = {Stilla, Uwe and Rottensteiner, Franz and Mayer, Helmut and Jutzi, Boris and Butenuth, Matthias},
    booktitle = {LNCS, Photogrammetric Image Analysis},
    year = {2011},
    address = {Munich},
    pages = {147--158},
    abstract = {We propose a surface segmentation method based on Fast Marching Farthest Point Sampling designed for noisy, visually reconstructed point clouds or laser range data. Adjusting the distance metric between neighboring vertices we obtain robust, edge-preserving segmentations based on local curvature. We formulate a cost function given a segmentation in terms of a description length to be minimized. An incremental-decremental segmentation procedure approximates a global optimum of the cost function and prevents from under- as well as strong over-segmentation. We demonstrate the proposed method on various synthetic and real-world data sets.},
    doi = {10.1007/978-3-642-24393-6},
    url = {https://www.ipb.uni-bonn.de/pdfs/Schindler2011Fast.pdf},
    }

  • F. Schindler, W. Förstner, and J. Frahm, “Classification and Reconstruction of Surfaces from Point Clouds of Man-made Objects,” in International Conf. on Computer Vision, IEEE/ISPRS Workshop on Computer Vision for Remote Sensing of the Environment, Barcelona, 2011, p. 257–263. doi:10.1109/ICCVW.2011.6130251
    [BibTeX] [PDF]

    We present a novel surface model and reconstruction method for man-made environments that take prior knowledge about topology and geometry into account. The model favors but is not limited to horizontal and vertical planes that are pairwise orthogonal. The reconstruction method does not require one particular class of sensors, as long as a triangulated point cloud is available. It delivers a complete 3D segmentation, parametrization and classification for both surface regions and inter-plane relations. By working on a pre-segmentation we reduce the computational cost and increase robustness to noise and outliers. All reasoning is statistically motivated, based on a few decision variables with meaningful interpretation in measurement space. We demonstrate our reconstruction method for visual reconstructions and laser range data.

    @InProceedings{schindler2011classification,
    title = {Classification and Reconstruction of Surfaces from Point Clouds of Man-made Objects},
    author = {Schindler, Falko and F{\"o}rstner, Wolfgang and Frahm, Jan-Michael},
    booktitle = {International Conf. on Computer Vision, IEEE/ISPRS Workshop on Computer Vision for Remote Sensing of the Environment},
    year = {2011},
    address = {Barcelona},
    note = {Organizers: Schindler, Konrad and F\"orstner, Wolfgang and Paparoditis, Nicolas},
    pages = {257--263},
    abstract = {We present a novel surface model and reconstruction method for man-made environments that take prior knowledge about topology and geometry into account. The model favors but is not limited to horizontal and vertical planes that are pairwise orthogonal. The reconstruction method does not require one particular class of sensors, as long as a triangulated point cloud is available. It delivers a complete 3D segmentation, parametrization and classification for both surface regions and inter-plane relations. By working on a pre-segmentation we reduce the computational cost and increase robustness to noise and outliers. All reasoning is statistically motivated, based on a few decision variables with meaningful interpretation in measurement space. We demonstrate our reconstruction method for visual reconstructions and laser range data.},
    doi = {10.1109/ICCVW.2011.6130251},
    proceeding = {ICCV Workshop on Computer Vision for Remote Sensing of the Environment},
    url = {https://www.ipb.uni-bonn.de/pdfs/Schindler2011Classification.pdf},
    }

  • J. Schittenhelm, “Empirische Untersuchungen zum Einsatz des SFOP-Punktdetektors zur Objektdetektion,” Diploma Thesis, 2011.
    [BibTeX] [PDF]
    [none]
    @MastersThesis{schittenhelm2011empirische,
    title = {Empirische Untersuchungen zum Einsatz des {SFOP}-Punktdetektors zur Objektdetektion},
    author = {Schittenhelm, J{\"o}rg},
    school = {University of Bonn},
    year = {2011},
    type = {diploma thesis},
    city = {Bonn},
    url = {https://www.ipb.uni-bonn.de/pdfs/Schittenhelm2011Empirische.pdf},
    }

  • B. Schmeing, T. Läbe, and W. Förstner, “Trajectory Reconstruction Using Long Sequences of Digital Images From an Omnidirectional Camera,” in Proc. of the 31st DGPF Conf. (Jahrestagung), Mainz, 2011, p. 443–452.
    [BibTeX] [PDF]

    We present a method to perform bundle adjustment using long sequences of digital images from an omnidirectional camera. We use the Ladybug3 camera from PointGrey, which consists of six individual cameras pointing in different directions. There is large overlap between successive images but only a few loop closures provide connections between distant camera positions. We face two challenges: (1) to perform a bundle adjustment with images of an omnidirectional camera and (2) implement outlier detection and estimation of initial parameters for the geometry described above. Our program combines the Ladybug's individual cameras to a single virtual camera and uses a spherical imaging model within the bundle adjustment, solving problem (1). Outlier detection (2) is done using bundle adjustments with small subsets of images followed by a robust adjustment of all images. Approximate values in our context are taken from an on-board inertial navigation system.

    @InProceedings{schmeing2011trajectory,
    title = {Trajectory Reconstruction Using Long Sequences of Digital Images From an Omnidirectional Camera},
    author = {Schmeing, Benno and L{\"a}be, Thomas and F{\"o}rstner, Wolfgang},
    booktitle = {Proc. of the 31st DGPF Conf. (Jahrestagung)},
    year = {2011},
    address = {Mainz},
    pages = {443--452},
    abstract = {We present a method to perform bundle adjustment using long sequences of digital images from an omnidirectional camera. We use the Ladybug3 camera from PointGrey, which consists of six individual cameras pointing in different directions. There is large overlap between successive images but only a few loop closures provide connections between distant camera positions. We face two challenges: (1) to perform a bundle adjustment with images of an omnidirectional camera and (2) implement outlier detection and estimation of initial parameters for the geometry described above. Our program combines the Ladybug's individual cameras to a single virtual camera and uses a spherical imaging model within the bundle adjustment, solving problem (1). Outlier detection (2) is done using bundle adjustments with small subsets of images followed by a robust adjustment of all images. Approximate values in our context are taken from an on-board inertial navigation system.},
    city = {Mainz},
    proceeding = {Proc. of the 31st DGPF Conf. (Jahrestagung)},
    url = {https://www.ipb.uni-bonn.de/pdfs/Schmeing2011Trajectory.pdf},
    }

  • J. Schneider, F. Schindler, and W. Förstner, “Bündelausgleichung für Multikamerasysteme,” in Proc. of the 31st DGPF Conf., 2011.
    [BibTeX] [PDF]

    Wir stellen einen Ansatz für eine strenge Bündelausgleichung für Multikamerasysteme vor. Hierzu verwenden wir eine minimale Repräsentation von homogenen Koordinatenvektoren für eine Maximum-Likelihood-Schätzung. Statt den Skalierungsfaktor von homogenen Vektoren durch Verwendung von euklidischen Größen zu eliminieren, werden die homogenen Koordinaten sphärisch normiert, so dass Bild- und Objektpunkte im Unendlichen repräsentierbar bleiben. Dies ermöglicht auch Bilder omnidirektionaler Kameras mit Einzelblickpunkt, wie Fisheyekameras, und weit entfernte bzw. unendlich ferne Punkte zu behandeln. Speziell Punkte am Horizont können über lange Zeiträume beobachtet werden und liefern somit eine stabile Richtungsinformation. Wir demonstrieren die praktische Umsetzung des Ansatzes anhand einer Bildfolge mit dem Multikamerasystem Ladybug3 von Point Grey, welches mit sechs Kameras 80 % der gesamten Sphäre abbildet.

    @InProceedings{schneider11dgpf,
    title = {B\"undelausgleichung f\"ur Multikamerasysteme},
    author = {J. Schneider and F. Schindler and W. F{\"o}rstner},
    booktitle = {Proc. of the 31st DGPF Conf.},
    year = {2011},
    address = {Mainz},
    abstract = {Wir stellen einen Ansatz f\"ur eine strenge B\"undelausgleichung f\"ur Multikamerasysteme vor. Hierzu verwenden wir eine minimale Repr\"asentation von homogenen Koordinatenvektoren f\"ur eine Maximum-Likelihood-Sch\"atzung. Statt den Skalierungsfaktor von homogenen Vektoren durch Verwendung von euklidischen Gr\"o{\ss}en zu eliminieren, werden die homogenen Koordinaten sph\"arisch normiert, so dass Bild- und Objektpunkte im Unendlichen repr\"asentierbar bleiben. Dies erm\"oglicht auch Bilder omnidirektionaler Kameras mit Einzelblickpunkt, wie Fisheyekameras, und weit entfernte bzw. unendlich ferne Punkte zu behandeln. Speziell Punkte am Horizont k\"onnen \"uber lange Zeitr\"aume beobachtet werden und liefern somit eine stabile Richtungsinformation. Wir demonstrieren die praktische Umsetzung des Ansatzes anhand einer Bildfolge mit dem Multikamerasystem Ladybug3 von Point Grey, welches mit sechs Kameras 80\% der gesamten Sph\"are abbildet.},
    url = {https://www.ipb.uni-bonn.de/pdfs/schneider11dgpf.pdf},
    }

  • S. Schoppohl, “Klassifikation von Multispektralen und Hyperspektralen Fernerkundungsdaten mittels sequentieller Klassifikationsverfahren,” Bachelor Thesis, 2011.
    [BibTeX]

    Geography, climate and vegetation – elements in today’s changing. These changes have to be observed and analyzed in detail. To assure being up-to-date the classification of image data is a common procedure in remote sensing. For the implementation of image data classification many classification methods were developed and modified over the past years. The classification methods, the image data and the study area mainly affect the classification accuracy. In particular the progress of increasing training data showed a boost of classification accuracy. Though the costs and expenditure of time are very high in purchasing such training data. Nevertheless so called semi-supervised classification methods try to resolve this problem. In this bachelor thesis the focus is set on the Random Forest developed by Breiman. This classifier is combined with an incremental method. After this the classifier is able to generate new training data. Hence we implement the self-training method. To create an incremental Random Forest we proceed in several phases. First we train a conventional Random Forest with a small set of training data. In a second Phase the predicted classification is made. This allows pixel whose land use classes are unknown to be provided with pseudo-classes. At the same time the accuracy assessment is made on the trained Random Forest. For this we use the predefined test data from the given dataset. In a third stage the selection of the new training data is made. We define a threshold, so the new training data is not randomly selected. The confidence level of the new training data is measured on this threshold. If there is a sufficient number of new training data, which reach or exceed this confidence level, the new training data is added to the existing training data. On this basis a new Random Forest can be trained. This sequential process is determined by a specified iteration, or is stopped prematurely by a stopping criterion. 
Afterwards it is possible to classify a multi-spectral and hyperspectral dataset. The assessment concluded that the combination parameters of the incremental Random Forest have a crucial impact on the classification results. Depending on the data set various configurations of parameters have to be tested. While comparing the conventional Random Forest with the incremental Random Forest partly significant differences in the classification results are obvious. Furthermore it should be noted that only a few class accuracy could be increased with the incremental Random Forest. Though the present thesis provides a good foundation to exploit the potential of the incremental Random Forest for further investigations.

    @MastersThesis{schoppohl2011klassifikation,
    title = {Klassifikation von Multispektralen und Hyperspektralen Fernerkundungsdaten mittels sequentieller Klassifikationsverfahren},
    author = {Schoppohl, Sebastian-Alexander},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2011},
    note = {Betreuung: Prof. Dr. Bj\"orn Waske, Dipl.-Ing. Ribana Roscher},
    type = {Bachelor Thesis},
    abstract = {Geography, climate and vegetation - elements in today's changing. These changes have to be observed and analyzed in detail. To assure being up-to-date the classification of image data is a common procedure in remote sensing. For the implementation of image data classification many classification methods were developed and modified over the past years. The classification methods, the image data and the study area mainly affect the classification accuracy. In particular the progress of increasing training data showed a boost of classification accuracy. Though the costs and expenditure of time are very high in purchasing such training data. Nevertheless so called semi-supervised classification methods try to resolve this problem. In this bachelor thesis the focus is set on the Random Forest developed by Breiman. This classifier is combined with an incremental method. After this the classifier is able to generate new training data. Hence we implement the self-training method. To create an incremental Random Forest we proceed in several phases. First we train a conventional Random Forest with a small set of training data. In a second Phase the predicted classification is made. This allows pixel whose land use classes are unknown to be provided with pseudo-classes. At the same time the accuracy assessment is made on the trained Random Forest. For this we use the predefined test data from the given dataset. In a third stage the selection of the new training data is made. We define a threshold, so the new training data is not randomly selected. The confidence level of the new training data is measured on this threshold. If there is a sufficient number of new training data, which reach or exceed this confidence level, the new training data is added to the existing training data. On this basis a new Random Forest can be trained. This sequential process is determined by a specified iteration, or is stopped prematurely by a stopping criterion. Afterwards it is possible to classify a multi-spectral and hyperspectral dataset. The assessment concluded that the combination parameters of the incremental Random Forest have a crucial impact on the classification results. Depending on the data set various configurations of parameters have to be tested. While comparing the conventional Random Forest with the incremental Random Forest partly significant differences in the classification results are obvious. Furthermore it should be noted that only a few class accuracy could be increased with the incremental Random Forest. Though the present thesis provides a good foundation to exploit the potential of the incremental Random Forest for further investigations.},
    address = {Bonn},
    }

  • J. Siegemund, U. Franke, and W. Förstner, “A Temporal Filter Approach for Detection and Reconstruction of Curbs and Road Surfaces based on Conditional Random Fields,” in IEEE Intelligent Vehicles Symposium (IV), 2011, pp. 637-642. doi:10.1109/IVS.2011.5940447
    [BibTeX] [PDF]

    A temporal filter approach for real-time detection and reconstruction of curbs and road surfaces from 3D point clouds is presented. Instead of local thresholding, as used in many other approaches, a 3D curb model is extracted from the point cloud. The 3D points are classified to different parts of the model (i.e. road and sidewalk) using a temporally integrated Conditional Random Field (CRF). The parameters of curb and road surface are then estimated from the respectively assigned points, providing a temporal connection via a Kalman filter. In this contribution, we employ dense stereo vision for data acquisition. Other sensors capturing point cloud data, e.g. lidar, would also be suitable. The system was tested on real-world scenarios, showing the advantages over a temporally unfiltered version, due to robustness, accuracy and computation time. Further, the lateral accuracy of the system is evaluated. The experiments show the system to yield highly accurate results, for curved and straight-line curbs, up to distances of 20 meters from the camera.

    @InProceedings{siegemund2011temporal,
    title = {A Temporal Filter Approach for Detection and Reconstruction of Curbs and Road Surfaces based on Conditional Random Fields},
    author = {Siegemund, Jan and Franke, Uwe and F\"orstner, Wolfgang},
    booktitle = {IEEE Intelligent Vehicles Symposium (IV)},
    year = {2011},
    month = jun,
    pages = {637--642},
    publisher = {IEEE Computer Society},
    abstract = {A temporal filter approach for real-time detection and reconstruction of curbs and road surfaces from 3D point clouds is presented. Instead of local thresholding, as used in many other approaches, a 3D curb model is extracted from the point cloud. The 3D points are classified to different parts of the model (i.e. road and sidewalk) using a temporally integrated Conditional Random Field (CRF). The parameters of curb and road surface are then estimated from the respectively assigned points, providing a temporal connection via a Kalman filter. In this contribution, we employ dense stereo vision for data acquisition. Other sensors capturing point cloud data, e.g. lidar, would also be suitable. The system was tested on real-world scenarios, showing the advantages over a temporally unfiltered version, due to robustness, accuracy and computation time. Further, the lateral accuracy of the system is evaluated. The experiments show the system to yield highly accurate results, for curved and straight-line curbs, up to distances of 20 meters from the camera.},
    doi = {10.1109/IVS.2011.5940447},
    url = {https://www.ipb.uni-bonn.de/pdfs/Siegemund2011Temporal.pdf},
    }

  • J. Sturm, C. Stachniss, and W. Burgard, “A Probabilistic Framework for Learning Kinematic Models of Articulated Objects,” Journal on Artificial Intelligence Research, vol. 41, p. 477–526, 2011.
    [BibTeX] [PDF]
    [none]
    @Article{sturm2011,
    title = {A Probabilistic Framework for Learning Kinematic Models of Articulated Objects},
    author = {J. Sturm and C. Stachniss and W. Burgard},
    journal = jair,
    year = {2011},
    pages = {477--526},
    volume = {41},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/sturm11jair.pdf},
    }

  • B. Uebbing, “Untersuchung zur Nutzung wiederholter Strukturen für die 3D Rekonstruktion aus Einzelaufnahmen,” Bachelor Thesis, Institute of Photogrammetry, University of Bonn, 2011.
    [BibTeX]

    \textbf{Summary} The goal of this work is the derivation of 3D-information from single images. Therefore identical repeated structures are used. These structures are common in man-made scenes. The repeated structures can be seen as multiple pictures of a single object. At first we simplify the problem by projecting it from 3D to 2D. Thus we introduce 1D cameras by taking the rows and columns of the image sections showing the repeated structures. By rectifying the image we can assume the normal case. By reconstructing and intersecting the projection rays of corresponding points from three 1D cameras 2D profiles of the repeated structure can be recovered. Using these profiles we can derive depth information and their uncertainty. By combining more than one profile in horizontal and vertical direction even a 3D model of the repeated structure can be recovered. We pursue this approach in two ways. First we discuss a simulation program which applies the developed concept under optimal circumstances. Furthermore we verify our estimate of the theoretical uncertainty by performing an empirical test. Second we test our approach on real images. Therefore we use images of building facades in which we use geometrically identical windows as repeated objects. In this process edge-feature extraction and matching of these features plays a major role with real images. We examine our results and conclude that our approach performs very well in the theoretical environment of the simulation program. There it is possible to create 2D profiles with a relative uncertainty of depth of 0.04% to 2%, depending on the assumption of the theoretical uncertainty. Also the reconstruction of 3D information of the used model in the simulation performs very well. The results on real images lack in completeness and precision caused by uncertainties during the edgefeature extraction and the following matching of the 1D edgepoints. The results are not very reliable and meaningful. 
This is mostly due to the relatively small depth of the repeated structures. Mostly, just horizontal 2D profiles can be recovered, because there are not three identical windows on top of each other. Other major sources of uncertainties are incidences of light, radial image distortions and disturbing objects behind the windows or reflections of objects. Our approach is therefore only of limited use on the images used by us. To produce good results with our approach we require certain circumstances like a high resolution image, so the repeated structures are also displayed in a high resolution. Furthermore the repeated objects should have a certain amount of depth, so the parallax is significant. \textbf{Zusammenfassung} Ziel dieser Arbeit ist die Ableitung von 3D-Informationen aus Einzelaufnahmen. Dazu werden identische, wiederholte Strukturen verwendet. Diese treten in von Menschenhand geschaffenen Objekten sehr häufig auf. Wir betrachten diese wiederholten Strukturen als mehrere Aufnahmen eines Objektes. Zunächst vereinfachen wir die Problemstellung, indem wir die 3D Rekonstruktion von Punkten und Linien auf eine 2D Rekonstruktion von Punkten reduzieren. Dazu werden 1D Kameras eingeführt. Die Zeilen und Spalten von Bildausschnitten wiederholter Objekte werden dabei als Aufnahmen von 1D Kameras betrachtet. Aufgrund der Rektifizierung der Bilder können wir das Vorliegen des Normalfalls annehmen. Durch Rekonstruktion und Verschneiden der Abbildungsstrahlen von korrespondierenden Punkten aus drei 1D Kameras werden 2D Profile rekonstruiert. Aus diesen lassen sich Tiefeninformationen und deren Genauigkeit ableiten. Durch Kombination mehrerer Profile in horizontaler und vertikaler Richtung lassen sich unter optimalen Bedingungen 3D Modelle der wiederholten Strukturen erstellen. Wir verfolgen diesen Ansatz auf zwei Wegen. Zunächst wird ein Simulationsprogramm behandelt, welches das entwickelte Konzept an einem Modell unter optimalen Bedingungen testet. 
Dabei wird zudem die Annahme der theoretischen Genauigkeit empirisch überprüft. In einem nächsten Schritt wird der Ansatz für die Anwendung auf echte Bilder übertragen. Dazu verwenden wir Aufnahmen von Gebäudefassaden, bei denen wir geometrisch identische Fenster als wiederholte Strukturen betrachten. Dabei spielen besonders Aspekte wie Kantenextraktion und eine korrekte Zuordnung korrespondierender Kanten eine Rolle. Letztendlich stellen wir fest, dass der von uns verfolgte Ansatz in der Theorie des Simulationsprogramms sehr gute Ergebnisse liefert. Es ist möglich 2D Profile mit einer relativen Tiefengenauigkeit von 0.04% bis 2%, je nach Annahme der theoretischen Genauigkeit, zu erstellen. Die Rekonstruktion der 3D Informationen des im Simulationsprogramm verwendeten Modells gelingt sehr gut. Die Anwendung auf echte Bilder liefert weniger gute Resultate. Durch Ungenauigkeiten in der Kantenextraktion und der Zuordnung am Rand der wiederholten Strukturen und einer zu geringen Tiefe der verwendeten Testobjekte sind die Ergebnisse nicht sehr akkurat und aussagekräftig. In der Regel werden nur horizontale 2D Profile erstellt, da meist nicht drei identische Fensterstrukturen übereinander liegen. Zudem spielen weitere Faktoren wie Lichteinfall, Verzeichnungen und Störobjekte in den von uns verwendeten Fenstern eine Rolle. Unser entwickeltes Verfahren lässt sich daher nur bedingt zur Rekonstruktion auf den von uns verwendeten Bildern benutzen.

    @MastersThesis{uebbing2011untersuchung,
    title = {Untersuchung zur Nutzung wiederholter Strukturen f\"ur die 3D Rekonstruktion aus Einzelaufnahmen},
    author = {Uebbing, Bernd},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2011},
    type = {Bachelor Thesis},
    abstract = {\textbf{Summary} The goal of this work is the derivation of 3D-information from single images. Therefore identical repeated structures are used. These structures are common in man-made scenes. The repeated structures can be seen as multiple pictures of a single object. At first we simplify the problem by projecting it from 3D to 2D. Thus we introduce 1D cameras by taking the rows and columns of the image sections showing the repeated structures. By rectifying the image we can assume the normal case. By reconstructing and intersecting the projection rays of corresponding points from three 1D cameras 2D profiles of the repeated structure can be recovered. Using these profiles we can derive depth information and their uncertainty. By combining more than one profile in horizontal and vertical direction even a 3D model of the repeated structure can be recovered. We pursue this approach in two ways. First we discuss a simulation program which applies the developed concept under optimal circumstances. Furthermore we verify our estimate of the theoretical uncertainty by performing an empirical test. Second we test our approach on real images. Therefore we use images of building facades in which we use geometrically identical windows as repeated objects. In this process edge-feature extraction and matching of these features plays a major role with real images. We examine our results and conclude that our approach performs very well in the theoretical environment of the simulation program. There it is possible to create 2D profiles with a relative uncertainty of depth of 0.04\% to 2\%, depending on the assumption of the theoretical uncertainty. Also the reconstruction of 3D information of the used model in the simulation performs very well. The results on real images lack in completeness and precision caused by uncertainties during the edgefeature extraction and the following matching of the 1D edgepoints. The results are not very reliable and meaningful. This is mostly due to the relatively small depth of the repeated structures. Mostly, just horizontal 2D profiles can be recovered, because there are not three identical windows on top of each other. Other major sources of uncertainties are incidences of light, radial image distortions and disturbing objects behind the windows or reflections of objects. Our approach is therefore only of limited use on the images used by us. To produce good results with our approach we require certain circumstances like a high resolution image, so the repeated structures are also displayed in a high resolution. Furthermore the repeated objects should have a certain amount of depth, so the parallax is significant. \textbf{Zusammenfassung} Ziel dieser Arbeit ist die Ableitung von 3D-Informationen aus Einzelaufnahmen. Dazu werden identische, wiederholte Strukturen verwendet. Diese treten in von Menschenhand geschaffenen Objekten sehr h\"aufig auf. Wir betrachten diese wiederholten Strukturen als mehrere Aufnahmen eines Objektes. Zun\"achst vereinfachen wir die Problemstellung, indem wir die 3D Rekonstruktion von Punkten und Linien auf eine 2D Rekonstruktion von Punkten reduzieren. Dazu werden 1D Kameras eingef\"uhrt. Die Zeilen und Spalten von Bildausschnitten wiederholter Objekte werden dabei als Aufnahmen von 1D Kameras betrachtet. Aufgrund der Rektifizierung der Bilder k\"onnen wir das Vorliegen des Normalfalls annehmen. Durch Rekonstruktion und Verschneiden der Abbildungsstrahlen von korrespondierenden Punkten aus drei 1D Kameras werden 2D Profile rekonstruiert. Aus diesen lassen sich Tiefeninformationen und deren Genauigkeit ableiten. Durch Kombination mehrerer Profile in horizontaler und vertikaler Richtung lassen sich unter optimalen Bedingungen 3D Modelle der wiederholten Strukturen erstellen. Wir verfolgen diesen Ansatz auf zwei Wegen. Zun\"achst wird ein Simulationsprogramm behandelt, welches das entwickelte Konzept an einem Modell unter optimalen Bedingungen testet. Dabei wird zudem die Annahme der theoretischen Genauigkeit empirisch \"uberpr\"uft. In einem n\"achsten Schritt wird der Ansatz f\"ur die Anwendung auf echte Bilder \"ubertragen. Dazu verwenden wir Aufnahmen von Geb\"audefassaden, bei denen wir geometrisch identische Fenster als wiederholte Strukturen betrachten. Dabei spielen besonders Aspekte wie Kantenextraktion und eine korrekte Zuordnung korrespondierender Kanten eine Rolle. Letztendlich stellen wir fest, dass der von uns verfolgte Ansatz in der Theorie des Simulationsprogramms sehr gute Ergebnisse liefert. Es ist m\"oglich 2D Profile mit einer relativen Tiefengenauigkeit von 0.04\% bis 2\%, je nach Annahme der theoretischen Genauigkeit, zu erstellen. Die Rekonstruktion der 3D Informationen des im Simulationsprogramm verwendeten Modells gelingt sehr gut. Die Anwendung auf echte Bilder liefert weniger gute Resultate. Durch Ungenauigkeiten in der Kantenextraktion und der Zuordnung am Rand der wiederholten Strukturen und einer zu geringen Tiefe der verwendeten Testobjekte sind die Ergebnisse nicht sehr akkurat und aussagekr\"aftig. In der Regel werden nur horizontale 2D Profile erstellt, da meist nicht drei identische Fensterstrukturen \"ubereinander liegen. Zudem spielen weitere Faktoren wie Lichteinfall, Verzeichnungen und St\"orobjekte in den von uns verwendeten Fenstern eine Rolle. Unser entwickeltes Verfahren l\"asst sich daher nur bedingt zur Rekonstruktion auf den von uns verwendeten Bildern benutzen.},
    address = {Bonn},
    }

  • B. Waske, R. Roscher, and S. Klemenjak, “Import Vector Machines Based Classification of Multisensor Remote Sensing Data,” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS), 2011. doi:10.1109/IGARSS.2011.6049829
    [BibTeX]

    The classification of multisensor data sets, consisting of multitemporal SAR data and multispectral is addressed. In the present study, Import Vector Machines (IVM) are applied on two data sets, consisting of (i) Envisat ASAR/ERS-2 SAR data and a Landsat 5 TM scene, and (h) TerraSAR-X data and a RapidEye scene. The performance of IVM for classifying multisensor data is evaluated and the method is compared to Support Vector Machines (SVM) in terms of accuracy and complexity. In general, the experimental results demonstrate that the classification accuracy is improved by the multisensor data set. Moreover, IVM and SVM perform similar in terms of the classification accuracy. However, the number of import vectors is considerably less than the number of support vectors, and thus the computation time of the IVM classification is lower. IVM can directly be applied to the multi-class problems and provide probabilistic outputs. Overall IVM constitutes a feasible method and alternative to SVM.

    @InProceedings{waske2011import,
    title = {{Import Vector Machines} Based Classification of Multisensor Remote Sensing Data},
    author = {Waske, Bj\"orn and Roscher, Ribana and Klemenjak, Sascha},
    booktitle = {IEEE International Geoscience and Remote Sensing Symposium (IGARSS)},
    year = {2011},
    abstract = {The classification of multisensor data sets, consisting of multitemporal SAR data and multispectral is addressed. In the present study, Import Vector Machines (IVM) are applied on two data sets, consisting of (i) Envisat ASAR/ERS-2 SAR data and a Landsat 5 TM scene, and (h) TerraSAR-X data and a RapidEye scene. The performance of IVM for classifying multisensor data is evaluated and the method is compared to Support Vector Machines (SVM) in terms of accuracy and complexity. In general, the experimental results demonstrate that the classification accuracy is improved by the multisensor data set. Moreover, IVM and SVM perform similar in terms of the classification accuracy. However, the number of import vectors is considerably less than the number of support vectors, and thus the computation time of the IVM classification is lower. IVM can directly be applied to the multi-class problems and provide probabilistic outputs. Overall IVM constitutes a feasible method and alternative to SVM.},
    doi = {10.1109/IGARSS.2011.6049829},
    keywords = {Envisat ASAR ERS-2 SAR data;IVM;Landsat 5 TM scene;RapidEye scene;SVM comparison;TerraSAR-X data;computation time;data classification;import vector machines;multisensor remote sensing data;multispectral data;multitemporal SAR data;support vector machines;geophysical image processing;image classification;knowledge engineering;radar imaging;remote sensing by radar;spaceborne radar;synthetic aperture radar;},
    }

  • K. M. Wurm, D. Hennes, D. Holz, R. B. Rusu, C. Stachniss, K. Konolige, and W. Burgard, “Hierarchies of Octrees for Efficient 3D Mapping,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), San Francisco, CA, USA, 2011.
    [BibTeX] [PDF]
    [none]
    @InProceedings{wurm2011,
    title = {Hierarchies of Octrees for Efficient 3D Mapping},
    author = {K. M. Wurm and D. Hennes and D. Holz and R. B. Rusu and C. Stachniss and K. Konolige and W. Burgard},
    booktitle = iros,
    year = {2011},
    address = {San Francisco, CA, USA},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/wurm11iros.pdf},
    }

  • M. Y. Yang, “Hierarchical and Spatial Structures for Interpreting Images of Man-made Scenes Using Graphical Models,” PhD Thesis, 2011.
    [BibTeX] [PDF]

    \textbf{Summary} The task of semantic scene interpretation is to label the regions of an image and their relations into meaningful classes. Such task is a key ingredient to many computer vision applications, including object recognition, 3D reconstruction and robotic perception. It is challenging partially due to the ambiguities inherent to the image data. The images of man-made scenes, e. g. the building facade images, exhibit strong contextual dependencies in the form of the spatial and hierarchical structures. Modelling these structures is central for such interpretation task. Graphical models provide a consistent framework for the statistical modelling. Bayesian networks and random fields are two popular types of the graphical models, which are frequently used for capturing such contextual information. The motivation for our work comes from the belief that we can find a generic formulation for scene interpretation that having both the benefits from random fields and Bayesian networks. It should have clear semantic interpretability. Therefore our key contribution is the development of a generic statistical graphical model for scene interpretation, which seamlessly integrates different types of the image features, and the spatial structural information and the hierarchical structural information defined over the multi-scale image segmentation. It unifies the ideas of existing approaches, e. g. conditional random field (CRF) and Bayesian network (BN), which has a clear statistical interpretation as the maximum a posteriori (MAP) estimate of a multi-class labelling problem. Given the graphical model structure, we derive the probability distribution of the model based on the factorization property implied in the model structure. The statistical model leads to an energy function that can be optimized approximately by either loopy belief propagation or graph cut based move making algorithm. 
The particular type of the features, the spatial structure, and the hierarchical structure however is not prescribed. In the experiments, we concentrate on terrestrial man-made scenes as a specifically difficult problem. We demonstrate the application of the proposed graphical model on the task of multi-class classification of building facade image regions. The framework for scene interpretation allows for significantly better classification results than the standard classical local classification approach on man-made scenes by incorporating the spatial and hierarchical structures. We investigate the performance of the algorithms on a public dataset to show the relative importance ofthe information from the spatial structure and the hierarchical structure. As a baseline for the region classification, we use an efficient randomized decision forest classifier. Two specific models are derived from the proposed graphical model, namely the hierarchical CRF and the hierarchical mixed graphical model. We show that these two models produce better classification results than both the baseline region classifier and the flat CRF. \textbf{Zusammenfassung} Ziel der semantischen Bildinterpretation ist es, Bildregionen und ihre gegenseitigen Beziehungen zu kennzeichnen und in sinnvolle Klassen einzuteilen. Dies ist eine der Hauptaufgabe in vielen Bereichen des maschinellen Sehens, wie zum Beispiel der Objekterkennung, 3D Rekonstruktion oder der Wahrnehmung von Robotern. Insbesondere Bilder anthropogener Szenen, wie z.B. Fassadenaufnahmen, sind durch starke räumliche und hierarchische Strukturen gekennzeichnet. Diese Strukturen zu modellieren ist zentrale Teil der Interpretation, für deren statistische Modellierung graphische Modelle ein geeignetes konsistentes Werkzeug darstellen. Bayes Netze und Zufallsfelder sind zwei bekannte und häufig genutzte Beispiele für graphische Modelle zur Erfassung kontextabhängiger Informationen. 
Die Motivation dieser Arbeit liegt in der Überzeugung, dass wir eine generische Formulierung der Bildinterpretation mit klarer semantischer Bedeutung finden können, die die Vorteile von Bayes Netzen und Zufallsfeldern verbindet. Der Hauptbeitrag der vorliegenden Arbeit liegt daher in der Entwicklung eines generischen statistischen graphischen Modells zur Bildinterpretation, welches unterschiedlichste Typen von Bildmerkmalen und die räumlichen sowie hierarchischen Strukturinformationen über eine multiskalen Bildsegmentierung integriert. Das Modell vereinheitlicht die existierender Arbeiten zugrunde liegenden Ideen, wie bedingter Zufallsfelder (conditional random field (CRF)) und Bayesnetze (Bayesian network (BN)). Dieses Modell hat eine klare statistische Interpretation als Maximum a posteriori (MAP) Schätzer eines mehr klassen Zuordnungsproblems. Gegeben die Struktur des graphischen Modells und den dadurch definierten Faktorisierungseigenschaften leiten wir die Wahrscheinlichkeitsverteilung des Modells ab. Dies führt zu einer Energiefunktion, die näherungsweise optimiert werden kann. Der jeweilige Typ der Bildmerkmale, die räumliche sowie hierarchische Struktur ist von dieser Formulierung unabhängig. Wir zeigen die Anwendung des vorgeschlagenen graphischen Modells anhand der mehrklassen Zuordnung von Bildregionen in Fassadenaufnahmen. Wir demonstrieren, dass das vorgeschlagene Verfahren zur Bildinterpretation, durch die Berücksichtigung räumlicher sowie hierarchischer Strukturen, signifikant bessere Klassifikationsergebnisse zeigt, als klassische lokale Klassifikationsverfahren. Die Leistungsfähigkeit des vorgeschlagenen Verfahrens wird anhand eines öffentlich verfügbarer Datensatzes evaluiert. Zur Klassifikation der Bildregionen nutzen wir ein Verfahren basierend auf einem effizienten Random Forest Klassifikator. 
Aus dem vorgeschlagenen allgemeinen graphischen Modell werden konkret zwei spezielle Modelle abgeleitet, ein hierarchisches bedingtes Zufallsfeld (hierarchical CRF) sowie ein hierarchisches gemischtes graphisches Modell. Wir zeigen, dass beide Modelle bessere Klassifikationsergebnisse erzeugen als die zugrunde liegenden lokalen Klassifikatoren oder die einfachen bedingten Zufallsfelder.

    @PhDThesis{yang2011hierarchical,
    title = {Hierarchical and Spatial Structures for Interpreting Images of Man-made Scenes Using Graphical Models},
    author = {Michael Ying Yang},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2011},
    abstract = {\textbf{Summary} The task of semantic scene interpretation is to label the regions of an image and their relations into meaningful classes. Such task is a key ingredient to many computer vision applications, including object recognition, 3D reconstruction and robotic perception. It is challenging partially due to the ambiguities inherent to the image data. The images of man-made scenes, e. g. the building facade images, exhibit strong contextual dependencies in the form of the spatial and hierarchical structures. Modelling these structures is central for such interpretation task. Graphical models provide a consistent framework for the statistical modelling. Bayesian networks and random fields are two popular types of the graphical models, which are frequently used for capturing such contextual information. The motivation for our work comes from the belief that we can find a generic formulation for scene interpretation that having both the benefits from random fields and Bayesian networks. It should have clear semantic interpretability. Therefore our key contribution is the development of a generic statistical graphical model for scene interpretation, which seamlessly integrates different types of the image features, and the spatial structural information and the hierarchical structural information defined over the multi-scale image segmentation. It unifies the ideas of existing approaches, e. g. conditional random field (CRF) and Bayesian network (BN), which has a clear statistical interpretation as the maximum a posteriori (MAP) estimate of a multi-class labelling problem. Given the graphical model structure, we derive the probability distribution of the model based on the factorization property implied in the model structure. The statistical model leads to an energy function that can be optimized approximately by either loopy belief propagation or graph cut based move making algorithm. 
The particular type of the features, the spatial structure, and the hierarchical structure however is not prescribed. In the experiments, we concentrate on terrestrial man-made scenes as a specifically difficult problem. We demonstrate the application of the proposed graphical model on the task of multi-class classification of building facade image regions. The framework for scene interpretation allows for significantly better classification results than the standard classical local classification approach on man-made scenes by incorporating the spatial and hierarchical structures. We investigate the performance of the algorithms on a public dataset to show the relative importance ofthe information from the spatial structure and the hierarchical structure. As a baseline for the region classification, we use an efficient randomized decision forest classifier. Two specific models are derived from the proposed graphical model, namely the hierarchical CRF and the hierarchical mixed graphical model. We show that these two models produce better
    classification results than both the baseline region classifier and the flat CRF. \textbf{Zusammenfassung} Ziel der semantischen Bildinterpretation ist es, Bildregionen und ihre gegenseitigen Beziehungen zu kennzeichnen und in sinnvolle Klassen einzuteilen. Dies ist eine der Hauptaufgabe in vielen Bereichen des maschinellen Sehens, wie zum Beispiel der Objekterkennung, 3D Rekonstruktion oder der Wahrnehmung von Robotern. Insbesondere Bilder anthropogener Szenen, wie z.B. Fassadenaufnahmen, sind durch starke r\"aumliche und hierarchische Strukturen gekennzeichnet. Diese Strukturen zu modellieren ist zentrale Teil der Interpretation, f\"ur deren statistische Modellierung graphische Modelle ein geeignetes konsistentes Werkzeug darstellen. Bayes Netze und Zufallsfelder sind zwei bekannte und h\"aufig genutzte Beispiele f\"ur graphische Modelle zur Erfassung kontextabh\"angiger Informationen. Die Motivation dieser Arbeit liegt in der \"Uberzeugung, dass wir eine generische Formulierung der Bildinterpretation mit klarer semantischer Bedeutung finden k\"onnen, die die Vorteile von Bayes Netzen und Zufallsfeldern verbindet. Der Hauptbeitrag der vorliegenden Arbeit liegt daher in der Entwicklung eines generischen statistischen graphischen Modells zur Bildinterpretation, welches unterschiedlichste Typen von Bildmerkmalen und die r\"aumlichen sowie hierarchischen Strukturinformationen \"uber eine multiskalen Bildsegmentierung integriert. Das Modell vereinheitlicht die existierender Arbeiten zugrunde liegenden Ideen, wie bedingter Zufallsfelder (conditional random field (CRF)) und Bayesnetze (Bayesian network (BN)). Dieses Modell hat eine klare statistische Interpretation als Maximum a posteriori (MAP) Sch\"atzer eines mehr klassen Zuordnungsproblems. Gegeben die Struktur des graphischen Modells und den dadurch definierten Faktorisierungseigenschaften leiten wir die Wahrscheinlichkeitsverteilung des Modells ab. 
Dies f\"uhrt zu einer Energiefunktion, die n\"aherungsweise optimiert werden kann. Der jeweilige Typ der Bildmerkmale, die r\"aumliche sowie hierarchische Struktur ist von dieser Formulierung unabh\"angig. Wir zeigen die Anwendung des vorgeschlagenen graphischen Modells anhand der mehrklassen Zuordnung von Bildregionen in Fassadenaufnahmen. Wir demonstrieren, dass das vorgeschlagene Verfahren zur Bildinterpretation, durch die Ber\"ucksichtigung r\"aumlicher sowie hierarchischer Strukturen, signifikant bessere Klassifikationsergebnisse zeigt, als klassische lokale Klassifikationsverfahren. Die Leistungsf\"ahigkeit des vorgeschlagenen Verfahrens wird anhand eines \"offentlich verf\"ugbarer Datensatzes evaluiert. Zur Klassifikation der Bildregionen nutzen wir ein Verfahren basierend auf einem effizienten Random Forest Klassifikator. Aus dem vorgeschlagenen allgemeinen graphischen Modell werden konkret zwei spezielle Modelle abgeleitet, ein hierarchisches bedingtes Zufallsfeld (hierarchical CRF) sowie ein hierarchisches gemischtes
    graphisches Modell. Wir zeigen, dass beide Modelle bessere Klassifikationsergebnisse erzeugen als die zugrunde liegenden lokalen Klassifikatoren oder die einfachen bedingten Zufallsfelder.},
    url = {https://hss.ulb.uni-bonn.de/2012/2765/2765.htm},
    }

  • M. Y. Yang and W. Förstner, “Feature Evaluation for Building Facade Images – An Empirical Study,” International Archives of the Photogrammetry, Remote Sensing and Spatial Information Sciences, vol. XXXIX-B3, p. 513–518, 2012. doi:10.5194/isprsarchives-XXXIX-B3-513-2012
    [BibTeX] [PDF]

    The classification of building facade images is a challenging problem that receives a great deal of attention in the photogrammetry community. Image classification is critically dependent on the features. In this paper, we perform an empirical feature evaluation task for building facade images. Feature sets we choose are basic features, color features, histogram features, Peucker features, texture features, and SIFT features. We present an approach for region-wise labeling using an efficient randomized decision forest classifier and local features. We conduct our experiments with building facade image classification on the eTRIMS dataset, where our focus is the object classes building, car, door, pavement, road, sky, vegetation, and window.

    @Article{yang2011feature,
    title = {Feature Evaluation for Building Facade Images -- An Empirical Study},
    author = {Yang, Michael Ying and F\"orstner, Wolfgang},
    journal = {International Archives of the Photogrammetry, Remote Sensing and Spatial Information Sciences},
    year = {2012},
    pages = {513--518},
    volume = {XXXIX-B3},
    abstract = {The classification of building facade images is a challenging problem that receives a great deal of attention in the photogrammetry community. Image classification is critically dependent on the features. In this paper, we perform an empirical feature evaluation task for building facade images. Feature sets we choose are basic features, color features, histogram features, Peucker features, texture features, and SIFT features. We present an approach for region-wise labeling using an efficient randomized decision forest classifier and local features. We conduct our experiments with building facade image classification on the eTRIMS dataset, where our focus is the object classes building, car, door, pavement, road, sky, vegetation, and window.},
    doi = {10.5194/isprsarchives-XXXIX-B3-513-2012},
    url = {https://www.ipb.uni-bonn.de/pdfs/Yang2011Feature.pdf},
    }

  • M. Y. Yang and W. Förstner, “A Hierarchical Conditional Random Field Model for Labeling and Classifying Images of Man-made Scenes,” in International Conf. on Computer Vision, IEEE/ISPRS Workshop on Computer Vision for Remote Sensing of the Environment, 2011. doi:10.1109/ICCVW.2011.6130243
    [BibTeX] [PDF]

    Semantic scene interpretation as a collection of meaningful regions in images is a fundamental problem in both photogrammetry and computer vision. Images of man-made scenes exhibit strong contextual dependencies in the form of spatial and hierarchical structures. In this paper, we introduce a hierarchical conditional random field to deal with the problem of image classification by modeling spatial and hierarchical structures. The probability outputs of an efficient randomized decision forest classifier are used as unary potentials. The spatial and hierarchical structures of the regions are integrated into pairwise potentials. The model is built on multi-scale image analysis in order to aggregate evidence from local to global level. Experimental results are provided to demonstrate the performance of the proposed method using images from eTRIMS dataset, where our focus is the object classes building, car, door, pavement, road, sky, vegetation, and window.

    @InProceedings{yang2011hierarchicala,
    title = {A Hierarchical Conditional Random Field Model for Labeling and Classifying Images of Man-made Scenes},
    author = {Yang, Michael Ying and F\"orstner, Wolfgang},
    booktitle = {International Conf. on Computer Vision, IEEE/ISPRS Workshop on Computer Vision for Remote Sensing of the Environment},
    year = {2011},
    abstract = {Semantic scene interpretation as a collection of meaningful regions in images is a fundamental problem in both photogrammetry and computer vision. Images of man-made scenes exhibit strong contextual dependencies in the form of spatial and hierarchical structures. In this paper, we introduce a hierarchical conditional random field to deal with the problem of image classification by modeling spatial and hierarchical structures. The probability outputs of an efficient randomized decision forest classifier are used as unary potentials. The spatial and hierarchical structures of the regions are integrated into pairwise potentials. The model is built on multi-scale image analysis in order to aggregate evidence from local to global level. Experimental results are provided to demonstrate the performance of the proposed method using images from eTRIMS dataset, where our focus is the object classes building, car, door, pavement, road, sky, vegetation, and window.},
    doi = {10.1109/ICCVW.2011.6130243},
    url = {https://www.ipb.uni-bonn.de/pdfs/Yang2011Hierarchical.pdf},
    }

  • M. Y. Yang and W. Förstner, “Regionwise Classification of Building Facade Images,” in Photogrammetric Image Analysis (PIA2011), 2011, p. 209 – 220. doi:10.1007/978-3-642-24393-6_18
    [BibTeX] [PDF]

    In recent years, the classification task of building facade images receives a great deal of attention in the photogrammetry community. In this paper, we present an approach for regionwise classification using an efficient randomized decision forest classifier and local features. A conditional random field is then introduced to enforce spatial consistency between neighboring regions. Experimental results are provided to illustrate the performance of the proposed methods using image from eTRIMS database, where our focus is the object classes building, car, door, pavement, road, sky, vegetation, and window.

    @InProceedings{yang2011regionwise,
    title = {Regionwise Classification of Building Facade Images},
    author = {Yang, Michael Ying and F\"orstner, Wolfgang},
    booktitle = {Photogrammetric Image Analysis (PIA2011)},
    year = {2011},
    note = {Stilla, Uwe / Rottensteiner, Franz / Mayer, H. / Jutzi, Boris / Butenuth, Matthias (Hg.); Munich},
    pages = {209--220},
    publisher = {Springer},
    series = {LNCS 6952},
    abstract = {In recent years, the classification task of building facade images receives a great deal of attention in the photogrammetry community. In this paper, we present an approach for regionwise classification using an efficient randomized decision forest classifier and local features. A conditional random field is then introduced to enforce spatial consistency between neighboring regions. Experimental results are provided to illustrate the performance of the proposed methods using image from eTRIMS database, where our focus is the object classes building, car, door, pavement, road, sky, vegetation, and window.},
    doi = {10.1007/978-3-642-24393-6_18},
    url = {https://www.ipb.uni-bonn.de/pdfs/Yang2011Regionwise.pdf},
    }

  • J. Ziegler, H. Kretzschmar, C. Stachniss, G. Grisetti, and W. Burgard, “Accurate Human Motion Capture in Large Areas by Combining IMU- and Laser-based People Tracking,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), San Francisco, CA, USA, 2011.
    [BibTeX] [PDF]
    [none]
    @InProceedings{ziegler2011,
    title = {Accurate Human Motion Capture in Large Areas by Combining IMU- and Laser-based People Tracking},
    author = {J. Ziegler and H. Kretzschmar and C. Stachniss and G. Grisetti and W. Burgard},
    booktitle = iros,
    year = {2011},
    address = {San Francisco, CA, USA},
    abstract = {[none]},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/ziegler11iros.pdf},
    }

2010

  • M. Albrecht, “Erkennung bewegter Objekte auf fluktuierendem Hintergrund in Bildfolgen,” Master Thesis, 2010.
    [BibTeX] [PDF]
    [none]
    @MastersThesis{albrecht2010erkennung,
    title = {Erkennung bewegter Objekte auf fluktuierendem Hintergrund in Bildfolgen},
    author = {Albrecht, Markus},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2010},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Ribana Roscher},
    abstract = {[none]},
    address = {Bonn},
    url = {https://www.ipb.uni-bonn.de/pdfs/Albrecht2010Erkennung.pdf},
    }

  • A. Barth and U. Franke, “Tracking Oncoming and Turning Vehicles at Intersections,” in Intelligent Transportation Systems, IEEE Conf. on, Madeira Island, Portugal, 2010, p. 861–868. doi:10.1109/ITSC.2010.5624969
    [BibTeX] [PDF]

    This article addresses the reliable tracking of oncoming traffic at urban intersections from a moving platform with a stereo vision system. Both motion and depth information is combined to estimate the pose and motion parameters of an oncoming vehicle, including the yaw rate, by means of Kalman filtering. Vehicle tracking at intersections is particularly challenging since vehicles can turn quickly. A single filter approach cannot cover the dynamic range of a vehicle sufficiently. We propose a real-time multi-filter approach for vehicle tracking at intersections. A gauge consistency criteria as well as a robust outlier detection method allow for dealing with sudden accelerations and self-occlusions during turn maneuvers. The system is evaluated both on synthetic and real-world data.

    @InProceedings{barth2010tracking,
    title = {Tracking Oncoming and Turning Vehicles at Intersections},
    author = {Barth, Alexander and Franke, Uwe},
    booktitle = {Intelligent Transportation Systems, IEEE Conf. on},
    year = {2010},
    address = {Madeira Island, Portugal},
    pages = {861--868},
    abstract = {This article addresses the reliable tracking of oncoming traffic at urban intersections from a moving platform with a stereo vision system. Both motion and depth information is combined to estimate the pose and motion parameters of an oncoming vehicle, including the yaw rate, by means of Kalman filtering. Vehicle tracking at intersections is particularly challenging since vehicles can turn quickly. A single filter approach cannot cover the dynamic range of a vehicle sufficiently. We propose a real-time multi-filter approach for vehicle tracking at intersections. A gauge consistency criteria as well as a robust outlier detection method allow for dealing with sudden accelerations and self-occlusions during turn maneuvers. The system is evaluated both on synthetic and real-world data.},
    doi = {10.1109/ITSC.2010.5624969},
    url = {https://www.ipb.uni-bonn.de/pdfs/Barth2010Tracking.pdf},
    }

  • A. Barth, J. Siegemund, A. Meißner, U. Franke, and W. Förstner, “Probabilistic Multi-Class Scene Flow Segmentation for Traffic Scenes,” in Pattern Recognition (Symposium of DAGM), 2010, p. 503–512. doi:10.1007/978-3-642-15986-2_51
    [BibTeX] [PDF]

    A multi-class traffic scene segmentation approach based on scene flow data is presented. Opposed to many other approaches using color or texture features, our approach is purely based on dense depth and 3D motion information. Using prior knowledge on tracked objects in the scene and the pixel-wise uncertainties of the scene flow data, each pixel is assigned to either a particular moving object class (tracked/unknown object), the ground surface, or static background. The global topological order of classes, such as objects are above ground, is locally integrated into a conditional random field by an ordering constraint. The proposed method yields very accurate segmentation results on challenging real world scenes, which we made publicly available for comparison.

    @InProceedings{barth2010probabilistic,
    title = {Probabilistic Multi-Class Scene Flow Segmentation for Traffic Scenes},
    author = {Barth, Alexander and Siegemund, Jan and Mei{\ss}ner, Annemarie and Franke, Uwe and F\"orstner, Wolfgang},
    booktitle = {Pattern Recognition (Symposium of DAGM)},
    year = {2010},
    editor = {Goesele, M. and Roth, S. and Kuijper, A. and Schiele, B. and Schindler, K.},
    note = {Darmstadt},
    pages = {503--512},
    publisher = {Springer},
    abstract = {A multi-class traffic scene segmentation approach based on scene flow data is presented. Opposed to many other approaches using color or texture features, our approach is purely based on dense depth and 3D motion information. Using prior knowledge on tracked objects in the scene and the pixel-wise uncertainties of the scene flow data, each pixel is assigned to either a particular moving object class (tracked/unknown object), the ground surface, or static background. The global topological order of classes, such as objects are above ground, is locally integrated into a conditional random field by an ordering constraint. The proposed method yields very accurate segmentation results on challenging real world scenes, which we made publicly available for comparison.},
    doi = {10.1007/978-3-642-15986-2_51},
    url = {https://www.ipb.uni-bonn.de/pdfs/Barth2010Probabilistic.pdf},
    }

  • W. Burgard, K. M. Wurm, M. Bennewitz, C. Stachniss, A. Hornung, R. B. Rusu, and K. Konolige, “Modeling the World Around Us: An Efficient 3D Representation for Personal Robotics,” in Workshop on Defining and Solving Realistic Perception Problems in Personal Robotics at the IEEE/RSJ Int. Conf. on Intelligent Robots and Systems, Taipei, Taiwan, 2010.
    [BibTeX]
    [none]
    @InProceedings{burgard2010,
    title = {Modeling the World Around Us: An Efficient 3D Representation for Personal Robotics},
    author = {Burgard, W. and Wurm, K.M. and Bennewitz, M. and Stachniss, C. and Hornung, A. and Rusu, R.B. and Konolige, K.},
    booktitle = {Workshop on Defining and Solving Realistic Perception Problems in Personal Robotics at the IEEE/RSJ Int. Conf. on Intelligent Robots and Systems},
    year = {2010},
    address = {Taipei, Taiwan},
    abstract = {[none]},
    timestamp = {2014.04.24},
    }

  • T. Castaings, B. Waske, J. A. Benediktsson, and J. Chanussot, “On the influence of feature reduction for the classification of hyperspectral images based on the extended morphological profile,” International Journal of Remote Sensing, vol. 31, iss. 22, p. 5975–5991, 2010. doi:10.1080/01431161.2010.512313
    [BibTeX]

    In this study we investigated the classification of hyperspectral data with high spatial resolution. Previously, methods that generate a so-called extended morphological profile (EMP) from the principal components of an image have been proposed to create base images for morphological transformations. However, it can be assumed that the feature reduction (FR) may have a significant effect on the accuracy of the classification of the EMP. We therefore investigated the effect of different FR methods on the generation and classification of the EMP of hyperspectral images from urban areas, using a machine learning-based algorithm for classification. The applied FR methods include: principal component analysis (PCA), nonparametric weighted feature extraction (NWFE), decision boundary feature extraction (DBFE), Gaussian kernel PCA (KPCA) and Bhattacharyya distance feature selection (BDFS). Experiments were run with two classification algorithms: the support vector machine (SVM) and random forest (RF) algorithms. We demonstrate that the commonly used PCA approach seems to be nonoptimal in a large number of cases in terms of classification accuracy, and the other FR methods may be more suitable as preprocessing approaches for the EMP.

    @Article{castaings2010influence,
    title = {On the influence of feature reduction for the classification of hyperspectral images based on the extended morphological profile},
    author = {Castaings, Thibaut and Waske, Bj\"orn and Benediktsson, Jon Atli and Chanussot, Jocelyn},
    journal = {International Journal of Remote Sensing},
    year = {2010},
    number = {22},
    pages = {5975--5991},
    internal-note = {NOTE(review): page range and issue identical to dallamura2010extended in the same volume/issue -- likely a metadata import error; verify against DOI 10.1080/01431161.2010.512313},
    volume = {31},
    abstract = {In this study we investigated the classification of hyperspectral data with high spatial resolution. Previously, methods that generate a so-called extended morphological profile (EMP) from the principal components of an image have been proposed to create base images for morphological transformations. However, it can be assumed that the feature reduction (FR) may have a significant effect on the accuracy of the classification of the EMP. We therefore investigated the effect of different FR methods on the generation and classification of the EMP of hyperspectral images from urban areas, using a machine learning-based algorithm for classification. The applied FR methods include: principal component analysis (PCA), nonparametric weighted feature extraction (NWFE), decision boundary feature extraction (DBFE), Gaussian kernel PCA (KPCA) and Bhattacharyya distance feature selection (BDFS). Experiments were run with two classification algorithms: the support vector machine (SVM) and random forest (RF) algorithms. We demonstrate that the commonly used PCA approach seems to be nonoptimal in a large number of cases in terms of classification accuracy, and the other FR methods may be more suitable as preprocessing approaches for the EMP.},
    doi = {10.1080/01431161.2010.512313},
    owner = {waske},
    issn = {0143-1161},
    tc = {4},
    timestamp = {2012.09.04},
    ut = {WOS:000284956500011},
    z8 = {0},
    z9 = {4},
    zb = {1},
    }

  • X. Ceamanos, B. Waske, J. A. Benediktsson, J. Chanussot, M. Fauvel, and J. R. Sveinsson, “A classifier ensemble based on fusion of support vector machines for classifying hyperspectral data,” International Journal of Image and Data Fusion, vol. 1, iss. 4, p. 293–307, 2010. doi:10.1080/19479832.2010.485935
    [BibTeX]

    Classification of hyperspectral data using a classifier ensemble that is based on support vector machines (SVMs) are addressed. First, the hyperspectral data set is decomposed into a few data sources according to the similarity of the spectral bands. Then, each source is processed separately by performing classification based on SVM. Finally, all outputs are used as input for final decision fusion performed by an additional SVM classifier. Results of the experiments underline how the proposed SVM fusion ensemble outperforms a standard SVM classifier in terms of overall and class accuracies, the improvement being irrespective of the size of the training sample set. The definition of the data sources resulting from the original data set is also studied.

    @Article{ceamanos2010classifier,
    title = {A classifier ensemble based on fusion of support vector machines for classifying hyperspectral data},
    author = {Ceamanos, Xavier and Waske, Bj\"orn and Benediktsson, Jon Atli and Chanussot, Jocelyn and Fauvel, Mathieu and Sveinsson, Johannes R.},
    journal = {International Journal of Image and Data Fusion},
    year = {2010},
    number = {4},
    pages = {293--307},
    volume = {1},
    abstract = {Classification of hyperspectral data using a classifier ensemble that is based on support vector machines (SVMs) are addressed. First, the hyperspectral data set is decomposed into a few data sources according to the similarity of the spectral bands. Then, each source is processed separately by performing classification based on SVM. Finally, all outputs are used as input for final decision fusion performed by an additional SVM classifier. Results of the experiments underline how the proposed SVM fusion ensemble outperforms a standard SVM classifier in terms of overall and class accuracies, the improvement being irrespective of the size of the training sample set. The definition of the data sources resulting from the original data set is also studied.},
    doi = {10.1080/19479832.2010.485935},
    }

  • M. Dalla Mura, J. A. Benediktsson, B. Waske, and L. Bruzzone, “Extended profiles with morphological attribute filters for the analysis of hyperspectral data,” International Journal of Remote Sensing, vol. 31, iss. 22, p. 5975–5991, 2010. doi:10.1080/01431161.2010.512425
    [BibTeX]

    Extended attribute profiles and extended multi-attribute profiles are presented for the analysis of hyperspectral high-resolution images. These extended profiles are based on morphological attribute filters and, through a multi-level analysis, are capable of extracting spatial features that can better model the spatial information, with respect to conventional extended morphological profiles. The features extracted by the proposed extended profiles were considered for a classification task. Two hyperspectral high-resolution datasets acquired for the city of Pavia, Italy, were considered in the analysis. The effectiveness of the introduced operators in modelling the spatial information was proved by the higher classification accuracies obtained with respect to those achieved by a conventional extended morphological profile.

    @Article{dallamura2010extended,
    title = {Extended profiles with morphological attribute filters for the analysis of hyperspectral data},
    author = {Dalla Mura, Mauro and Benediktsson, Jon Atli and Waske, Bj\"orn and Bruzzone, Lorenzo},
    journal = {International Journal of Remote Sensing},
    year = {2010},
    number = {22},
    pages = {5975--5991},
    volume = {31},
    abstract = {Extended attribute profiles and extended multi-attribute profiles are presented for the analysis of hyperspectral high-resolution images. These extended profiles are based on morphological attribute filters and, through a multi-level analysis, are capable of extracting spatial features that can better model the spatial information, with respect to conventional extended morphological profiles. The features extracted by the proposed extended profiles were considered for a classification task. Two hyperspectral high-resolution datasets acquired for the city of Pavia, Italy, were considered in the analysis. The effectiveness of the introduced operators in modelling the spatial information was proved by the higher classification accuracies obtained with respect to those achieved by a conventional extended morphological profile.},
    doi = {10.1080/01431161.2010.512425},
    owner = {waske},
    issn = {0143-1161},
    tc = {7},
    timestamp = {2012.09.04},
    ut = {WOS:000284956500013},
    z8 = {0},
    z9 = {7},
    zb = {0},
    }

  • M. Dalla Mura, J. A. Benediktsson, B. Waske, and L. Bruzzone, “Morphological Attribute Profiles for the Analysis of Very High Resolution Images,” IEEE Transactions on Geoscience and Remote Sensing, vol. 48, iss. 10, p. 3747–3762, 2010. doi:10.1109/TGRS.2010.2048116
    [BibTeX]

    Morphological attribute profiles (APs) are defined as a generalization of the recently proposed morphological profiles (MPs). APs provide a multilevel characterization of an image created by the sequential application of morphological attribute filters that can be used to model different kinds of the structural information. According to the type of the attributes considered in the morphological attribute transformation, different parametric features can be modeled. The generation of APs, thanks to an efficient implementation, strongly reduces the computational load required for the computation of conventional MPs. Moreover, the characterization of the image with different attributes leads to a more complete description of the scene and to a more accurate modeling of the spatial information than with the use of conventional morphological filters based on a predefined structuring element. Here, the features extracted by the proposed operators were used for the classification of two very high resolution panchromatic images acquired by Quickbird on the city of Trento, Italy. The experimental analysis proved the usefulness of APs in modeling the spatial information present in the images. The classification maps obtained by considering different APs result in a better description of the scene (both in terms of thematic and geometric accuracy) than those obtained with an MP.

    @Article{dallamura2010morphological,
    title = {Morphological Attribute Profiles for the Analysis of Very High Resolution Images},
    author = {Dalla Mura, Mauro and Benediktsson, Jon Atli and Waske, Bj\"orn and Bruzzone, Lorenzo},
    journal = {IEEE Transactions on Geoscience and Remote Sensing},
    year = {2010},
    month = oct,
    number = {10},
    pages = {3747--3762},
    volume = {48},
    abstract = {Morphological attribute profiles (APs) are defined as a generalization of the recently proposed morphological profiles (MPs). APs provide a multilevel characterization of an image created by the sequential application of morphological attribute filters that can be used to model different kinds of the structural information. According to the type of the attributes considered in the morphological attribute transformation, different parametric features can be modeled. The generation of APs, thanks to an efficient implementation, strongly reduces the computational load required for the computation of conventional MPs. Moreover, the characterization of the image with different attributes leads to a more complete description of the scene and to a more accurate modeling of the spatial information than with the use of conventional morphological filters based on a predefined structuring element. Here, the features extracted by the proposed operators were used for the classification of two very high resolution panchromatic images acquired by Quickbird on the city of Trento, Italy. The experimental analysis proved the usefulness of APs in modeling the spatial information present in the images. The classification maps obtained by considering different APs result in a better description of the scene (both in terms of thematic and geometric accuracy) than those obtained with an MP.},
    doi = {10.1109/TGRS.2010.2048116},
    owner = {waske},
    issn = {0196-2892},
    tc = {15},
    timestamp = {2012.09.04},
    ut = {WOS:000283349400014},
    z8 = {0},
    z9 = {15},
    zb = {1},
    }

  • W. Förstner, “Minimal Representations for Uncertainty and Estimation in Projective Spaces,” in Proc. of Asian Conf. on Computer Vision, 2010, p. 619–633, Part II. doi:10.1007/978-3-642-19309-5_48
    [BibTeX] [PDF]

    Estimation using homogeneous entities has to cope with obstacles such as singularities of covariance matrices and redundant parametrizations which do not allow an immediate definition of maximum likelihood estimation and lead to estimation problems with more parameters than necessary. The paper proposes a representation of the uncertainty of all types of geometric entities and estimation procedures for geometric entities and transformations which (1) only require the minimum number of parameters, (2) are free of singularities, (3) allow for a consistent update within an iterative procedure, (4) enable to exploit the simplicity of homogeneous coordinates to represent geometric constraints and (5) allow to handle geometric entities which are at in nity or at least very far, avoiding the usage of concepts like the inverse depth. Such representations are already available for transformations such as rotations, motions (Rosenhahn 2002), homographies (Begelfor 2005), or the projective correlation with fundamental matrix (Bartoli 2004) all being elements of some Lie group. The uncertainty is represented in the tangent space of the manifold, namely the corresponding Lie algebra. However, to our knowledge no such representations are developed for the basic geometric entities such as points, lines and planes, as in addition to use the tangent space of the manifolds we need transformation of the entities such that they stay on their specific manifold during the estimation process. We develop the concept, discuss its usefulness for bundle adjustment and demonstrate (a) its superiority compared to more simple methods for vanishing point estimation, (b) its rigour when estimating 3D lines from 3D points and (c) its applicability for determining 3D lines from observed image line segments in a multi view setup.

    @InProceedings{forstner2010minimal,
    title = {Minimal Representations for Uncertainty and Estimation in Projective Spaces},
    author = {F\"orstner, Wolfgang},
    booktitle = {Proc. of Asian Conf. on Computer Vision},
    year = {2010},
    note = {Queenstown, New Zealand},
    pages = {619--633, Part II},
    abstract = {Estimation using homogeneous entities has to cope with obstacles such as singularities of covariance matrices and redundant parametrizations which do not allow an immediate definition of maximum likelihood estimation and lead to estimation problems with more parameters than necessary. The paper proposes a representation of the uncertainty of all types of geometric entities and estimation procedures for geometric entities and transformations which (1) only require the minimum number of parameters, (2) are free of singularities, (3) allow for a consistent update within an iterative procedure, (4) enable to exploit the simplicity of homogeneous coordinates to represent geometric constraints and (5) allow to handle geometric entities which are at in nity or at least very far, avoiding the usage of concepts like the inverse depth. Such representations are already available for transformations such as rotations, motions (Rosenhahn 2002), homographies (Begelfor 2005), or the projective correlation with fundamental matrix (Bartoli 2004) all being elements of some Lie group. The uncertainty is represented in the tangent space of the manifold, namely the corresponding Lie algebra. However, to our knowledge no such representations are developed for the basic geometric entities such as points, lines and planes, as in addition to use the tangent space of the manifolds we need transformation of the entities such that they stay on their specific manifold during the estimation process. We develop the concept, discuss its usefulness for bundle adjustment and demonstrate (a) its superiority compared to more simple methods for vanishing point estimation, (b) its rigour when estimating 3D lines from 3D points and (c) its applicability for determining 3D lines from observed image line segments in a multi view setup.},
    doi = {10.1007/978-3-642-19309-5_48},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner2010Minimal.pdf},
    }

  • W. Förstner, “Optimal Vanishing Point Detection and Rotation Estimation of Single Images of a Legolandscene,” in Int. Archives of Photogrammetry and Remote Sensing, 2010, p. 157–163, Part A.
    [BibTeX] [PDF]

    The paper presents a method for automatically and optimally determining the vanishing points of a single image, and in case the interior orientation is given, the rotation of an image with respect to the intrinsic coordinate system of a lego land scene. We perform rigorous testing and estimation in order to be as independent on control parameters as possible. This refers to (1) estimating vanishing points from line segments and the rotation matrix, (2) to testing during RANSAC and during boosting lines and (3) to classifying the line segments w. r. t. their vanishing point. Spherically normalized homogeneous coordinates are used for line segments and especially for vanishing points to allow for points at infinity. We propose a minimal representation for the uncertainty of homogeneous coordinates of 2D points and 2D lines and rotations to avoid the use of singular covariance matrices of observed line segments. This at the same time allows to estimate the parameters with a minimal representation. The vanishing point detection method is experimentally validated on a set of 292 images.

    @inproceedings{forstner2010optimal,
      author       = {F\"orstner, Wolfgang},
      title        = {Optimal Vanishing Point Detection and Rotation Estimation of Single Images of a Legolandscene},
      booktitle    = {Int. Archives of Photogrammetry and Remote Sensing},
      organization = {ISPRS Symposium Comm. III, Paris},
      year         = {2010},
      pages        = {157--163, Part A.},
      abstract     = {The paper presents a method for automatically and optimally determining the vanishing points of a single image, and in case the interior orientation is given, the rotation of an image with respect to the intrinsic coordinate system of a lego land scene. We perform rigorous testing and estimation in order to be as independent on control parameters as possible. This refers to (1) estimating vanishing points from line segments and the rotation matrix, (2) to testing during RANSAC and during boosting lines and (3) to classifying the line segments w. r. t. their vanishing point. Spherically normalized homogeneous coordinates are used for line segments and especially for vanishing points to allow for points at infinity. We propose a minimal representation for the uncertainty of homogeneous coordinates of 2D points and 2D lines and rotations to avoid the use of singular covariance matrices of observed line segments. This at the same time allows to estimate the parameters with a minimal representation. The vanishing point detection method is experimentally validated on a set of 292 images.},
      location     = {wf},
      url          = {https://www.ipb.uni-bonn.de/pdfs/Forstner2010Optimal.pdf},
    }

  • B. Frank, R. Schmedding, C. Stachniss, M. Teschner, and W. Burgard, “Learning Deformable Object Models for Mobile Robot Path Planning using Depth Cameras and a Manipulation Robot,” in Proc. of the Workshop RGB-D: Advanced Reasoning with Depth Cameras at Robotics: Science and Systems (RSS), Zaragoza, Spain, 2010.
    [BibTeX] [PDF]
    [none]
    @inproceedings{frank2010,
      author    = {B. Frank and R. Schmedding and C. Stachniss and M. Teschner and W. Burgard},
      title     = {Learning Deformable Object Models for Mobile Robot Path Planning using Depth Cameras and a Manipulation Robot},
      booktitle = {Proc. of the Workshop RGB-D: Advanced Reasoning with Depth Cameras at Robotics: Science and Systems (RSS)},
      year      = {2010},
      address   = {Zaragoza, Spain},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/frank10rssws.pdf},
    }

  • B. Frank, R. Schmedding, C. Stachniss, M. Teschner, and W. Burgard, “Learning the Elasticity Parameters of Deformable Objects with a Manipulation Robot,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), Taipei, Taiwan, 2010.
    [BibTeX] [PDF]
    [none]
    @inproceedings{frank2010a,
      author    = {B. Frank and R. Schmedding and C. Stachniss and M. Teschner and W. Burgard},
      title     = {Learning the Elasticity Parameters of Deformable Objects with a Manipulation Robot},
      booktitle = iros,
      year      = {2010},
      address   = {Taipei, Taiwan},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/frank10iros.pdf},
    }

  • G. Grisetti, R. Kümmerle, C. Stachniss, and W. Burgard, “A Tutorial on Graph-based SLAM,” IEEE Transactions on Intelligent Transportation Systems Magazine, vol. 2, p. 31–43, 2010.
    [BibTeX] [PDF]
    [none]
    @article{grisetti2010a,
      author        = {G. Grisetti and R. K{\"u}mmerle and C. Stachniss and W. Burgard},
      title         = {A Tutorial on Graph-based {SLAM}},
      journal       = {IEEE Transactions on Intelligent Transportation Systems Magazine},
      year          = {2010},
      volume        = {2},
      number        = {4},
      pages         = {31--43},
      abstract      = {[none]},
      timestamp     = {2014.04.24},
      url           = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/grisetti10titsmag.pdf},
      internal-note = {review: renamed nonstandard field 'issue' to the standard 'number' so the issue number is actually printed by BibTeX styles},
    }

  • G. Grisetti, R. Kümmerle, C. Stachniss, U. Frese, and C. Hertzberg, “Hierarchical Optimization on Manifolds for Online 2D and 3D Mapping,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Anchorage, Alaska, 2010.
    [BibTeX] [PDF]
    [none]
    @inproceedings{grisetti2010,
      author    = {G. Grisetti and R. K{\"u}mmerle and C. Stachniss and U. Frese and C. Hertzberg},
      title     = {Hierarchical Optimization on Manifolds for Online 2D and 3D Mapping},
      booktitle = icra,
      year      = {2010},
      address   = {Anchorage, Alaska},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/grisetti10icra.pdf},
    }

  • A. Hüsgen, “Multi-Modal Segmentation of Anatomical and Functional Image of the Brain,” Diploma Thesis, University of Bonn, 2010.
    [BibTeX] [PDF]
    [none]
    @mastersthesis{husgen2010multi,
      author   = {H\"usgen, Andreas},
      title    = {Multi-Modal Segmentation of Anatomical and Functional Image of the Brain},
      school   = {University of Bonn},
      year     = {2010},
      type     = {Diploma Thesis},
      note     = {Betreuung: Prof. Dr.-Ing. W. F\"orstner, Privatdozent Dr. Volker Steinhage},
      abstract = {[none]},
      url      = {https://www.ipb.uni-bonn.de/pdfs/Husgen2010Multi.pdf},
    }

  • A. Hecheltjen, B. Waske, F. Thonfeld, M. Braun, and G. Menz, “Support Vector Machines for Multitemporal and Multisensor Change Detection,” in ESA’s Living Planet Symposium (ESA SP-686), 2010.
    [BibTeX]
    [none]
    @inproceedings{hecheltjen2010support,
      author    = {Hecheltjen, Antje and Waske, Bj\"orn and Thonfeld, Frank and Braun, Matthias and Menz, Gunter},
      title     = {Support Vector Machines for Multitemporal and Multisensor Change Detection},
      booktitle = {ESA's Living Planet Symposium (ESA SP-686)},
      year      = {2010},
      abstract  = {[none]},
      owner     = {waske},
      timestamp = {2012.09.05},
    }

  • A. Hornung, M. Bennewitz, C. Stachniss, H. Strasdat, S. Oßwald, and W. Burgard, “Learning Adaptive Navigation Strategies for Resource-Constrained Systems,” in Proc. of the Int. Workshop on Evolutionary and Reinforcement Learning for Autonomous Robot Systems, Lisbon, Portugal, 2010.
    [BibTeX] [PDF]
    [none]
    @inproceedings{hornung2010,
      author        = {A. Hornung and M. Bennewitz and C. Stachniss and H. Strasdat and S. O{\ss}wald and W. Burgard},
      title         = {Learning Adaptive Navigation Strategies for Resource-Constrained Systems},
      booktitle     = {Proc. of the Int. Workshop on Evolutionary and Reinforcement Learning for Autonomous Robot Systems},
      year          = {2010},
      address       = {Lisbon, Portugal},
      abstract      = {[none]},
      timestamp     = {2014.04.24},
      url           = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/hornung10erlars.pdf},
      internal-note = {review: fixed author 'M.Bennewitz' -> 'M. Bennewitz' (missing space broke BibTeX name parsing)},
    }

  • M. Karg, K. M. Wurm, C. Stachniss, K. Dietmayer, and W. Burgard, “Consistent Mapping of Multistory Buildings by Introducing Global Constraints to Graph-based SLAM,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Anchorage, Alaska, 2010.
    [BibTeX] [PDF]
    [none]
    @inproceedings{karg2010,
      author    = {M. Karg and K.M. Wurm and C. Stachniss and K. Dietmayer and W. Burgard},
      title     = {Consistent Mapping of Multistory Buildings by Introducing Global Constraints to Graph-based {SLAM}},
      booktitle = icra,
      year      = {2010},
      address   = {Anchorage, Alaska},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/karg10icra.pdf},
    }

  • F. Korč, D. Schneider, and W. Förstner, “On Nonparametric Markov Random Field Estimation for Fast Automatic Segmentation of MRI Knee Data,” in Proc. of the 4th Medical Image Analysis for the Clinic – A Grand Challenge workshop, MICCAI, 2010, p. 261–270.
    [BibTeX] [PDF]

    We present a fast automatic reproducible method for 3d semantic segmentation of magnetic resonance images of the knee. We formulate a single global model that allows to jointly segment all classes. The model estimation was performed automatically without manual interaction and parameter tuning. The segmentation of a magnetic resonance image with 11 Mio voxels took approximately one minute. Our labeling results by far do not reach the performance of complex state of the art approaches designed to produce clinically relevant results. Our results could potentially be useful for rough visualization or initialization of computationally demanding methods. Our main contribution is to provide insights in possible strategies when employing global statistical models

    @inproceedings{korvc2010nonparametric,
      author        = {Kor{\v{c}}, Filip and Schneider, David and F\"orstner, Wolfgang},
      title         = {On Nonparametric Markov Random Field Estimation for Fast Automatic Segmentation of MRI Knee Data},
      booktitle     = {Proc. of the 4th Medical Image Analysis for the Clinic - A Grand Challenge workshop, MICCAI},
      year          = {2010},
      pages         = {261--270},
      note          = {Beijing},
      abstract      = {We present a fast automatic reproducible method for 3d semantic segmentation of magnetic resonance images of the knee. We formulate a single global model that allows to jointly segment all classes. The model estimation was performed automatically without manual interaction and parameter tuning. The segmentation of a magnetic resonance image with 11 Mio voxels took approximately one minute. Our labeling results by far do not reach the performance of complex state of the art approaches designed to produce clinically relevant results. Our results could potentially be useful for rough visualization or initialization of computationally demanding methods. Our main contribution is to provide insights in possible strategies when employing global statistical models},
      url           = {https://www.ipb.uni-bonn.de/pdfs/Korvc2010Nonparametric.pdf},
      internal-note = {review: fixed undefined control sequence {\vc} -> {\v{c}} (caron accent) in the first author's surname},
    }

  • H. Kretzschmar, G. Grisetti, and C. Stachniss, “Lifelong Map Learning for Graph-based SLAM in Static Environments,” KI – Künstliche Intelligenz, vol. 24, p. 199–206, 2010.
    [BibTeX]
    [none]
    @article{kretzschmar2010,
      author        = {H. Kretzschmar and G. Grisetti and C. Stachniss},
      title         = {Lifelong Map Learning for Graph-based {SLAM} in Static Environments},
      journal       = {{KI} -- {K}\"unstliche {I}ntelligenz},
      year          = {2010},
      volume        = {24},
      number        = {3},
      pages         = {199--206},
      abstract      = {[none]},
      timestamp     = {2014.04.24},
      internal-note = {review: renamed nonstandard field 'issue' to the standard 'number' so the issue number is actually printed by BibTeX styles},
    }

  • J. Müller, C. Stachniss, K. O. Arras, and W. Burgard, “Socially Inspired Motion Planning for Mobile Robots in Populated Environments,” in Cognitive Systems, Springer, 2010.
    [BibTeX]
    [none]
    @incollection{muller2010,
      author    = {M\"{u}ller, J. and Stachniss, C. and Arras, K.O. and Burgard, W.},
      title     = {Socially Inspired Motion Planning for Mobile Robots in Populated Environments},
      booktitle = {Cognitive Systems},
      series    = {Cognitive Systems Monographs},
      publisher = springer,
      year      = {2010},
      note      = {In press},
      abstract  = {[none]},
      timestamp = {2014.04.24},
    }

  • T. Mewes, B. Waske, J. Franke, and G. Menz, “Derivation of stress severities in wheat from hyperspectral data using support vector regression,” in 2nd Workshop on Hyperspectral Image and Signal Processing: Evolution in Remote Sensing (WHISPERS 2010), 2010. doi:10.1109/WHISPERS.2010.5594921
    [BibTeX]

    The benefits and limitations of crop stress detection by hyperspectral data analysis have been examined in detail. It could thereby be demonstrated that even a differentiation between healthy and fungal infected wheat stands is possible and profits by analyzing entire spectra or specifically selected spectral bands/ranges. For reasons of practicability in agriculture, spatial information about the health status of crop plants beyond a binary classification would be a major benefit. Thus, the potential of hyperspectral data for the derivation of several disease severity classes or moreover the derivation of continual disease severity has to be further examined. In the present study, a state-of-the-art regression approach using support vector machines (SVM) has been applied to hyperspectral AISA-Dual data to derive the disease severity caused by leaf rust (Puccinina recondita) in wheat. Ground truth disease ratings were realized within an experimental field. A mean correlation coefficient of r=0.69 between severities and support vector regression predicted severities could be achieved using indepent training and test data. The results show that the SVR is generally suitable for the derivation of continual disease severity values, but the crucial point is the uncertainty in the reference severity data, which is used to train the regression.

    @inproceedings{mewes2010derivation,
      author    = {Mewes, T. and Waske, Bj\"orn and Franke, J. and Menz, G.},
      title     = {Derivation of stress severities in wheat from hyperspectral data using support vector regression},
      booktitle = {2nd Workshop on Hyperspectral Image and Signal Processing: Evolution in Remote Sensing (WHISPERS 2010)},
      year      = {2010},
      doi       = {10.1109/WHISPERS.2010.5594921},
      keywords  = {AISA-Dual data;Puccinina recondita;agriculture;binary classification;crop stress detection;fungal infected wheat;hyperspectral data;leaf rust;stress severity derivation;support vector machine;support vector regression;agriculture;crops;geophysical techniques;regression analysis;support vector machines;},
      abstract  = {The benefits and limitations of crop stress detection by hyperspectral data analysis have been examined in detail. It could thereby be demonstrated that even a differentiation between healthy and fungal infected wheat stands is possible and profits by analyzing entire spectra or specifically selected spectral bands/ranges. For reasons of practicability in agriculture, spatial information about the health status of crop plants beyond a binary classification would be a major benefit. Thus, the potential of hyperspectral data for the derivation of several disease severity classes or moreover the derivation of continual disease severity has to be further examined. In the present study, a state-of-the-art regression approach using support vector machines (SVM) has been applied to hyperspectral AISA-Dual data to derive the disease severity caused by leaf rust (Puccinina recondita) in wheat. Ground truth disease ratings were realized within an experimental field. A mean correlation coefficient of r=0.69 between severities and support vector regression predicted severities could be achieved using indepent training and test data. The results show that the SVR is generally suitable for the derivation of continual disease severity values, but the crucial point is the uncertainty in the reference severity data, which is used to train the regression.},
      owner     = {waske},
      timestamp = {2012.09.05},
    }

  • M. Muffert, “Verwendung eines mosaikbasierten Kamerasystems zur Bestimmung von räumlichen Orientierungsänderungen von mobilen Objekten,” Master Thesis, 2010.
    [BibTeX] [PDF]

    The estimation of relative spatial positions and orientations is one of the most important tasks of engineering geodesy. For example, we need these parameters in precision farming or controlling the driving direction of construction vehicles. It is usual to use multi-sensor systems in these applications which are often a combination of GPS-sensors with Inertial Navigation Systems (INS). An optimal solution for the searched parameters could be achieved using filtering processes.

    @mastersthesis{muffert2010verwendung,
      author        = {Muffert, Maximilian},
      title         = {Verwendung eines mosaikbasierten Kamerasystems zur Bestimmung von r\"aumlichen Orientierungs\"anderungen von mobilen Objekten},
      school        = {Institute of Photogrammetry, University of Bonn},
      year          = {2010},
      note          = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Prof. Dr.-Ing. Heiner Kuhlmann},
      abstract      = {The estimation of relative spatial positions and orientations is one of the most important tasks of engineering geodesy. For example, we need these parameters in precision farming or controlling the driving direction of construction vehicles. It is usual to use multi-sensor systems in these applications which are often a combination of GPS-sensors with Inertial Navigation Systems (INS). An optimal solution for the searched parameters could be achieved using filtering processes.},
      city          = {Bonn},
      url           = {https://www.ipb.uni-bonn.de/pdfs/Muffert2010Verwendung.pdf},
      internal-note = {review: fixed author typo 'Maxilmilian' -> 'Maximilian' (matches muffert2010estimation) and missing space in the school field},
    }

  • M. Muffert, J. Siegemund, and W. Förstner, “The estimation of spatial positions by using an omnidirectional camera system,” in 2nd International Conf. on Machine Control & Guidance, 2010, p. 95–104.
    [BibTeX] [PDF]

    With an omnidirectional camera system, it is possible to take 360-degree views of the surrounding area at each camera position. These systems are used particularly in robotic applications, in autonomous navigation and supervision technology for ego-motion estimation. In addition to the visual capture of the environment itself, we can compute the parameters of orientation and position from image sequences, i.e. we get three parameters of position and three of orientation (yaw rate, pitch and roll angle) at each time of acquisition. The aim of the presented project is to investigate the quality of the spatial trajectory of a mobile survey vehicle from the recorded image sequences. In this paper, we explain the required photogrammetric background and show the advantages of omnidirectional camera systems for this task. We present the first results on our test set and discuss alternative applications for omnidirectional cameras.

    @inproceedings{muffert2010estimation,
      author    = {Muffert, Maximilian and Siegemund, Jan and F\"orstner, Wolfgang},
      title     = {The estimation of spatial positions by using an omnidirectional camera system},
      booktitle = {2nd International Conf. on Machine Control \& Guidance},
      year      = {2010},
      month     = mar,
      pages     = {95--104},
      abstract  = {With an omnidirectional camera system, it is possible to take 360-degree views of the surrounding area at each camera position. These systems are used particularly in robotic applications, in autonomous navigation and supervision technology for ego-motion estimation. In addition to the visual capture of the environment itself, we can compute the parameters of orientation and position from image sequences, i.e. we get three parameters of position and three of orientation (yaw rate, pitch and roll angle) at each time of acquisition. The aim of the presented project is to investigate the quality of the spatial trajectory of a mobile survey vehicle from the recorded image sequences. In this paper, we explain the required photogrammetric background and show the advantages of omnidirectional camera systems for this task. We present the first results on our test set and discuss alternative applications for omnidirectional cameras.},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Muffert2010estimation.pdf},
    }

  • C. Plagemann, C. Stachniss, J. Hess, F. Endres, and N. Franklin, “A Nonparametric Learning Approach to Range Sensing from Omnidirectional Vision,” Robotics and Autonomous Systems, vol. 58, p. 762–772, 2010.
    [BibTeX]
    [none]
    @article{plagemann2010,
      author        = {C. Plagemann and C. Stachniss and J. Hess and F. Endres and N. Franklin},
      title         = {A Nonparametric Learning Approach to Range Sensing from Omnidirectional Vision},
      journal       = jras,
      year          = {2010},
      volume        = {58},
      number        = {6},
      pages         = {762--772},
      abstract      = {[none]},
      timestamp     = {2014.04.24},
      internal-note = {review: renamed nonstandard field 'issue' to the standard 'number' so the issue number is actually printed by BibTeX styles},
    }

  • M. Röder-Sorge, “Konzeption und Anwendung von Entscheidungsnetzwerken im Städtebau,” Diploma Thesis, University of Bonn, 2010.
    [BibTeX]

    In dieser Arbeit wird mit dem Programm Netica ein Entscheidungsnetzwerk aufgestellt, das für sechs Gebäude eines Wohnkomplexes in Leipzig-Grünau die optimalen Entscheidungen über deren zukünftige Entwicklung ermittelt. In das Netzwerk werden die Interessen der Mieter, der Stadtverwaltung und der Wohnungsunternehmen Grünaus mit einbezogen, wobei mit letzeren Interviews über die Gewichtung der Einflussfaktoren im Stadtumbau geführt wurden. Netica eignet sich nur mit Einschränkungen für die Modellierung und Entscheidungsfindung im Stadtumbau, da nicht mehr als sechs Gebäude modelliert werden können und, genau wie mit allen anderen Entscheidungsnetzwerkprogrammen, die Darstellung des Free-Rider-Problems nicht möglich ist.

    @mastersthesis{roder-sorge2010konzeption,
      author   = {R\"oder-Sorge, Marisa},
      title    = {Konzeption und Anwendung von Entscheidungsnetzwerken im St\"adtebau},
      school   = {University of Bonn},
      year     = {2010},
      type     = {Diploma Thesis},
      note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Prof. Dr.-Ing. Theo K\"otter},
      abstract = {In dieser Arbeit wird mit dem Programm Netica ein Entscheidungsnetzwerk aufgestellt, das f\"ur sechs Geb\"aude eines Wohnkomplexes in Leipzig-Gr\"unau die optimalen Entscheidungen \"uber deren zuk\"unftige Entwicklung ermittelt. In das Netzwerk werden die Interessen der Mieter, der Stadtverwaltung und der Wohnungsunternehmen Gr\"unaus mit einbezogen, wobei mit letzeren Interviews \"uber die Gewichtung der Einflussfaktoren im Stadtumbau gef\"uhrt wurden. Netica eignet sich nur mit Einschr\"ankungen f\"ur die Modellierung und Entscheidungsfindung im Stadtumbau, da nicht mehr als sechs Geb\"aude modelliert werden k\"onnen und, genau wie mit allen anderen Entscheidungsnetzwerkprogrammen, die Darstellung des Free-Rider-Problems nicht m\"oglich ist.},
    }

  • R. Roscher, F. Schindler, and W. Förstner, “High Dimensional Correspondences from Low Dimensional Manifolds – An Empirical Comparison of Graph-based Dimensionality Reduction Algorithms,” in The 3rd International Workshop on Subspace Methods, in conjunction with ACCV2010, 2010, p. 10. doi:10.1007/978-3-642-22819-3_34
    [BibTeX] [PDF]

    We discuss the utility of dimensionality reduction algorithms to put data points in high dimensional spaces into correspondence by learning a transformation between assigned data points on a lower dimensional structure. We assume that similar high dimensional feature spaces are characterized by a similar underlying low dimensional structure. To enable the determination of an affine transformation between two data sets we make use of well-known dimensional reduction algorithms. We demonstrate this procedure for applications like classification and assignments between two given data sets and evaluate six well-known algorithms during several experiments with different objectives. We show that with these algorithms and our transformation approach high dimensional data sets can be related to each other. We also show that linear methods turn out to be more suitable for assignment tasks, whereas graph-based methods appear to be superior for classification tasks.

    @inproceedings{roscher2010high,
      author    = {Roscher, Ribana and Schindler, Falko and F\"orstner, Wolfgang},
      title     = {High Dimensional Correspondences from Low Dimensional Manifolds -- An Empirical Comparison of Graph-based Dimensionality Reduction Algorithms},
      booktitle = {The 3rd International Workshop on Subspace Methods, in conjunction with ACCV2010},
      year      = {2010},
      pages     = {10},
      note      = {Queenstown, New Zealand},
      doi       = {10.1007/978-3-642-22819-3_34},
      abstract  = {We discuss the utility of dimensionality reduction algorithms to put data points in high dimensional spaces into correspondence by learning a transformation between assigned data points on a lower dimensional structure. We assume that similar high dimensional feature spaces are characterized by a similar underlying low dimensional structure. To enable the determination of an affine transformation between two data sets we make use of well-known dimensional reduction algorithms. We demonstrate this procedure for applications like classification and assignments between two given data sets and evaluate six well-known algorithms during several experiments with different objectives. We show that with these algorithms and our transformation approach high dimensional data sets can be related to each other. We also show that linear methods turn out to be more suitable for assignment tasks, whereas graph-based methods appear to be superior for classification tasks.},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Roscher2010High.pdf;Poster:Roscher2010High_Poster.pdf},
    }

  • R. Roscher, B. Waske, and W. Förstner, “Kernel Discriminative Random Fields for land cover classification,” in IAPR Workshop on Pattern Recognition in Remote Sensing (PRRS), 2010. doi:10.1109/PRRS.2010.5742801
    [BibTeX] [PDF]

    Logistic Regression has become a commonly used classifier, not only due to its probabilistic output and its direct usage in multi-class cases. We use a sparse Kernel Logistic Regression approach – the Import Vector Machines – for land cover classification. We improve our segmentation results applying a Discriminative Random Field framework on the probabilistic classification output. We consider the performance regarding to the classification accuracy and the complexity and compare it to the Gaussian Maximum Likelihood classification and the Support Vector Machines.

    @inproceedings{roscher2010kernel,
      author    = {Roscher, Ribana and Waske, Bj\"orn and F\"orstner, Wolfgang},
      title     = {Kernel Discriminative Random Fields for land cover classification},
      booktitle = {IAPR Workshop on Pattern Recognition in Remote Sensing (PRRS)},
      year      = {2010},
      note      = {Istanbul, Turkey},
      doi       = {10.1109/PRRS.2010.5742801},
      keywords  = {Gaussian maximum likelihood classification;image segmentation;import vector machine;kernel discriminative random fields;land cover classification;logistic regression;probabilistic classification;support vector machines;geophysical image processing;image classification;image segmentation;support vector machines;terrain mapping;},
      abstract  = {Logistic Regression has become a commonly used classifier, not only due to its probabilistic output and its direct usage in multi-class cases. We use a sparse Kernel Logistic Regression approach - the Import Vector Machines - for land cover classification. We improve our segmentation results applying a Discriminative Random Field framework on the probabilistic classification output. We consider the performance regarding to the classification accuracy and the complexity and compare it to the Gaussian Maximum Likelihood classification and the Support Vector Machines.},
      owner     = {waske},
      timestamp = {2012.09.05},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Roscher2010Kernel.pdf},
    }

  • J. Siegemund, D. Pfeiffer, U. Franke, and W. Förstner, “Curb Reconstruction using Conditional Random Fields,” in IEEE Intelligent Vehicles Symposium (IV), 2010, p. 203–210. doi:10.1109/IVS.2010.5548096
    [BibTeX] [PDF]

    This paper presents a generic framework for curb detection and reconstruction in the context of driver assistance systems. Based on a 3D point cloud, we estimate the parameters of a 3D curb model, incorporating also the curb adjacent surfaces, e.g. street and sidewalk. We apply an iterative two step approach. First, the measured 3D points, e.g., obtained from dense stereo vision, are assigned to the curb adjacent surfaces using loopy belief propagation on a Conditional Random Field. Based on this result, we reconstruct the surfaces and in particular the curb. Our system is not limited to straight-line curbs, i.e. it is able to deal with curbs of different curvature and varying height. The proposed algorithm runs in real-time on our demon- strator vehicle and is evaluated in urban real-world scenarios. It yields highly accurate results even for low curbs up to 20 m distance.

    @inproceedings{siegemund2010curb,
      author    = {Siegemund, Jan and Pfeiffer, David and Franke, Uwe and F\"orstner, Wolfgang},
      title     = {Curb Reconstruction using Conditional Random Fields},
      booktitle = {IEEE Intelligent Vehicles Symposium (IV)},
      publisher = {IEEE Computer Society},
      year      = {2010},
      month     = jun,
      pages     = {203--210},
      doi       = {10.1109/IVS.2010.5548096},
      abstract  = {This paper presents a generic framework for curb detection and reconstruction in the context of driver assistance systems. Based on a 3D point cloud, we estimate the parameters of a 3D curb model, incorporating also the curb adjacent surfaces, e.g. street and sidewalk. We apply an iterative two step approach. First, the measured 3D points, e.g., obtained from dense stereo vision, are assigned to the curb adjacent surfaces using loopy belief propagation on a Conditional Random Field. Based on this result, we reconstruct the surfaces and in particular the curb. Our system is not limited to straight-line curbs, i.e. it is able to deal with curbs of different curvature and varying height. The proposed algorithm runs in real-time on our demon- strator vehicle and is evaluated in urban real-world scenarios. It yields highly accurate results even for low curbs up to 20 m distance.},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Siegemund2010Curb.pdf},
    }

  • R. Steffen, J. Frahm, and W. Förstner, “Relative Bundle Adjustment based on Trifocal Constraints,” in ECCV Workshop on Reconstruction and Modeling of Large-Scale 3D Virtual Environments, 2010. doi:10.1007/978-3-642-35740-4_22
    [BibTeX] [PDF]

    In this paper we propose a novel approach to bundle adjustment for large-scale camera configurations. The method does not need to include the 3D points in the optimization as parameters. Additionally, we model the parameters of a camera only relative to a nearby camera to achieve a stable estimation of all cameras. This guarantees to yield a normal equation system with a numerical condition, which practically is independent of the number of images. Secondly, instead of using the classical perspective relation between object point, camera and image point, we use epipolar and trifocal constraints to implicitly establish the relations between the cameras via the object structure. This avoids the explicit reference to 3D points thereby handling points far from the camera in a numerically stable fashion. We demonstrate the resulting stability and high convergence rates using synthetic and real data.

    @inproceedings{steffen2010relative,
      author       = {Steffen, Richard and Frahm, Jan-Michael and F\"orstner, Wolfgang},
      title        = {Relative Bundle Adjustment based on Trifocal Constraints},
      booktitle    = {ECCV Workshop on Reconstruction and Modeling of Large-Scale 3D Virtual Environments},
      organization = {ECCV 2010 Crete, Greece},
      year         = {2010},
      doi          = {10.1007/978-3-642-35740-4_22},
      abstract     = {In this paper we propose a novel approach to bundle adjustment for large-scale camera configurations. The method does not need to include the 3D points in the optimization as parameters. Additionally, we model the parameters of a camera only relative to a nearby camera to achieve a stable estimation of all cameras. This guarantees to yield a normal equation system with a numerical condition, which practically is independent of the number of images. Secondly, instead of using the classical perspective relation between object point, camera and image point, we use epipolar and trifocal constraints to implicitly establish the relations between the cameras via the object structure. This avoids the explicit reference to 3D points thereby handling points far from the camera in a numerically stable fashion. We demonstrate the resulting stability and high convergence rates using synthetic and real data.},
      url          = {https://www.ipb.uni-bonn.de/pdfs/Steffen2010Relative.pdf},
    }

  • J. Sturm, A. Jain, C. Stachniss, C. C. Kemp, and W. Burgard, “Robustly Operating Articulated Objects based on Experience,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), Taipei, Taiwan, 2010.
    [BibTeX] [PDF]
    [none]
    @InProceedings{sturm2010b,
    title = {Robustly Operating Articulated Objects based on Experience},
    author = {J. Sturm and A. Jain and C. Stachniss and C. C. Kemp and W. Burgard},
    booktitle = iros,
    year = {2010},
    address = {Taipei, Taiwan},
    abstract = {[none]},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/sturm10iros.pdf},
    }

  • J. Sturm, K. Konolige, C. Stachniss, and W. Burgard, “Vision-based Detection for Learning Articulation Models of Cabinet Doors and Drawers in Household Environments,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Anchorage, Alaska, 2010.
    [BibTeX] [PDF]
    [none]
    @inproceedings{sturm2010,
    author = {J. Sturm and K. Konolige and C. Stachniss and W. Burgard},
    title = {Vision-based Detection for Learning Articulation Models of Cabinet Doors and Drawers in Household Environments},
    booktitle = icra,
    address = {Anchorage, Alaska},
    year = {2010},
    abstract = {[none]},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/sturm10icra.pdf},
    }

  • J. Sturm, K. Konolige, C. Stachniss, and W. Burgard, “3D Pose Estimation, Tracking and Model Learning of Articulated Objects from Dense Depth Video using Projected Texture Stereo,” in Proc. of the Workshop RGB-D: Advanced Reasoning with Depth Cameras at Robotics: Science and Systems (RSS), Zaragoza, Spain, 2010.
    [BibTeX] [PDF]
    [none]
    @InProceedings{sturm2010a,
    title = {{3D} Pose Estimation, Tracking and Model Learning of Articulated Objects from Dense Depth Video using Projected Texture Stereo},
    author = {J. Sturm and K. Konolige and C. Stachniss and W. Burgard},
    booktitle = {Proc. of the Workshop RGB-D: Advanced Reasoning with Depth Cameras at Robotics: Science and Systems (RSS)},
    year = {2010},
    address = {Zaragoza, Spain},
    abstract = {[none]},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/sturm10rssws.pdf},
    }

  • S. Valero, J. Chanussot, J. A. Benediktsson, H. Talbot, and B. Waske, “Advanced directional mathematical morphology for the detection of the road network in very high resolution remote sensing images,” Pattern Recognition Letters, vol. 31, iss. 10, p. 1120–1127, 2010. doi:10.1016/j.patrec.2009.12.018
    [BibTeX]

    Very high spatial resolution (VHR) images allow to feature man-made structures such as roads and thus enable their accurate analysis. Geometrical characteristics can be extracted using mathematical morphology. However, the prior choice of a reference shape (structuring element) introduces a shape-bias. This paper presents a new method for extracting roads in Very High Resolution remotely sensed images based on advanced directional morphological operators. The proposed approach introduces the use of Path Openings and Path Closings in order to extract structural pixel information. These morphological operators remain flexible enough to fit rectilinear and slightly curved structures since they do not depend on the choice of a structural element shape. As a consequence, they outperform standard approaches using rotating rectangular structuring elements. The method consists in building a granulometry chain using Path Openings and Path Closing to construct Morphological Profiles. For each pixel, the Morphological Profile constitutes the feature vector on which our road extraction is based. (C) 2009 Published by Elsevier B.V.

    @Article{valero2010advanced,
    title = {Advanced directional mathematical morphology for the detection of the road network in very high resolution remote sensing images},
    author = {Valero, Silvia and Chanussot, Jocelyn and Benediktsson, Jon Atli and Talbot, Hugues and Waske, Bj\"orn},
    journal = {Pattern Recognition Letters},
    year = {2010},
    month = jul,
    number = {10},
    pages = {1120--1127},
    volume = {31},
    abstract = {Very high spatial resolution (VHR) images allow to feature man-made structures such as roads and thus enable their accurate analysis. Geometrical characteristics can be extracted using mathematical morphology. However, the prior choice of a reference shape (structuring element) introduces a shape-bias. This paper presents a new method for extracting roads in Very High Resolution remotely sensed images based on advanced directional morphological operators. The proposed approach introduces the use of Path Openings and Path Closings in order to extract structural pixel information. These morphological operators remain flexible enough to fit rectilinear and slightly curved structures since they do not depend on the choice of a structural element shape. As a consequence, they outperform standard approaches using rotating rectangular structuring elements. The method consists in building a granulometry chain using Path Openings and Path Closing to construct Morphological Profiles. For each pixel, the Morphological Profile constitutes the feature vector on which our road extraction is based. (C) 2009 Published by Elsevier B.V.},
    doi = {10.1016/j.patrec.2009.12.018},
    issn = {0167-8655},
    owner = {waske},
    si = {SI},
    tc = {4},
    timestamp = {2012.09.04},
    ut = {WOS:000279284000007},
    z8 = {2},
    z9 = {6},
    zb = {1},
    }

  • B. Waske, S. van der Linden, J. A. Benediktsson, A. Rabe, and P. Hostert, “Sensitivity of Support Vector Machines to Random Feature Selection in Classification of Hyperspectral Data,” IEEE Transactions on Geoscience and Remote Sensing, vol. 48, iss. 7, p. 2880–2889, 2010. doi:10.1109/TGRS.2010.2041784
    [BibTeX]

    The accuracy of supervised land cover classifications depends on factors such as the chosen classification algorithm, adequate training data, the input data characteristics, and the selection of features. Hyperspectral imaging provides more detailed spectral and spatial information on the land cover than other remote sensing resources. Over the past ten years, traditional and formerly widely accepted statistical classification methods have been superseded by more recent machine learning algorithms, e.g., support vector machines (SVMs), or by multiple classifier systems (MCS). This can be explained by limitations of statistical approaches with regard to high-dimensional data, multimodal classes, and often limited availability of training data. In the presented study, MCSs based on SVM and random feature selection (RFS) are applied to explore the potential of a synergetic use of the two concepts. We investigated how the number of selected features and the size of the MCS influence classification accuracy using two hyperspectral data sets, from different environmental settings. In addition, experiments were conducted with a varying number of training samples. Accuracies are compared with regular SVM and random forests. Experimental results clearly demonstrate that the generation of an SVM-based classifier system with RFS significantly improves overall classification accuracy as well as producer’s and user’s accuracies. In addition, the ensemble strategy results in smoother, i.e., more realistic, classification maps than those from stand-alone SVM. Findings from the experiments were successfully transferred onto an additional hyperspectral data set.

    @Article{waske2010sensitivity,
    title = {Sensitivity of Support Vector Machines to Random Feature Selection in Classification of Hyperspectral Data},
    author = {Waske, Bj\"orn and van der Linden, Sebastian and Benediktsson, Jon Atli and Rabe, Andreas and Hostert, Patrick},
    journal = {IEEE Transactions on Geoscience and Remote Sensing},
    year = {2010},
    month = jul,
    number = {7},
    pages = {2880--2889},
    volume = {48},
    abstract = {The accuracy of supervised land cover classifications depends on factors such as the chosen classification algorithm, adequate training data, the input data characteristics, and the selection of features. Hyperspectral imaging provides more detailed spectral and spatial information on the land cover than other remote sensing resources. Over the past ten years, traditional and formerly widely accepted statistical classification methods have been superseded by more recent machine learning algorithms, e.g., support vector machines (SVMs), or by multiple classifier systems (MCS). This can be explained by limitations of statistical approaches with regard to high-dimensional data, multimodal classes, and often limited availability of training data. In the presented study, MCSs based on SVM and random feature selection (RFS) are applied to explore the potential of a synergetic use of the two concepts. We investigated how the number of selected features and the size of the MCS influence classification accuracy using two hyperspectral data sets, from different environmental settings. In addition, experiments were conducted with a varying number of training samples. Accuracies are compared with regular SVM and random forests. Experimental results clearly demonstrate that the generation of an SVM-based classifier system with RFS significantly improves overall classification accuracy as well as producer's and user's accuracies. In addition, the ensemble strategy results in smoother, i.e., more realistic, classification maps than those from stand-alone SVM. Findings from the experiments were successfully transferred onto an additional hyperspectral data set.},
    doi = {10.1109/TGRS.2010.2041784},
    issn = {0196-2892},
    owner = {waske},
    tc = {10},
    timestamp = {2012.09.04},
    ut = {WOS:000281789800010},
    z8 = {0},
    z9 = {10},
    zb = {2},
    }

  • S. Wenzel and L. Hotz, “The Role of Sequences for Incremental Learning,” in ICAART 2010 – Proc. of the International Conf. on Agents and Artificial Intelligence, Valencia, Spain, 2010, p. 434–439.
    [BibTeX] [PDF]

    In this paper, we point out the role of sequences of samples for training an incremental learning method. We define characteristics of incremental learning methods to describe the influence of sample ordering on the performance of a learned model. We show the influence of sequence for two different types of incremental learning. One is aimed on learning structural models, the other on learning models to discriminate object classes. In both cases, we show the possibility to find good sequences before starting the training.

    @inproceedings{wenzel2010role,
    author = {Wenzel, Susanne and Hotz, Lothar},
    title = {The Role of Sequences for Incremental Learning},
    booktitle = {ICAART 2010 - Proc. of the International Conf. on Agents and Artificial Intelligence},
    editor = {Joaquim Filipe and Ana L. N. Fred and Bernadette Sharp},
    volume = {1},
    pages = {434--439},
    publisher = {INSTICC Press},
    address = {Valencia, Spain},
    month = jan,
    year = {2010},
    isbn = {978-989-674-021-4},
    abstract = {In this paper, we point out the role of sequences of samples for training an incremental learning method. We define characteristics of incremental learning methods to describe the influence of sample ordering on the performance of a learned model. We show the influence of sequence for two different types of incremental learning. One is aimed on learning structural models, the other on learning models to discriminate object classes. In both cases, we show the possibility to find good sequences before starting the training.},
    timestamp = {2011.01.18},
    url = {https://www.ipb.uni-bonn.de/pdfs/Wenzel2010Role.pdf},
    }

  • K. M. Wurm, C. Dornhege, P. Eyerich, C. Stachniss, B. Nebel, and W. Burgard, “Coordinated Exploration with Marsupial Teams of Robots using Temporal Symbolic Planning,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), Taipei, Taiwan, 2010.
    [BibTeX] [PDF]
    [none]
    @InProceedings{wurm2010a,
    title = {Coordinated Exploration with Marsupial Teams of Robots using Temporal Symbolic Planning},
    author = {K. M. Wurm and C. Dornhege and P. Eyerich and C. Stachniss and B. Nebel and W. Burgard},
    booktitle = iros,
    year = {2010},
    address = {Taipei, Taiwan},
    abstract = {[none]},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/wurm10iros.pdf},
    }

  • K. M. Wurm, A. Hornung, M. Bennewitz, C. Stachniss, and W. Burgard, “OctoMap: A Probabilistic, Flexible, and Compact 3D Map Representation for Robotic Systems,” in Proc. of the ICRA 2010 Workshop on Best Practice in 3D Perception and Modeling for Mobile Manipulation, Anchorage, AK, USA, 2010.
    [BibTeX] [PDF]
    [none]
    @InProceedings{wurm2010,
    title = {{OctoMap}: A Probabilistic, Flexible, and Compact {3D} Map Representation for Robotic Systems},
    author = {K. M. Wurm and A. Hornung and M. Bennewitz and C. Stachniss and W. Burgard},
    booktitle = {Proc. of the ICRA 2010 Workshop on Best Practice in 3D Perception and Modeling for Mobile Manipulation},
    year = {2010},
    address = {Anchorage, AK, USA},
    abstract = {[none]},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/wurm10icraws.pdf},
    }

  • K. M. Wurm, C. Stachniss, and G. Grisetti, “Bridging the Gap Between Feature- and Grid-based SLAM,” Robotics and Autonomous Systems, vol. 58, iss. 2, pp. 140-148, 2010. doi:10.1016/j.robot.2009.09.009
    [BibTeX] [PDF]
    [none]
    @Article{wurm2010b,
    title = {Bridging the Gap Between Feature- and Grid-based SLAM},
    author = {Wurm, K. M. and Stachniss, C. and Grisetti, G.},
    journal = jras,
    year = {2010},
    number = {2},
    pages = {140--148},
    volume = {58},
    abstract = {[none]},
    doi = {10.1016/j.robot.2009.09.009},
    issn = {0921-8890},
    timestamp = {2014.04.24},
    url = {https://ais.informatik.uni-freiburg.de/publications/papers/wurm10ras.pdf},
    }

  • M. Y. Yang, Y. Cao, W. Förstner, and J. McDonald, “Robust wide baseline scene alignment based on 3D viewpoint normalization,” in International Conf. on Advances in Visual Computing, 2010, p. 654–665. doi:10.1007/978-3-642-17289-2_63
    [BibTeX] [PDF]

    This paper presents a novel scheme for automatically aligning two widely separated 3D scenes via the use of viewpoint invariant features. The key idea of the proposed method is following. First, a number of dominant planes are extracted in the SfM 3D point cloud using a novel method integrating RANSAC and MDL to describe the underlying 3D geometry in urban settings. With respect to the extracted 3D planes, the original camera viewing directions are rectified to form the front-parallel views of the scene. Viewpoint invariant features are extracted on the canonical views to provide a basis for further matching. Compared to the conventional 2D feature detectors (e.g. SIFT, MSER), the resulting features have following advantages: (1) they are very discriminative and robust to perspective distortions and viewpoint changes due to exploiting scene structure; (2) the features contain useful local patch information which allow for efficient feature matching. Using the novel viewpoint invariant features, wide-baseline 3D scenes are automatically aligned in terms of robust image matching. The performance of the proposed method is comprehensively evaluated in our experiments. It’s demonstrated that 2D image feature matching can be significantly improved by considering 3D scene structure.

    @InProceedings{yang2010robust,
    title = {Robust wide baseline scene alignment based on {3D} viewpoint normalization},
    author = {Yang, Michael Ying and Cao, Yanpeng and F\"orstner, Wolfgang and McDonald, John},
    booktitle = {International Conf. on Advances in Visual Computing},
    year = {2010},
    pages = {654--665},
    publisher = {Springer-Verlag},
    abstract = {This paper presents a novel scheme for automatically aligning two widely separated 3D scenes via the use of viewpoint invariant features. The key idea of the proposed method is following. First, a number of dominant planes are extracted in the SfM 3D point cloud using a novel method integrating RANSAC and MDL to describe the underlying 3D geometry in urban settings. With respect to the extracted 3D planes, the original camera viewing directions are rectified to form the front-parallel views of the scene. Viewpoint invariant features are extracted on the canonical views to provide a basis for further matching. Compared to the conventional 2D feature detectors (e.g. SIFT, MSER), the resulting features have following advantages: (1) they are very discriminative and robust to perspective distortions and viewpoint changes due to exploiting scene structure; (2) the features contain useful local patch information which allow for efficient feature matching. Using the novel viewpoint invariant features, wide-baseline 3D scenes are automatically aligned in terms of robust image matching. The performance of the proposed method is comprehensively evaluated in our experiments. It's demonstrated that 2D image feature matching can be significantly improved by considering 3D scene structure.},
    doi = {10.1007/978-3-642-17289-2_63},
    url = {https://www.ipb.uni-bonn.de/pdfs/Yang2010Robust.pdf},
    }

  • M. Y. Yang and W. Förstner, “Plane Detection in Point Cloud Data,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2010-01, 2010.
    [BibTeX] [PDF]

    Plane detection is a prerequisite to a wide variety of vision tasks. RANdom SAmple Consensus (RANSAC) algorithm is widely used for plane detection in point cloud data. Minimum description length (MDL) principle is used to deal with several competing hypothesis. This paper presents a new approach to the plane detection by integrating RANSAC and MDL. The method could avoid detecting wrong planes due to the complex geometry of the 3D data. The paper tests the performance of proposed method on both synthetic and real data.

    @TechReport{yang2010plane,
    title = {Plane Detection in Point Cloud Data},
    author = {Yang, Michael Ying and F\"orstner, Wolfgang},
    institution = {Department of Photogrammetry, University of Bonn},
    year = {2010},
    number = {TR-IGG-P-2010-01},
    abstract = {Plane detection is a prerequisite to a wide variety of vision tasks. RANdom SAmple Consensus (RANSAC) algorithm is widely used for plane detection in point cloud data. Minimum description length (MDL) principle is used to deal with several competing hypothesis. This paper presents a new approach to the plane detection by integrating RANSAC and MDL. The method could avoid detecting wrong planes due to the complex geometry of the 3D data. The paper tests the performance of proposed method on both synthetic and real data.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Yang2010Plane.pdf},
    }

  • M. Y. Yang, W. Förstner, and M. Drauschke, “Hierarchical Conditional Random Field for Multi-class Image Classification,” in International Conf. on Computer Vision Theory and Applications (VISSAPP), 2010, p. 464–469.
    [BibTeX] [PDF]

    Multi-class image classification has made significant advances in recent years through the combination of local and global features. This paper proposes a novel approach called hierarchical conditional random field (HCRF) that explicitly models region adjacency graph and region hierarchy graph structure of an image. This allows to set up a joint and hierarchical model of local and global discriminative methods that augments conditional random field to a multi-layer model. Region hierarchy graph is based on a multi-scale watershed segmentation.

    @inproceedings{yang2010hierarchical,
    author = {Yang, Michael Ying and F\"orstner, Wolfgang and Drauschke, Martin},
    title = {Hierarchical Conditional Random Field for Multi-class Image Classification},
    booktitle = {International Conf. on Computer Vision Theory and Applications (VISSAPP)},
    pages = {464--469},
    year = {2010},
    abstract = {Multi-class image classification has made significant advances in recent years through the combination of local and global features. This paper proposes a novel approach called hierarchical conditional random field (HCRF) that explicitly models region adjacency graph and region hierarchy graph structure of an image. This allows to set up a joint and hierarchical model of local and global discriminative methods that augments conditional random field to a multi-layer model. Region hierarchy graph is based on a multi-scale watershed segmentation.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Yang2011Hierarchical.pdf},
    }

2009

  • B. Abendroth and M. zur Mühlen, “Genauigkeitsbeurteilung und Untersuchungen der Zuverlässigkeit von optischen Onlinemessungen,” Diploma Thesis, 2009.
    [BibTeX] [PDF]

    Vorwort Der Titel “Genauigkeitsbeurteilung und Untersuchungen der Zuverlässigkeit von optischen Onlinemessungen” impliziert eine weite Bandbreite an Untersuchungsmöglichkeiten. Diese allgemeine Einführung gibt einen Überblick über die untersuchten Aspekte dieser Arbeit. Neben der Motivation, die zu der Entstehung dieser Diplomarbeit geführt hat, beinhaltet diese Einführung eine grobe Gliederung der behandelten Themenschwerpunkte. Motivation Ein neues Aufgabengebiet innerhalb der Nahbereichsphotogrammetrie bietet die Konzeption von photogrammetrischen Messsystemen für industrielle Aufgabenstellungen. Die Firma AICON 3D Systems GmbH, mit deren Kooperation diese Arbeit entstand, hat sich auf die Entwicklung solcher Systeme spezialisiert. Sie gehört zu den weltweit führenden Unternehmen im Bereich der optischen kamerabasierten 3D-Vermessung. Ihr Anspruch ist es, hochgenaue und effiziente Produkte im Bereich von Inspektion und Prüfung zu entwickeln und zu überwachen. Ihre Produkte vertreibt das 1990 gegründete Unternehmen überwiegend in der Automobil-, Luft- und Raumfahrtindustrie sowie im Anlagen- und Schiffsbau. Zur Erfassung von dynamischen Vorgängen bietet das Unternehmen echtzeitfähige optische Messsysteme an, die je nach Konfiguration in der Lage sind einzelne signalisierte Punkte als 3D-Koordinaten zu erfassen oder die Bewegung eines Starrkörpers aufzunehmen. Damit diese photogrammetrischen Systeme gegenüber anderen Messsystemen im Konkurrenzkampf bestehen können, müssen sich diese einer ständigen Weiterentwicklung und Verbesserung unterziehen. Dabei steht insbesondere die Wirtschaftlichkeit, Zuverlässigkeit und die Genauigkeit der Systeme im Vordergrund. Außerdem ist es nötig einzelne Messsysteme zu charakterisieren, um sie vergleichbar zu machen und die Einsatzmöglichkeiten aufzuzeigen. 
Dazu gehört neben den oben genannten Kriterien der Genauigkeit, Zuverlässigkeit und Wirtschaftlichkeit auch das Spektrum der Einsatzmöglichkeiten mit systemspezifischen Rahmenbedingungen. Des Weiteren kann ein Vergleich über Hardware- und Software-Module geschehen. Im weiteren Verlauf dieser Arbeit werden die Eigenschaften der Genauigkeit und der Software-Module näher untersucht. Dabei zeigt eine Genauigkeitsuntersuchung die Grenzen der Messsysteme auf, deren Kenntnis für die Weiterentwicklung von Bedeutung ist. Für die Verbesserung der Software wird diese anhand ihrer vorhandenen Algorithmik untersucht und mit alternativen Berechnungverfahren verglichen. Als Ausgangspunkt für diese Untersuchungen dienen dabei die beiden Onlinemesssysteme WHEELWATCH und MoveINSPECT der Firma AICON 3D Systems GmbH. Die Motivation der Firma AICON 3D Systems GmbH ein Diplomarbeitsthema im Bereich einer Genauigkeitsuntersuchung zu stellen, liegt darin, das vorhandene theoretische ‘Wissen der Universität mit dem praktischen Anwendungsbeispiel der Onlinemesssysteme zu verbinden. Dies gilt auch für den Bereich der Weiterentwicklung der Algorithmik. Damit die Systeme auch in Zukunft wettbewerbsfähig sind, müssen diese ständig weiter entwickelt werden. Aus diesem Grund beinhaltet diese Arbeit die Untersuchung von zwei verschiedenen Problemstellungen, die sich innerhalb der Algorithmen der Systeme ergeben. Aufgabenstellung Das Ziel dieser gesamten Diplomarbeit besteht in der Verbesserung und Weiterentwicklung von Onlinemesssystemen. Dabei sollen theoretische Verfahren, die an der Universität vermittelt werden, auf die speziellen Messsysteme WHEELWATCH und MOVEINSPECT der Firma AICON 3D Systems GmbH angepasst und modifiziert werden. Insbesonders geht es um drei Aspekte der Onlinemesssysteme. Als erstes soll eine Gnauigkeitsuntersuchung der Onlinemesssysteme in Hinblick auf die Bewegungserfassung von Starrkörpern durchgeführt werden. 
Hierbei dienen statistische Grundlagen dazu, die bisherigen Genauigkeitsangaben von AICON 3D Systems GmbH durch eine statistisch fundierte Angabe zu validieren. Die allgemeine Problemstellung bezieht sich auf die Entwicklung eines Testverfahrens für die Spezifikation von Genauigkeitsangaben der detektierten Bewegung von starren Objekten, die sich im Nahbereich des Messsystems befinden. Unter Nahbereich ist hier eine maximale Entfernung von bis zu 3m zu verstehen. Die nächsten zwei Teilaspekte bestehen in der Beurteilung der bestehenden Algorithmik und dessen Verbesserung durch alternative Lösungsansätze. Hier handelt es sich um zwei allgemeine Probleme. Für das Einkamerasystem werden direkte Lösungsmöglichkeiten des Räumlichen Rückwärtsschnittes aufgezeigt. Im Fall eines Zweikamerasystems findet eine Verbesserung der Punktzuordnung statt. Diese basiert insbesondere auf der Berücksichtigung der Oberfläche der Objekte. Diese Teilaufgaben werden in den einleitenden Abschnitten der drei Teile genau spezifiziert. Aufbau der Arbeit Diese Diplomarbeit befasst sich mit drei verschiedenen Aspekten der Onlinemesssysteme WHEELWATCH und MoveINSPECT. Aus diesem Grund besteht dieses Dokument aus drei großen Teilen. Der Teil I trägt den Titel “Genauigkeitsbeurteilung von optischen Onlinemesssystemen”. Darunter befindet sich die Entwicklung von zwei Testverfahren, die speziell für die Systeme WHEELWATCH und MoveINSPECT konzipiert werden. Dabei beziehen sich die Testverfahren zur Untersuchung der Genauigkeit zum einen auf Strecken und zum anderen auf Winkel. Die genaue Vorgehensweise ist dem ersten Teil dieser Arbeit zu entnehmen. Die beiden folgenden Teile befassen sich mit der Verbesserung der Algorithmik der Onlinemesssysteme. Dabei stellt der Teil Il Alternativen zum Räumlicher Rückwärtsschnitt (RRS) des Systems WHEEL WATCH vor. 
Das Ziel dieses Abschnitts ist es das bisher von der Firma AICON 3D Systems GmbH implementierte iterative Verfahren des RRS durch ein direktes zu erweitern. Die direkte Lösung des RRS dient dann zur Bestimmung der Näherungswerte für das iterative Verfahren. Mit der Algorithmik des Systems MoveINSPECT befasst sich der Teil III. Hier werden neue Ansatzmöglichkeiten aufgezeigt, um das Problem der Zuordnung von uncodierten Marken bei einem Zweikamerasystem zu verringern.

    @MastersThesis{abendroth2009genauigkeitsbeurteilung,
    title = {Genauigkeitsbeurteilung und Untersuchungen der Zuverl\"assigkeit von optischen Onlinemessungen},
    author = {Abendroth, Birgit and zur M\"uhlen, Miriam},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2009},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Inform. Timo Dickscheid, Dipl.-Ing. Robert Godding},
    type = {Diploma Thesis},
    abstract = {Vorwort Der Titel "Genauigkeitsbeurteilung und Untersuchungen der Zuverl\"assigkeit von optischen Onlinemessungen" impliziert eine weite Bandbreite an Untersuchungsm\"oglichkeiten. Diese allgemeine Einf\"uhrung gibt einen \"Uberblick \"uber die untersuchten Aspekte dieser Arbeit. Neben der Motivation, die zu der Entstehung dieser Diplomarbeit gef\"uhrt hat, beinhaltet diese Einf\"uhrung eine grobe Gliederung der behandelten Themenschwerpunkte. Motivation Ein neues Aufgabengebiet innerhalb der Nahbereichsphotogrammetrie bietet die Konzeption von photogrammetrischen Messsystemen f\"ur industrielle Aufgabenstellungen. Die Firma AICON 3D Systems GmbH, mit deren Kooperation diese Arbeit entstand, hat sich auf die Entwicklung solcher Systeme spezialisiert. Sie geh\"ort zu den weltweit f\"uhrenden Unternehmen im Bereich der optischen kamerabasierten 3D-Vermessung. Ihr Anspruch ist es, hochgenaue und effiziente Produkte im Bereich von Inspektion und Pr\"ufung zu entwickeln und zu \"uberwachen. Ihre Produkte vertreibt das 1990 gegr\"undete Unternehmen \"uberwiegend in der Automobil-, Luft- und Raumfahrtindustrie sowie im Anlagen- und Schiffsbau. Zur Erfassung von dynamischen Vorg\"angen bietet das Unternehmen echtzeitf\"ahige optische Messsysteme an, die je nach Konfiguration in der Lage sind einzelne signalisierte Punkte als 3D-Koordinaten zu erfassen oder die Bewegung eines Starrk\"orpers aufzunehmen. Damit diese photogrammetrischen Systeme gegen\"uber anderen Messsystemen im Konkurrenzkampf bestehen k\"onnen, m\"ussen sich diese einer st\"andigen Weiterentwicklung und Verbesserung unterziehen. Dabei steht insbesondere die Wirtschaftlichkeit, Zuverl\"assigkeit und die Genauigkeit der Systeme im Vordergrund. Au{\ss}erdem ist es n\"otig einzelne Messsysteme zu charakterisieren, um sie vergleichbar zu machen und die Einsatzm\"oglichkeiten aufzuzeigen. Dazu geh\"ort neben den oben genannten Kriterien der Genauigkeit, Zuverl\"assigkeit und Wirtschaftlichkeit auch das Spektrum der Einsatzm\"oglichkeiten mit systemspezifischen Rahmenbedingungen. Des Weiteren kann ein Vergleich \"uber Hardware- und Software-Module geschehen. Im weiteren Verlauf dieser Arbeit werden die Eigenschaften der Genauigkeit und der Software-Module n\"aher untersucht. Dabei zeigt eine Genauigkeitsuntersuchung die Grenzen der Messsysteme auf, deren Kenntnis f\"ur die Weiterentwicklung von Bedeutung ist. F\"ur die Verbesserung der Software wird diese anhand ihrer vorhandenen Algorithmik untersucht und mit alternativen Berechnungverfahren verglichen. Als Ausgangspunkt f\"ur diese Untersuchungen dienen dabei die beiden Onlinemesssysteme WHEELWATCH und MoveINSPECT der Firma AICON 3D Systems GmbH. Die Motivation der Firma AICON 3D Systems GmbH ein Diplomarbeitsthema im Bereich einer Genauigkeitsuntersuchung zu stellen, liegt darin, das vorhandene theoretische 'Wissen der Universit\"at mit dem praktischen Anwendungsbeispiel der Onlinemesssysteme zu verbinden. Dies gilt auch f\"ur den Bereich der Weiterentwicklung der Algorithmik. Damit die Systeme auch in Zukunft wettbewerbsf\"ahig sind, m\"ussen diese st\"andig weiter entwickelt werden. Aus diesem Grund beinhaltet diese Arbeit die Untersuchung von zwei verschiedenen Problemstellungen, die sich innerhalb der Algorithmen der Systeme ergeben. Aufgabenstellung Das Ziel dieser gesamten Diplomarbeit besteht in der Verbesserung und Weiterentwicklung von Onlinemesssystemen. Dabei sollen theoretische Verfahren, die an der Universit\"at vermittelt werden, auf die speziellen Messsysteme WHEELWATCH und MOVEINSPECT der Firma AICON 3D Systems GmbH angepasst und modifiziert werden. Insbesonders geht es um drei Aspekte der Onlinemesssysteme. Als erstes soll eine Genauigkeitsuntersuchung der Onlinemesssysteme in Hinblick auf die Bewegungserfassung von Starrk\"orpern durchgef\"uhrt werden. Hierbei dienen statistische Grundlagen dazu, die bisherigen Genauigkeitsangaben von AICON 3D Systems GmbH durch eine statistisch fundierte Angabe zu validieren. Die allgemeine Problemstellung bezieht sich auf die Entwicklung eines Testverfahrens f\"ur die Spezifikation von Genauigkeitsangaben der detektierten Bewegung von starren Objekten, die sich im Nahbereich des Messsystems befinden. Unter Nahbereich ist hier eine maximale Entfernung von bis zu 3m zu verstehen. Die n\"achsten zwei Teilaspekte bestehen in der Beurteilung der bestehenden Algorithmik und dessen Verbesserung durch alternative L\"osungsans\"atze. Hier handelt es sich um zwei allgemeine Probleme. F\"ur das Einkamerasystem werden direkte L\"osungsm\"oglichkeiten des R\"aumlichen R\"uckw\"artsschnittes aufgezeigt. Im Fall eines Zweikamerasystems findet eine Verbesserung der Punktzuordnung statt. Diese basiert insbesondere auf der Ber\"ucksichtigung der Oberfl\"ache der Objekte. Diese Teilaufgaben werden in den einleitenden Abschnitten der drei Teile genau spezifiziert. Aufbau der Arbeit Diese Diplomarbeit befasst sich mit drei verschiedenen Aspekten der Onlinemesssysteme WHEELWATCH und MoveINSPECT. Aus diesem Grund besteht dieses Dokument aus drei gro{\ss}en Teilen. Der Teil I tr\"agt den Titel "Genauigkeitsbeurteilung von optischen Onlinemesssystemen". Darunter befindet sich die Entwicklung von zwei Testverfahren, die speziell f\"ur die Systeme WHEELWATCH und MoveINSPECT konzipiert werden. Dabei beziehen sich die Testverfahren zur Untersuchung der Genauigkeit zum einen auf Strecken und zum anderen auf Winkel. Die genaue Vorgehensweise ist dem ersten Teil dieser Arbeit zu entnehmen. Die beiden folgenden Teile befassen sich mit der Verbesserung der Algorithmik der Onlinemesssysteme. Dabei stellt der Teil II Alternativen zum R\"aumlicher R\"uckw\"artsschnitt (RRS) des Systems WHEELWATCH vor. Das Ziel dieses Abschnitts ist es das bisher von der Firma AICON 3D Systems GmbH implementierte iterative Verfahren des RRS durch ein direktes zu erweitern. Die direkte L\"osung des RRS dient dann zur Bestimmung der N\"aherungswerte f\"ur das iterative Verfahren. Mit der Algorithmik des Systems MoveINSPECT befasst sich der Teil III. Hier werden neue Ansatzm\"oglichkeiten aufgezeigt, um das Problem der Zuordnung von uncodierten Marken bei einem Zweikamerasystem zu verringern.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Abendroth2009Genauigkeitsbeurteilung.pdf},
    }

  • A. Barth, J. Siegemund, U. Franke, and W. Förstner, “Simultaneous Estimation of Pose and Motion at Highly Dynamic Turn Maneuvers,” in 31st Annual Symposium of the German Association for Pattern Recognition (DAGM), Jena, Germany, 2009, p. 262–271. doi:10.1007/978-3-642-03798-6_27
    [BibTeX] [PDF]

    The (Extended) Kalman filter has been established as a standard method for object tracking. While a constraining motion model stabilizes the tracking results given noisy measurements, it limits the ability to follow an object in non-modeled maneuvers. In the context of a stereo-vision based vehicle tracking approach, we propose and compare three different strategies to automatically adapt the dynamics of the filter to the dynamics of the object. These strategies include an IMM-based multi-filter setup, an extension of the motion model considering higher order terms, as well as the adaptive parametrization of the filter variances using an independent maximum likelihood estimator. For evaluation, various recorded real world trajectories and simulated maneuvers, including skidding, are used. The experimental results show significant improvements in the simultaneous estimation of pose and motion.

    @InProceedings{barth2009simultaneous,
    title = {Simultaneous Estimation of Pose and Motion at Highly Dynamic Turn Maneuvers},
    author = {Barth, Alexander and Siegemund, Jan and Franke, Uwe and F\"orstner, Wolfgang},
    booktitle = {31st Annual Symposium of the German Association for Pattern Recognition (DAGM)},
    year = {2009},
    address = {Jena, Germany},
    editor = {Denzler, J. and Notni, G.},
    pages = {262--271},
    publisher = {Springer},
    abstract = {The (Extended) Kalman filter has been established as a standard method for object tracking. While a constraining motion model stabilizes the tracking results given noisy measurements, it limits the ability to follow an object in non-modeled maneuvers. In the context of a stereo-vision based vehicle tracking approach, we propose and compare three different strategies to automatically adapt the dynamics of the filter to the dynamics of the object. These strategies include an IMM-based multi-filter setup, an extension of the motion model considering higher order terms, as well as the adaptive parametrization of the filter variances using an independent maximum likelihood estimator. For evaluation, various recorded real world trajectories and simulated maneuvers, including skidding, are used. The experimental results show significant improvements in the simultaneous estimation of pose and motion.},
    doi = {10.1007/978-3-642-03798-6_27},
    url = {https://www.ipb.uni-bonn.de/pdfs/Bart2009Simultaneous.pdf},
    }

  • S. D. Bauer, F. Korč, and W. Förstner, “Investigation into the classification of diseases of sugar beet leaves using multispectral images,” in Precision Agriculture 2009, Wageningen, 2009, p. 229–238.
    [BibTeX] [PDF]

    This paper reports on methods for the automatic detection and classification of leaf diseases based on high resolution multispectral images. Leaf diseases are economically important as they could cause a yield loss. Early and reliable detection of leaf diseases therefore is of utmost practical relevance – especially in the context of precision agriculture for localized treatment with fungicides. Our interest is the analysis of sugar beet due to their economical impact. Leaves of sugar beet may be infected by several diseases, such as rust (Uromyces betae), powdery mildew (Erysiphe betae) and other leaf spot diseases (Cercospora beticola and Ramularia beticola). In order to obtain best classification results we apply conditional random fields. In contrast to pixel based classifiers we are able to model the local context and contrary to object centred classifiers we simultaneously segment and classify the image. In a first investigation we analyse multispectral images of single leaves taken in a lab under well controlled illumination conditions. The photographed sugar beet leaves are healthy or either infected with the leaf spot pathogen Cercospora beticola or with the rust fungus Uromyces betae. We compare the classification methods pixelwise maximum posterior classification (MAP), objectwise MAP as soon as global MAP and global maximum posterior marginal classification using the spatial context within a conditional random field model.

    @InProceedings{bauer2009investigation,
    title = {Investigation into the classification of diseases of sugar beet leaves using multispectral images},
    author = {Bauer, Sabine Daniela and Kor{\vc}, Filip and F\"orstner, Wolfgang},
    booktitle = {Precision Agriculture 2009},
    year = {2009},
    address = {Wageningen},
    pages = {229--238},
    abstract = {This paper reports on methods for the automatic detection and classification of leaf diseases based on high resolution multispectral images. Leaf diseases are economically important as they could cause a yield loss. Early and reliable detection of leaf diseases therefore is of utmost practical relevance - especially in the context of precision agriculture for localized treatment with fungicides. Our interest is the analysis of sugar beet due to their economical impact. Leaves of sugar beet may be infected by several diseases, such as rust (Uromyces betae), powdery mildew (Erysiphe betae) and other leaf spot diseases (Cercospora beticola and Ramularia beticola). In order to obtain best classification results we apply conditional random fields. In contrast to pixel based classifiers we are able to model the local context and contrary to object centred classifiers we simultaneously segment and classify the image. In a first investigation we analyse multispectral images of single leaves taken in a lab under well controlled illumination conditions. The photographed sugar beet leaves are healthy or either infected with the leaf spot pathogen Cercospora beticola or with the rust fungus Uromyces betae. We compare the classification methods pixelwise maximum posterior classification (MAP), objectwise MAP as soon as global MAP and global maximum posterior marginal classification using the spatial context within a conditional random field model.},
    city = {Bonn},
    proceeding = {Precision Agriculture},
    url = {https://www.ipb.uni-bonn.de/pdfs/Bauer2009Investigation.pdf},
    }

  • D. Bender, “3D-Rekonstruktion von Blatträndern,” Diploma Thesis, University of Bonn, 2009.
    [BibTeX]

    \textbf{Einleitung} Der Anbau von Pflanzen in der Landwirtschaft ist durch eine zunehmende Automatisierung geprägt. Unter anderem werden hierbei Verfahren der Bildverarbeitung eingesetzt, welche zum Beispiel eine Beobachtung von Wachstum, Krankheiten oder Reifegrad der Pflanze sowie die Erkennung von Unkraut ermöglichen. Ausgehend von den Ergebnissen kann eine optimierte Produktion vollzogen und infolgedessen der Ertrag erhöht werden. Bereits an diesen Einsatzgebieten lässt sich erkennen, warum ein großes Interesse an der Verwendung von Bildverarbeitungsverfahren beim Pflanzenanbau besteht. \textbf{1.1 AufgabensteIlung} Ziel dieser Diplomarbeit ist es, eine Anwendung zu entwickeln, welche die automatische 3-D-Rekonstruktion von Blatträndern ermöglicht. Dazu wird, aufbauend auf die 2-D-Konturen mehrerer Aufnahmen eines Blattes, ein Energieminimierungsansatz entwickelt, durch den die optimale 3-D-Kontur berechnet werden kann. Dieses Verfahren wird mit realen Aufnahmen von Rübenblättern getestet, wodurch jedoch nur ein visueller Eindruck über die Qualität der Ergebnisse gewonnen werden kann. Um fundierte Aussagen über die Qualität der Ergebnisse treffen zu können, soll im Anschluss ein Verfahren zur Erstellung von synthetischen Szenen erarbeitet und mit diesen eine statistische Auswertung vollzogen werden. \textbf{1.2 Motivation} Als Silhouette wird der Umriss eines abgebildeten Körpers beschrieben. Sie ist in den meisten Aufnahmen leicht zu extrahieren und häufig der stärkste Hinweis für das abgebildete 3-D-Modell. Aus diesem Grund wird in verschiedenen Verfahren zur vollständigen 3-D-Rekonstruktion die Projektion des 3-D-Modells auf die Silhouetten der Aufnahmen als Kriterium verwendet [PZF05]. Bei Abbildungen von Blättern stimmt für Aufnahmen ohne Scheinkonturen die Silhouette mit der jeweiligen Kontur des Blattes überein. Dies ermöglicht eine Rekonstruktion des Blattrandes durch den in der vorliegenden Arbeit beschriebenen Algorithmus. 
Ausgehend von dieser Kurve im 3-D-Raum können bereits Aussagen über das Wachstum einer Pflanze getroffen oder bestimmte Klassifizierungen vorgenommen werden. Des Weiteren kann basierend auf der berechneten 3- D- Kontur eine vollständige 3- D- Rekonstruktion vollzogen werden. Insbesondere bei Blättern sind hierbei der Einfluss der Beleuchtung und hierdurch auftretende Spiegelungen innerhalb des Blattes zu beachten, welche die komplette 3-D-Rekonstruktion erheblich erschweren. Möglicherweise kann die Qualität einer kompletten 3-D-Rekonstruktion durch die Übergabe der bekannten 3-D-Kontur verbessert werden. \textbf{1.3 Verwandte Arbeiten} Bei der Rekonstruktion von 3-D-Kurven durch ihre Abbildungen in mehrere Bilder handelt es sich um ein Problem, welches bis zum aktuellen Zeitpunkt noch nicht umfassend erforscht worden ist. Jedoch sind vereinzelt Arbeiten zu finden, welche die Fragestellung bearbeiten. Von diesen werden im Folgenden zwei Arbeiten kurz beschrieben, deren Hauptaugenmerk ebenfalls auf der 3-D-Rekonstruktion von Blätträndern liegt: In [ZWZY08] wird ein Verfahren zur 3-D-Rekonstruktion von Maispflanzen vorgestellt, wobei die Rekonstruktion eines Blattes (Abbildung 1.1) auf zwei Aufnahmen basiert. In diesen werden mithilfe des Canny-Algorithmus [Can86] Kanten extrahiert, aus welchen eine automatische Auswahl getroffen wird. Die anschließende Zuordnung homologer Kanten wird jedoch manuell vollzogen. Es folgen die 3-D-Rekonstruktionen des Blattrandes und der in einer Maispflanze zentral verlaufenden Blattader durch ein Schneiden der Kurven im Raum. Anschließend wird in der Arbeit eine Oberfläche ausgehend von den gefundenen 3-D-Konturen triangliert. Dies ist möglich, da Blätter von Maispflanzen sehr schmal sind und daher die rekonstruierten 3-D-Konturen nahe beieinander liegen. In [Nie04] wird die 3-D-Rekonstruktion von Blättern junger Maispflanzen mit NURBS [PT96] vollzogen. 
Dabei werden zunächst die Konturen der Blätter manuell gekennzeichnet, um anschließend als Eingabe für die Konstruktion des 3-D-Modells zu dienen. Die theoretische Grundlage ist ein Verfahren, das für eine spezielle Konfiguration von drei Kameras eine 3-D-NURBS-Kurve eines freigeformten, linienähnlichen Objektes konstruiert [DXP+ü3]. Zunächst wird dazu die Abbildung des Objektes in den jeweiligen Bildern in Form von 2-D-NURBS-Kurven approximiert. Sind diese in allen Bildern durch eine gleiche Anzahl von Kontrollpunkten und einen übereinstimmenden Knotenvektor dargestellt, so können die Kontrollpunkte im 3-D-Raum rekonstruiert werden und führen zur gesuchten Rekonstruktion durch eine 3-D-NURBS-Kurve. \textbf{1.4 Aufbau der Arbeit} Zu Beginn wird in Kapitel 2 die Vorverarbeitung der Eingabebilder beschrieben. In diesen wird zunächst mit einem Graph-Cut- Verfahren das Blatt segmentiert und anschließend seine Kontur extrahiert. Es folgt die Berechnung einer Distanztransformation des Konturbildes, wodurch für jeden Bildpunkt der Abstand zur Kontur angegeben wird. In Kapitel 3 wird der in dieser Arbeit vorgestellte Algorithmus zur 3-D-Rekonstruktion des Blattrandes beschrieben. Anschließend werden in Kapitel 4 die Erstellung einer synthetischen Szene zur Bewertung der Ergebnisse und die verwendeten Mittel zur statistischen Auswertung der Fehler dargestellt. In Kapitel 5 werden für reale und synthetische Bilder die Ergebnisse durchgeführter Experimente präsentiert und erörtert. Zum Abschluss der Arbeit folgen in Kapitel 6 eine Zusammenfassung und ein Ausblick auf mögliche Weiterführungen und Alternativen des vorgestellten Verfahrens.

    @MastersThesis{bender20093d,
    title = {3D-Rekonstruktion von Blattr\"andern},
    author = {Bender, Daniel},
    school = {University of Bonn},
    year = {2009},
    note = {Betreuung: Prof.Dr.-Ing. Wolfgang F\"orstner, Prof.Dr. Daniel Cremers},
    type = {Diploma Thesis},
    abstract = {\textbf{Einleitung} Der Anbau von Pflanzen in der Landwirtschaft ist durch eine zunehmende Automatisierung gepr\"agt. Unter anderem werden hierbei Verfahren der Bildverarbeitung eingesetzt, welche zum Beispiel eine Beobachtung von Wachstum, Krankheiten oder Reifegrad der Pflanze sowie die Erkennung von Unkraut erm\"oglichen. Ausgehend von den Ergebnissen kann eine optimierte Produktion vollzogen und infolgedessen der Ertrag erh\"oht werden. Bereits an diesen Einsatzgebieten l\"asst sich erkennen, warum ein gro{\ss}es Interesse an der Verwendung von Bildverarbeitungsverfahren beim Pflanzenanbau besteht. \textbf{1.1 AufgabensteIlung} Ziel dieser Diplomarbeit ist es, eine Anwendung zu entwickeln, welche die automatische 3-D-Rekonstruktion von Blattr\"andern erm\"oglicht. Dazu wird, aufbauend auf die 2-D-Konturen mehrerer Aufnahmen eines Blattes, ein Energieminimierungsansatz entwickelt, durch den die optimale 3-D-Kontur berechnet werden kann. Dieses Verfahren wird mit realen Aufnahmen von R\"ubenbl\"attern getestet, wodurch jedoch nur ein visueller Eindruck \"uber die Qualit\"at der Ergebnisse gewonnen werden kann. Um fundierte Aussagen \"uber die Qualit\"at der Ergebnisse treffen zu k\"onnen, soll im Anschluss ein Verfahren zur Erstellung von synthetischen Szenen erarbeitet und mit diesen eine statistische Auswertung vollzogen werden. \textbf{1.2 Motivation} Als Silhouette wird der Umriss eines abgebildeten K\"orpers beschrieben. Sie ist in den meisten Aufnahmen leicht zu extrahieren und h\"aufig der st\"arkste Hinweis f\"ur das abgebildete 3-D-Modell. Aus diesem Grund wird in verschiedenen Verfahren zur vollst\"andigen 3-D-Rekonstruktion die Projektion des 3-D-Modells auf die Silhouetten der Aufnahmen als Kriterium verwendet [PZF05]. Bei Abbildungen von Bl\"attern stimmt f\"ur Aufnahmen ohne Scheinkonturen die Silhouette mit der jeweiligen Kontur des Blattes \"uberein. 
Dies erm\"oglicht eine Rekonstruktion des Blattrandes durch den in der vorliegenden Arbeit beschriebenen Algorithmus. Ausgehend von dieser Kurve im 3-D-Raum k\"onnen bereits Aussagen \"uber das Wachstum einer Pflanze getroffen oder bestimmte Klassifizierungen vorgenommen werden. Des Weiteren kann basierend auf der berechneten 3- D- Kontur eine vollst\"andige 3- D- Rekonstruktion vollzogen werden. Insbesondere bei Bl\"attern sind hierbei der Einfluss der Beleuchtung und hierdurch auftretende Spiegelungen innerhalb des Blattes zu beachten, welche die komplette 3-D-Rekonstruktion erheblich erschweren. M\"oglicherweise kann die Qualit\"at einer kompletten 3-D-Rekonstruktion durch die \"Ubergabe der bekannten 3-D-Kontur verbessert werden. \textbf{1.3 Verwandte Arbeiten} Bei der Rekonstruktion von 3-D-Kurven durch ihre Abbildungen in mehrere Bilder handelt es sich um ein Problem, welches bis zum aktuellen Zeitpunkt noch nicht umfassend erforscht worden ist. Jedoch sind vereinzelt Arbeiten zu finden, welche die Fragestellung bearbeiten. Von diesen werden im Folgenden
    zwei Arbeiten kurz beschrieben, deren Hauptaugenmerk ebenfalls auf der 3-D-Rekonstruktion von Bl\"attr\"andern liegt: In [ZWZY08] wird ein Verfahren zur 3-D-Rekonstruktion von Maispflanzen vorgestellt, wobei die Rekonstruktion eines Blattes (Abbildung 1.1) auf zwei Aufnahmen basiert. In diesen werden mithilfe des Canny-Algorithmus [Can86] Kanten extrahiert, aus welchen eine automatische Auswahl getroffen wird. Die anschlie{\ss}ende Zuordnung homologer Kanten wird jedoch manuell vollzogen. Es folgen die 3-D-Rekonstruktionen des Blattrandes und der in einer Maispflanze zentral verlaufenden Blattader durch ein Schneiden der Kurven im Raum. Anschlie{\ss}end wird in der Arbeit eine Oberfl\"ache ausgehend von den gefundenen 3-D-Konturen triangliert. Dies ist m\"oglich, da Bl\"atter von Maispflanzen sehr schmal sind und daher die rekonstruierten 3-D-Konturen nahe beieinander liegen. In [Nie04] wird die 3-D-Rekonstruktion von Bl\"attern junger Maispflanzen mit NURBS [PT96] vollzogen. Dabei werden zun\"achst die Konturen der Bl\"atter manuell gekennzeichnet, um anschlie{\ss}end als Eingabe f\"ur die Konstruktion des 3-D-Modells zu dienen. Die theoretische Grundlage ist ein Verfahren, das f\"ur eine spezielle Konfiguration von drei Kameras eine 3-D-NURBS-Kurve eines freigeformten, linien\"ahnlichen Objektes konstruiert [DXP+\"u3]. Zun\"achst wird dazu die Abbildung des Objektes in den jeweiligen Bildern in Form von 2-D-NURBS-Kurven approximiert. Sind diese in allen Bildern durch eine gleiche Anzahl von Kontrollpunkten und einen \"ubereinstimmenden Knotenvektor dargestellt, so k\"onnen die Kontrollpunkte im 3-D-Raum rekonstruiert werden und f\"uhren zur gesuchten Rekonstruktion durch eine 3-D-NURBS-Kurve. \textbf{1.4 Aufbau der Arbeit} Zu Beginn wird in Kapitel 2 die Vorverarbeitung der Eingabebilder beschrieben. In diesen wird zun\"achst mit einem Graph-Cut- Verfahren das Blatt segmentiert und anschlie{\ss}end seine Kontur extrahiert. 
Es folgt die Berechnung einer Distanztransformation des Konturbildes, wodurch f\"ur jeden Bildpunkt der Abstand zur Kontur angegeben wird. In Kapitel 3 wird der in dieser Arbeit vorgestellte Algorithmus zur 3-D-Rekonstruktion des Blattrandes beschrieben. Anschlie{\ss}end werden in Kapitel 4 die Erstellung einer synthetischen Szene zur Bewertung der Ergebnisse und die verwendeten Mittel zur statistischen Auswertung der Fehler dargestellt. In Kapitel 5 werden f\"ur reale und synthetische Bilder die Ergebnisse durchgef\"uhrter Experimente pr\"asentiert und er\"ortert. Zum Abschluss der Arbeit folgen in Kapitel 6 eine Zusammenfassung und ein Ausblick auf m\"ogliche Weiterf\"uhrungen und Alternativen des vorgestellten Verfahrens.},
    }

  • M. Bennewitz, C. Stachniss, S. Behnke, and W. Burgard, “Utilizing Reflection Properties of Surfaces to Improve Mobile Robot Localization,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Kobe, Japan, 2009.
    [BibTeX]
    [none]
    @InProceedings{bennewitz2009,
    title = {Utilizing Reflection Properties of Surfaces to Improve Mobile Robot Localization},
    author = {M. Bennewitz and Stachniss, C. and Behnke, S. and Burgard, W.},
    booktitle = icra,
    year = {2009},
    address = {Kobe, Japan},
    abstract = {[none]},
    timestamp = {2014.04.24},
    }

  • W. Burgard, C. Stachniss, G. Grisetti, B. Steder, R. Kümmerle, C. Dornhege, M. Ruhnke, A. Kleiner, and J. D. Tardós, “A Comparison of SLAM Algorithms Based on a Graph of Relations,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2009.
    [BibTeX] [PDF]
    [none]
    @InProceedings{burgard2009,
    title = {A Comparison of {SLAM} Algorithms Based on a Graph of Relations},
    author = {W. Burgard and C. Stachniss and G. Grisetti and B. Steder and R. K\"ummerle and C. Dornhege and M. Ruhnke and A. Kleiner and J.D. Tard\'os},
    booktitle = iros,
    year = {2009},
    abstract = {[none]},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/burgard09iros.pdf},
    }

  • T. Dickscheid and W. Förstner, “Evaluating the Suitability of Feature Detectors for Automatic Image Orientation Systems,” in 7th International Conf. on Computer Vision Systems (ICVS’09)., Liege, Belgium, 2009, p. 305–314. doi:10.1007/978-3-642-04667-4_31
    [BibTeX] [PDF]

    We investigate the suitability of different local feature detectors for the task of automatic image orientation under different scene texturings. Building on an existing system for image orientation, we vary the applied operators while keeping the strategy fixed, and evaluate the results. An emphasis is put on the effect of combining detectors for calibrating difficult datasets. Besides some of the most popular scale and affine invariant detectors available, we include two recently proposed operators in the setup: A scale invariant junction detector and a scale invariant detector based on the local entropy of image patches. After describing the system, we present a detailed performance analysis of the different operators on a number of image datasets. We both analyze ground-truth-deviations and results of a final bundle adjustment, including observations, 3D object points and camera poses. The paper concludes with hints on the suitability of the different combinations of detectors, and an assessment of the potential of such automatic orientation procedures.

    @InProceedings{dickscheid2009evaluating,
    title = {Evaluating the Suitability of Feature Detectors for Automatic Image Orientation Systems},
    author = {Dickscheid, Timo and F\"orstner, Wolfgang},
    booktitle = {7th International Conf. on Computer Vision Systems (ICVS'09).},
    year = {2009},
    address = {Liege, Belgium},
    editor = {Mario Fritz and Bernt Schiele and Justus H. Piater},
    pages = {305--314},
    publisher = {Springer},
    series = {Lecture Notes in Computer Science},
    volume = {5815},
    abstract = {We investigate the suitability of different local feature detectors for the task of automatic image orientation under different scene texturings. Building on an existing system for image orientation, we vary the applied operators while keeping the strategy fixed, and evaluate the results. An emphasis is put on the effect of combining detectors for calibrating difficult datasets. Besides some of the most popular scale and affine invariant detectors available, we include two recently proposed operators in the setup: A scale invariant junction detector and a scale invariant detector based on the local entropy of image patches. After describing the system, we present a detailed performance analysis of the different operators on a number of image datasets. We both analyze ground-truth-deviations and results of a final bundle adjustment, including observations, 3D object points and camera poses. The paper concludes with hints on the suitability of the different combinations of detectors, and an assessment of the potential of such automatic orientation procedures.},
    doi = {10.1007/978-3-642-04667-4_31},
    isbn = {978-3-642-04666-7},
    location = {Heidelberg},
    url = {https://www.ipb.uni-bonn.de/pdfs/Dickscheid2009Evaluating.pdf},
    }

  • M. Drauschke, “Documentation: Segmentation and Graph Construction of HMRF,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2009-03, 2009.
    [BibTeX] [PDF]

    This is a technical report for presenting a documentation on the segmentation and the graph construction of a Hierarchical Markov random field (HMRF). The segmentation is based on multiscale analysis and watershed regions as presented in [Drauschke et al., 2006]. The region’s development is tracked over the scales, which defines a region hierarchy graph. This graph is used to improve the segmentation by reforming the regions geometrically more precisely. This work is taken from [Drauschke, 2009]. Furthermore, we determine a region adjacency graph from each image partition of all scales. The detected image regions, their adjacent regions and their hierarchical neighbors are saved into an xml-file for a convenient output.

    @TechReport{drauschke2009documentation,
    title = {Documentation: Segmentation and Graph Construction of HMRF},
    author = {Drauschke, Martin},
    institution = {Department of Photogrammetry, University of Bonn},
    year = {2009},
    number = {TR-IGG-P-2009-03},
    abstract = {This is a technical report for presenting a documentation on the segmentation and the graph construction of a Hierarchical Markov random field (HMRF). The segmentation is based on multiscale analysis and watershed regions as presented in [Drauschke et al., 2006]. The region's development is tracked over the scales, which defines a region hierarchy graph. This graph is used to improve the segmentation by reforming the regions geometrically more precisely. This work is taken from [Drauschke, 2009]. Furthermore, we determine a region adjacency graph from each image partition of all scales. The detected image regions, their adjacent regions and their hierarchical neighbors are saved into an xml-file for a convenient output.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Drauschke2009Documentation.pdf},
    }

  • M. Drauschke, “An Irregular Pyramid for Multi-scale Analysis of Objects and their Parts,” in 7th IAPR-TC-15 Workshop on Graph-based Representations in Pattern Recognition, Venice, Italy, 2009, p. 293–303. doi:10.1007/978-3-642-02124-4_30
    [BibTeX] [PDF]

    We present an irregular image pyramid which is derived from multi-scale analysis of segmented watershed regions. Our framework is based on the development of regions in the Gaussian scale-space, which is represented by a region hierarchy graph. Using this structure, we are able to determine geometrically precise borders of our segmented regions using a region focusing. In order to handle the complexity, we select only stable regions and regions resulting from a merging event, which enables us to keep the hierarchical structure of the regions. Using this framework, we are able to detect objects of various scales in an image. Finally, the hierarchical structure is used for describing these detected regions as aggregations of their parts. We investigate the usefulness of the regions for interpreting images showing building facades with parts like windows, balconies or entrances.

    @InProceedings{drauschke2009irregular,
    title = {An Irregular Pyramid for Multi-scale Analysis of Objects and their Parts},
    author = {Drauschke, Martin},
    booktitle = {7th IAPR-TC-15 Workshop on Graph-based Representations in Pattern Recognition},
    year = {2009},
    address = {Venice, Italy},
    pages = {293--303},
    abstract = {We present an irregular image pyramid which is derived from multi-scale analysis of segmented watershed regions. Our framework is based on the development of regions in the Gaussian scale-space, which is represented by a region hierarchy graph. Using this structure, we are able to determine geometrically precise borders of our segmented regions using a region focusing. In order to handle the complexity, we select only stable regions and regions resulting from a merging event, which enables us to keep the hierarchical structure of the regions. Using this framework, we are able to detect objects of various scales in an image. Finally, the hierarchical structure is used for describing these detected regions as aggregations of their parts. We investigate the usefulness of the regions for interpreting images showing building facades with parts like windows, balconies or entrances.},
    city = {Bonn},
    doi = {10.1007/978-3-642-02124-4_30},
    url = {https://www.ipb.uni-bonn.de/pdfs/Drauschke2009Irregular.pdf},
    }

  • M. Drauschke, W. Förstner, and A. Brunn, “Multidodging: Ein effizienter Algorithmus zur automatischen Verbesserung von digitalisierten Luftbildern,” in Publikationen der DGPF, Band 18: Zukunft mit Tradition, Jena, 2009, p. 61–68.
    [BibTeX] [PDF]

    Wir haben ein effizientes, automatisches Verfahren zur Verbesserung von digitalisierten Luftbildern entwickelt. Das Verfahren MULTIDODGING dient im Kontext der visuellen Aufbereitung von historischen Aufnahmen aus dem 2. Weltkrieg. Bei der Bildverbesserung mittels MULTIDODGING wird das eingescannte Bild zunächst in sich nicht überlappende rechteckige Bildausschnitte unterteilt. In jedem Bildausschnitt wird eine Histogrammverebnung durchgeführt, die im Allgemeinen zu einer Verstärkung des Kontrasts führt. Durch die regionale Veränderung des Bildes entstehen sichtbare Grenzen zwischen den Bildausschnitten, die durch eine Interpolation entfernt werden. In der Anwendung des bisherigen Verfahrens hat sich gezeigt, dass der Kontrast in vielen lokalen Stellen zu stark ist. Deshalb kann zum Abschluss die Spannweite der Grauwerte zusätzlich reduziert werden, wobei diese Kontrastanpassung regional aus den Gradienten im Bildausschnitt berechnet wird. Dieser Beitrag beschreibt und analysiert das Verfahren im Detail.

    @InProceedings{drauschke2009multidodging,
    title = {Multidodging: Ein effizienter Algorithmus zur automatischen Verbesserung von digitalisierten Luftbildern},
    author = {Drauschke, Martin and F\"orstner, Wolfgang and Brunn, Ansgar},
    booktitle = {Publikationen der DGPF, Band 18: Zukunft mit Tradition},
    year = {2009},
    address = {Jena},
    pages = {61--68},
    abstract = {Wir haben ein effizientes, automatisches Verfahren zur Verbesserung von digitalisierten Luftbildern entwickelt. Das Verfahren MULTIDODGING dient im Kontext der visuellen Aufbereitung von historischen Aufnahmen aus dem 2. Weltkrieg. Bei der Bildverbesserung mittels MULTIDODGING wird das eingescannte Bild zun\"achst in sich nicht \"uberlappende rechteckige Bildausschnitte unterteilt. In jedem Bildausschnitt wird eine Histogrammverebnung durchgef\"uhrt, die im Allgemeinen zu einer Verst\"arkung des Kontrasts f\"uhrt. Durch die regionale Ver\"anderung des Bildes entstehen sichtbare Grenzen zwischen den Bildausschnitten, die durch eine Interpolation entfernt werden. In der Anwendung des bisherigen Verfahrens hat sich gezeigt, dass der Kontrast in vielen lokalen Stellen zu stark ist. Deshalb kann zum Abschluss die Spannweite der Grauwerte zus\"atzlich reduziert werden, wobei diese Kontrastanpassung regional aus den Gradienten im Bildausschnitt berechnet wird. Dieser Beitrag beschreibt und analysiert das Verfahren im Detail.},
    city = {Bonn},
    url = {https://www.ipb.uni-bonn.de/pdfs/Drauschke2009Multidodging.pdf},
    }

  • M. Drauschke, R. Roscher, T. Läbe, and W. Förstner, “Improving Image Segmentation using Multiple View Analysis,” in Object Extraction for 3D City Models, Road Databases and Traffic Monitoring – Concepts, Algorithms and Evaluation (CMRT09), 2009, pp. 211-216.
    [BibTeX] [PDF]

    In our contribution, we improve image segmentation by integrating depth information from multi-view analysis. We assume the object surface in each region can be represented by a low order polynomial, and estimate the best fitting parameters of a plane using those points of the point cloud, which are mapped to the specific region. We can merge adjacent image regions, which cannot be distinguished geometrically. We demonstrate the approach for finding spatially planar regions on aerial images. Furthermore, we discuss the possibilities of extending of our approach towards segmenting terrestrial facade images.

    @InProceedings{drauschke2009improving,
    title = {Improving Image Segmentation using Multiple View Analysis},
    author = {Drauschke, Martin and Roscher, Ribana and L\"abe, Thomas and F\"orstner, Wolfgang},
    booktitle = {Object Extraction for 3D City Models, Road Databases and Traffic Monitoring - Concepts, Algorithms and Evaluation (CMRT09)},
    year = {2009},
    pages = {211--216},
    abstract = {In our contribution, we improve image segmentation by integrating depth information from multi-view analysis. We assume the object surface in each region can be represented by a low order polynomial, and estimate the best fitting parameters of a plane using those points of the point cloud, which are mapped to the specific region. We can merge adjacent image regions, which cannot be distinguished geometrically. We demonstrate the approach for finding spatially planar regions on aerial images. Furthermore, we discuss the possibilities of extending of our approach towards segmenting terrestrial facade images.},
    city = {Paris},
    url = {https://www.ipb.uni-bonn.de/pdfs/Drauschke2009Improving.pdf},
    }

  • F. Endres, J. Hess, N. Franklin, C. Plagemann, C. Stachniss, and W. Burgard, “Estimating Range Information from Monocular Vision,” in Workshop Regression in Robotics – Approaches and Applications at Robotics: Science and Systems (RSS), Seattle, WA, USA, 2009.
    [BibTeX]
    [none]
    @InProceedings{endres2009,
    title = {Estimating Range Information from Monocular Vision},
    author = {Endres, F. and Hess, J. and Franklin, N. and Plagemann, C. and Stachniss, C. and Burgard, W.},
    booktitle = {Workshop Regression in Robotics - Approaches and Applications at Robotics: Science and Systems (RSS)},
    year = {2009},
    address = {Seattle, WA, USA},
    timestamp = {2014.04.24},
    }

  • F. Endres, C. Plagemann, C. Stachniss, and W. Burgard, “Scene Analysis using Latent Dirichlet Allocation,” in Proc. of Robotics: Science and Systems (RSS), Seattle, WA, USA, 2009.
    [BibTeX] [PDF]
    [none]
    @InProceedings{endres2009a,
    title = {Scene Analysis using Latent Dirichlet Allocation},
    author = {Endres, F. and Plagemann, C. and Stachniss, C. and Burgard, W.},
    booktitle = rss,
    year = {2009},
    address = {Seattle, WA, USA},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/endres09rss-draft.pdf},
    }

  • C. Eppner, J. Sturm, M. Bennewitz, C. Stachniss, and W. Burgard, “Imitation Learning with Generalized Task Descriptions,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Kobe, Japan, 2009.
    [BibTeX]
    [none]
    @InProceedings{eppner2009,
    title = {Imitation Learning with Generalized Task Descriptions},
    author = {Eppner, C. and Sturm, J. and Bennewitz, M. and Stachniss, C. and Burgard, W.},
    booktitle = icra,
    year = {2009},
    address = {Kobe, Japan},
    timestamp = {2014.04.24},
    }

  • W. Förstner, “Computer Vision and Remote Sensing – Lessons Learned,” in Photogrammetric Week 2009, Heidelberg, 2009, p. 241–249.
    [BibTeX] [PDF]

    Photogrammetry has significantly been influenced by its two neighbouring fields, namely Computer Vision and Remote Sensing. Today, Photogrammetry has become a part of Remote Sensing. The paper reflects its growing relations with Computer Vision, based on a more than 25 years experience of the author with the fascinating field between cognitive, natural and engineering science, which stimulated his own research and transferred him into a wanderer between two worlds.

    @InProceedings{forstner2009computer,
    title = {Computer Vision and Remote Sensing - Lessons Learned},
    author = {F\"orstner, Wolfgang},
    booktitle = {Photogrammetric Week 2009},
    year = {2009},
    address = {Heidelberg},
    pages = {241--249},
    abstract = {Photogrammetry has significantly been influenced by its two neighbouring fields, namely Computer Vision and Remote Sensing. Today, Photogrammetry has become a part of Remote Sensing. The paper reflects its growing relations with Computer Vision, based on a more than 25 years experience of the author with the fascinating field between cognitive, natural and engineering science, which stimulated his own research and transferred him into a wanderer between two worlds.},
    city = {Stuttgart},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner2009Computer.pdf},
    note = {Slides are available at \url{https://www.ipb.uni-bonn.de/pdfs/Forstner2009Computer_slides.pdf} },
    }

  • W. Förstner, “Mustererkennung in der Fernerkundung,” in Publikationen der DGPF, Band 18: Zukunft mit Tradition, Jena, 2009, p. 129–136.
    [BibTeX] [PDF]

    Der Beitrag beleuchtet die Forschung in Photogrammetrie und Fernerkundung unter dem Blickwinkel der Methoden, die für die Lösung der zentrale Aufgabe beider Fachgebiete, der Bildinterpretation, erforderlich sind, sowohl zur Integration beider Gebiete, wie zu einer effizienten Gestaltung gemeinsamerer Forschung. Ingredienzien für erfolgreiche Forschung in diesem Bereich sind Fokussierung auf Themen, die in ca. eine Dekade bearbeitet werden können, enge Kooperation mit den fachlich angrenzenden Disziplinen – der Mustererkennung und dem maschinellen Lernen – , kompetetives Benchmarking, Softwareaustausch und Integration der Forschungsthemen in die Ausbildung. Der Beitrag skizziert ein Forschungsprogamm mit den Themen ‘Mustererkennung in der Fernerkundung’ und Interpretation von LIDARDaten das, interdisziplinär ausgerichtet, die Photogrammetrie mit den unmittelbaren Nachbardisziplinen zunehmend verweben könnte, und – nach Ansicht des Autors – zur Erhaltung der Innovationskraft auch dringend erforderlich ist.

    @InProceedings{forstner2009mustererkennung,
    title = {Mustererkennung in der Fernerkundung},
    author = {F\"orstner, Wolfgang},
    booktitle = {Publikationen der DGPF, Band 18: Zukunft mit Tradition},
    year = {2009},
    address = {Jena},
    pages = {129--136},
    abstract = {Der Beitrag beleuchtet die Forschung in Photogrammetrie und Fernerkundung unter dem Blickwinkel der Methoden, die f\"ur die L\"osung der zentrale Aufgabe beider Fachgebiete, der Bildinterpretation, erforderlich sind, sowohl zur Integration beider Gebiete, wie zu einer effizienten Gestaltung gemeinsamerer Forschung. Ingredienzien f\"ur erfolgreiche Forschung in diesem Bereich sind Fokussierung auf Themen, die in ca. eine Dekade bearbeitet werden k\"onnen, enge Kooperation mit den fachlich angrenzenden Disziplinen - der Mustererkennung und dem maschinellen Lernen - , kompetitives Benchmarking, Softwareaustausch und Integration der Forschungsthemen in die Ausbildung. Der Beitrag skizziert ein Forschungsprogramm mit den Themen 'Mustererkennung in der Fernerkundung' und Interpretation von LIDARDaten das, interdisziplin\"ar ausgerichtet, die Photogrammetrie mit den unmittelbaren Nachbardisziplinen zunehmend verweben k\"onnte, und - nach Ansicht des Autors - zur Erhaltung der Innovationskraft auch dringend erforderlich ist.},
    city = {Bonn},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner2009Mustererkennung.pdf},
    }

  • W. Förstner, T. Dickscheid, and F. Schindler, “On the Completeness of Coding with Image Features,” in 20th British Machine Vision Conf., London, UK, 2009. doi:10.5244/C.23.1
    [BibTeX] [PDF]

    We present a scheme for measuring completeness of local feature extraction in terms of image coding. Completeness is here considered as good coverage of relevant image information by the features. As each feature requires a certain number of bits which are representative for a certain subregion of the image, we interpret the coverage as a sparse coding scheme. The measure is therefore based on a comparison of two densities over the image domain: An entropy density pH(x) based on local image statistics, and a feature coding density pc(x) which is directly computed from each particular set of local features. Motivated by the coding scheme in JPEG, the entropy distribution is derived from the power spectrum of local patches around each pixel position in a statistically sound manner. As the total number of bits for coding the image and for representing it with local features may be different, we measure incompleteness by the Hellinger distance between pH(x) and pc(x). We will derive a procedure for measuring incompleteness of possibly mixed sets of local features and show results on standard datasets using some of the most popular region and keypoint detectors, including Lowe, MSER and the recently published SFOP detectors. Furthermore, we will draw some interesting conclusions about the complementarity of detectors.

    @InProceedings{forstner2009completeness,
    author    = {F\"orstner, Wolfgang and Dickscheid, Timo and Schindler, Falko},
    title     = {On the Completeness of Coding with Image Features},
    booktitle = {20th British Machine Vision Conf.},
    year      = {2009},
    address   = {London, UK},
    abstract  = {We present a scheme for measuring completeness of local feature extraction in terms of image coding. Completeness is here considered as good coverage of relevant image information by the features. As each feature requires a certain number of bits which are representative for a certain subregion of the image, we interpret the coverage as a sparse coding scheme. The measure is therefore based on a comparison of two densities over the image domain: An entropy density pH(x) based on local image statistics, and a feature coding density pc(x) which is directly computed from each particular set of local features. Motivated by the coding scheme in JPEG, the entropy distribution is derived from the power spectrum of local patches around each pixel position in a statistically sound manner. As the total number of bits for coding the image and for representing it with local features may be different, we measure incompleteness by the Hellinger distance between pH(x) and pc(x). We will derive a procedure for measuring incompleteness of possibly mixed sets of local features and show results on standard datasets using some of the most popular region and keypoint detectors, including Lowe, MSER and the recently published SFOP detectors. Furthermore, we will draw some interesting conclusions about the complementarity of detectors.},
    doi       = {10.5244/C.23.1},
    url       = {https://www.ipb.uni-bonn.de/pdfs/Forstner2009Completeness.pdf},
    }

  • W. Förstner, T. Dickscheid, and F. Schindler, “Detecting Interpretable and Accurate Scale-Invariant Keypoints,” in 12th IEEE International Conf. on Computer Vision (ICCV’09), Kyoto, Japan, 2009, p. 2256–2263. doi:10.1109/ICCV.2009.5459458
    [BibTeX] [PDF]

    This paper presents a novel method for detecting scale invariant keypoints. It fills a gap in the set of available methods, as it proposes a scale-selection mechanism for junction-type features. The method is a scale-space extension of the detector proposed by Förstner (1994) and uses the general spiral feature model of Bigün (1990) to unify different types of features within the same framework. By locally optimising the consistency of image regions with respect to the spiral model, we are able to detect and classify image structures with complementary properties over scalespace, especially star and circular shapes as interpretable and identifiable subclasses. Our motivation comes from calibrating images of structured scenes with poor texture, where blob detectors alone cannot find sufficiently many keypoints, while existing corner detectors fail due to the lack of scale invariance. The procedure can be controlled by semantically clear parameters. One obtains a set of keypoints with position, scale, type and consistency measure. We characterise the detector and show results on common benchmarks. It competes in repeatability with the Lowe detector, but finds more stable keypoints in poorly textured areas, and shows comparable or higher accuracy than other recent detectors. This makes it useful for both object recognition and camera calibration.

    @InProceedings{forstner2009detecting,
    author    = {F\"orstner, Wolfgang and Dickscheid, Timo and Schindler, Falko},
    title     = {Detecting Interpretable and Accurate Scale-Invariant Keypoints},
    booktitle = {12th IEEE International Conf. on Computer Vision (ICCV'09)},
    year      = {2009},
    address   = {Kyoto, Japan},
    pages     = {2256--2263},
    abstract  = {This paper presents a novel method for detecting scale invariant keypoints. It fills a gap in the set of available methods, as it proposes a scale-selection mechanism for junction-type features. The method is a scale-space extension of the detector proposed by F\"orstner (1994) and uses the general spiral feature model of Big\"un (1990) to unify different types of features within the same framework. By locally optimising the consistency of image regions with respect to the spiral model, we are able to detect and classify image structures with complementary properties over scalespace, especially star and circular shapes as interpretable and identifiable subclasses. Our motivation comes from calibrating images of structured scenes with poor texture, where blob detectors alone cannot find sufficiently many keypoints, while existing corner detectors fail due to the lack of scale invariance. The procedure can be controlled by semantically clear parameters. One obtains a set of keypoints with position, scale, type and consistency measure. We characterise the detector and show results on common benchmarks. It competes in repeatability with the Lowe detector, but finds more stable keypoints in poorly textured areas, and shows comparable or higher accuracy than other recent detectors. This makes it useful for both object recognition and camera calibration.},
    doi       = {10.1109/ICCV.2009.5459458},
    url       = {https://www.ipb.uni-bonn.de/pdfs/Forstner2009Detectinga.pdf},
    }

  • B. Frank, C. Stachniss, R. Schmedding, W. Burgard, and M. Teschner, “Real-world Robot Navigation amongst Deformable Obstacles,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Kobe, Japan, 2009.
    [BibTeX]
    [none]
    @InProceedings{frank2009,
    title = {Real-world Robot Navigation amongst Deformable Obstacles},
    author = {B. Frank and C. Stachniss and R. Schmedding and W. Burgard and M. Teschner},
    booktitle = icra,
    year = {2009},
    address = {Kobe, Japan},
    timestamp = {2014.04.24},
    }

  • G. Grisetti, C. Stachniss, and W. Burgard, “Non-linear Constraint Network Optimization for Efficient Map Learning,” IEEE Transactions on Intelligent Transportation Systems, vol. 10, iss. 3, p. 428–439, 2009.
    [BibTeX] [PDF]
    [none]
    @Article{grisetti2009,
    title = {Non-linear Constraint Network Optimization for Efficient Map Learning},
    author = {Grisetti, G. and Stachniss, C. and Burgard, W.},
    journal = ieeeits,
    year = {2009},
    volume = {10},
    number = {3},
    pages = {428--439},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/grisetti09its.pdf},
    }

  • F. Korč and W. Förstner, “eTRIMS Image Database for Interpreting Images of Man-Made Scenes,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2009-01, 2009.
    [BibTeX] [PDF]

    We describe ground truth data that we provide to serve as a basis for evaluation and comparison of supervised learning approaches to image interpretation. The provided ground truth, the eTRIMS Image Database, is a collection of annotated images of real world street scenes. Typical objects in these images are variable in shape and appearance, in the number of its parts and appear in a variety of configurations. The domain of man-made scenes is thus well suited for evaluation and comparison of a variety of interpretation approaches, including those that employ structure models. The provided pixelwise ground truth assigns each image pixel both with a class label and an object label and offers thus ground truth annotation both on the level of pixels and regions. While we believe that such ground truth is of general interest in supervised learning, such data may be of further relevance in emerging real world applications involving automation of man-made scene interpretation.

    @TechReport{korvc2009etrims,
    title = {{eTRIMS} Image Database for Interpreting Images of Man-Made Scenes},
    author = {Kor{\vc}, Filip and F\"orstner, Wolfgang},
    institution = {Department of Photogrammetry, University of Bonn},
    year = {2009},
    month = apr,
    number = {TR-IGG-P-2009-01},
    abstract = {We describe ground truth data that we provide to serve as a basis for evaluation and comparison of supervised learning approaches to image interpretation. The provided ground truth, the eTRIMS Image Database, is a collection of annotated images of real world street scenes. Typical objects in these images are variable in shape and appearance, in the number of its parts and appear in a variety of configurations. The domain of man-made scenes is thus well suited for evaluation and comparison of a variety of interpretation approaches, including those that employ structure models. The provided pixelwise ground truth assigns each image pixel both with a class label and an object label and offers thus ground truth annotation both on the level of pixels and regions. While we believe that such ground truth is of general interest in supervised learning, such data may be of further relevance in emerging real world applications involving automation of man-made scene interpretation.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Korvc2009eTRIMS.pdf},
    }

  • R. Kuemmerle, B. Steder, C. Dornhege, M. Ruhnke, G. Grisetti, C. Stachniss, and A. Kleiner, “On measuring the accuracy of SLAM algorithms,” Autonomous Robots, vol. 27, p. 387ff, 2009.
    [BibTeX] [PDF]
    [none]
    @Article{kuemmerle2009,
    title = {On measuring the accuracy of {SLAM} algorithms},
    author = {R. Kuemmerle and B. Steder and C. Dornhege and M. Ruhnke and G. Grisetti and C. Stachniss and A. Kleiner},
    journal = auro,
    year = {2009},
    volume = {27},
    number = {4},
    pages = {387ff},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/kuemmerle09auro.pdf},
    }

  • J. Meidow, C. Beder, and W. Förstner, “Reasoning with uncertain points, straight lines, and straight line segments in 2D,” ISPRS Journal of Photogrammetry and Remote Sensing, vol. 64, iss. 2, p. 125–139, 2009. doi:10.1016/j.isprsjprs.2008.09.013
    [BibTeX] [PDF]

    Decisions based on basic geometric entities can only be optimal, if their uncertainty is propagated through the entire reasoning chain. This concerns the construction of new entities from given ones, the testing of geometric relations between geometric entities, and the parameter estimation of geometric entities based on spatial relations which have been found to hold. Basic feature extraction procedures often provide measures of uncertainty. These uncertainties should be incorporated into the representation of geometric entities permitting statistical testing, eliminates the necessity of specifying non-interpretable thresholds and enables statistically optimal parameter estimation. Using the calculus of homogeneous coordinates the power of algebraic projective geometry can be exploited in these steps of image analysis. This review collects, discusses and evaluates the various representations of uncertain geometric entities in 2D together with their conversions. The representations are extended to achieve a consistent set of representations allowing geometric reasoning. The statistical testing of geometric relations is presented. Furthermore, a generic estimation procedure is provided for multiple uncertain geometric entities based on possibly correlated observed geometric entities and geometric constraints.

    @Article{meidow2009reasoning,
    title = {Reasoning with uncertain points, straight lines, and straight line segments in 2D},
    author = {Meidow, Jochen and Beder, Christian and F\"orstner, Wolfgang},
    journal = {ISPRS Journal of Photogrammetry and Remote Sensing},
    year = {2009},
    volume = {64},
    number = {2},
    pages = {125--139},
    abstract = {Decisions based on basic geometric entities can only be optimal, if their uncertainty is propagated through the entire reasoning chain. This concerns the construction of new entities from given ones, the testing of geometric relations between geometric entities, and the parameter estimation of geometric entities based on spatial relations which have been found to hold. Basic feature extraction procedures often provide measures of uncertainty. These uncertainties should be incorporated into the representation of geometric entities permitting statistical testing, eliminates the necessity of specifying non-interpretable thresholds and enables statistically optimal parameter estimation. Using the calculus of homogeneous coordinates the power of algebraic projective geometry can be exploited in these steps of image analysis. This review collects, discusses and evaluates the various representations of uncertain geometric entities in 2D together with their conversions. The representations are extended to achieve a consistent set of representations allowing geometric reasoning. The statistical testing of geometric relations is presented. Furthermore, a generic estimation procedure is provided for multiple uncertain geometric entities based on possibly correlated observed geometric entities and geometric constraints.},
    city = {Bonn},
    doi = {10.1016/j.isprsjprs.2008.09.013},
    url = {https://www.ipb.uni-bonn.de/pdfs/Meidow2009Reasoning.pdf},
    }

  • J. Meidow, W. Förstner, and C. Beder, “Optimal Parameter Estimation with Homogeneous Entities and Arbitrary Constraints,” in Pattern Recognition (Symposium of DAGM), Jena, Germany, 2009, p. 292–301. doi:10.1007/978-3-642-03798-6_30
    [BibTeX] [PDF]

    Well known estimation techniques in computational geometry usually deal only with single geometric entities as unknown parameters and do not account for constrained observations within the estimation. The estimation model proposed in this paper is much more general, as it can handle multiple homogeneous vectors as well as multiple constraints. Furthermore, it allows the consistent handling of arbitrary covariance matrices for the observed and the estimated entities. The major novelty is the proper handling of singular observation covariance matrices made possible by additional constraints within the estimation. These properties are of special interest for instance in the calculus of algebraic projective geometry, where singular covariance matrices arise naturally from the non-minimal parameterizations of the entities. The validity of the proposed adjustment model will be demonstrated by the estimation of a fundamental matrix from synthetic data and compared to heteroscedastic regression [?], which is considered as state-of-the-art estimator for this task. As the latter is unable to simultaneously estimate multiple entities, we will also demonstrate the usefulness and the feasibility of our approach by the constrained estimation of three vanishing points from observed uncertain image line segments.

    @InProceedings{meidow2009optimal,
    title = {Optimal Parameter Estimation with Homogeneous Entities and Arbitrary Constraints},
    author = {Meidow, Jochen and F\"orstner, Wolfgang and Beder, Christian},
    booktitle = {Pattern Recognition (Symposium of DAGM)},
    year = {2009},
    address = {Jena, Germany},
    editor = {Denzler, J. and Notni, G.},
    pages = {292--301},
    publisher = {Springer},
    series = {LNCS},
    abstract = {Well known estimation techniques in computational geometry usually deal only with single geometric entities as unknown parameters and do not account for constrained observations within the estimation. The estimation model proposed in this paper is much more general, as it can handle multiple homogeneous vectors as well as multiple constraints. Furthermore, it allows the consistent handling of arbitrary covariance matrices for the observed and the estimated entities. The major novelty is the proper handling of singular observation covariance matrices made possible by additional constraints within the estimation. These properties are of special interest for instance in the calculus of algebraic projective geometry, where singular covariance matrices arise naturally from the non-minimal parameterizations of the entities. The validity of the proposed adjustment model will be demonstrated by the estimation of a fundamental matrix from synthetic data and compared to heteroscedastic regression [?], which is considered as state-of-the-art estimator for this task. As the latter is unable to simultaneously estimate multiple entities, we will also demonstrate the usefulness and the feasibility of our approach by the constrained estimation of three vanishing points from observed uncertain image line segments.},
    doi = {10.1007/978-3-642-03798-6_30},
    url = {https://www.ipb.uni-bonn.de/pdfs/Meidow2009Optimal.pdf},
    }

  • M. D. Mura, J. A. Benediktsson, B. Waske, and L. Bruzzone, “Morphological attribute filters for the analysis of very high resolution remote sensing images,” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS 2009), 2009. doi:10.1109/IGARSS.2009.5418096
    [BibTeX]

    This paper proposes the use of morphological attribute profiles as an effective alternative to the conventional morphological operators based on the geodesic reconstruction for modeling the spatial information in very high resolution images. Attribute profiles, used in multilevel approaches, result particularly effective in terms of computational complexity and capabilities in characterizing the objects in the image. In addition they are more flexible than operators by reconstruction, thanks to the definition of possible different attributes. Experimental results obtained on a Quickbird panchromatic very high resolution image proved the effectiveness of the presented attribute filters and pointed out their main properties.

    @InProceedings{mura2009morphological,
    title = {Morphological attribute filters for the analysis of very high resolution remote sensing images},
    author = {Mura, M. D. and Benediktsson, J. A. and Waske, Bj\"orn and Bruzzone, L.},
    booktitle = {IEEE International Geoscience and Remote Sensing Symposium (IGARSS 2009)},
    year = {2009},
    abstract = {This paper proposes the use of morphological attribute profiles as an effective alternative to the conventional morphological operators based on the geodesic reconstruction for modeling the spatial information in very high resolution images. Attribute profiles, used in multilevel approaches, result particularly effective in terms of computational complexity and capabilities in characterizing the objects in the image. In addition they are more flexible than operators by reconstruction, thanks to the definition of possible different attributes. Experimental results obtained on a Quickbird panchromatic very high resolution image proved the effectiveness of the presented attribute filters and pointed out their main properties.},
    doi = {10.1109/IGARSS.2009.5418096},
    keywords = {Quickbird panchromatic imagery;computational complexity;geodesic reconstruction;morphological attribute filters;morphological operators;spatial information;very high resolution remote sensing images;computational complexity;geophysical image processing;image reconstruction;mathematical morphology;remote sensing;},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • M. Pilger, “Automatische Bestimmung skalierungsinvarianter Fenster für markante Bildpunkte,” Diploma Thesis Master Thesis, 2009.
    [BibTeX]

    Wir haben basierend auf dem Interestoperator von Förstner und Gülch einen skaleninvarianten Operator in Matlab implementiert, der möglichst präzise lokalisierbare Kantenschnittpunkte und ihre Skalen aus Bildern extrahiert. Dazu wurden C++-Bibliotheken zur Rauschschätzung und zur schnellen Berechnung von Faltungen nach der Methode von Deriche für Matlab verfügbar gemacht. Leider hat sich herausgestellt, dass die Faltungen mit dem Deriche-Filter für unsere spezielle Anwendung nicht geeignet ist: Es entstehen Artefakte in unserer Optimierungsfunktion, so dass eine zuverlässige Auswertung nicht gewährleistet ist. Indem wir unsere Funktionen durch Faltungen im Frequenzbereich berechnet haben, konnten wir zunächst auf Testbildern Kantenschnittpunkte mit entsprechenden Skalen extrahieren. Perfekte Skaleninvarianz bei Maßstabsänderung des Bildes konnten wir in einem Experiment nicht nachweisen: die detektierte Skala eines Kantenschnittpunktes wuchs im Experiment nicht schnell genug mit dem größer werdenden Bildmaßstab mit. Dennoch erzielten wir auf realen Bildern gute Ergebnisse und detektierten auf zwei Bildern, die sich durch bekannte geometrische oder radiometrische Transformationen unterscheiden, prozentual ähnlich viele korrespondierende Punkte und Skalen wie existierende skaleninvarianteInterestoperatoren. Gemessen an der absoluten Zahl der Detektionen liegt unser Operator weit hinter dem SIFT-Operator und dem Harris-Laplace Operator – beide entdecken auf realen Bildern meist mehr als doppelt so viele Punkte wie unser Operator. Allerdings kann unser Operator auf einen weiteren Typus von Interestpunkten erweitert werden, das sind Zentren kreissymmetrischer Bildmerkmale, oder allgemeiner auch auf spiralartige Merkmale. Damit kann in Zukunft möglicherweise das Manko der geringen Anzahl an Detektionen überwunden werden. Ohne einen Schwellwert detektiert unser Operator auch zufällig verteilte Punkte und Skalen in homogenen Bildbereichen. 
Wir haben gezeigt, dass es sinnvoll ist, ein Homogenitätsmaß zu benutzen, um Detektionen auf homogenen Bildbereichen zu unterdrücken, und dennoch auch nicht so gut lokalisierte Punkte, die aber zu einer Bildorientierung beitragen können, zu erhalten. Unser Operator lässt im derzeitigen Entwicklungsstadium noch Raum für Erweiterungen: neben der schon erwähnten Einbeziehung weiterer Punktmerkmale kann bei Farbbildern die Information aller drei Kanäle in die Detektion mit einbezogen werden, ähnlich wie bei Fuchs (1997), ohne das Bild unter Informationsverlust auf einen Helligkeitskanal zu reduzieren. Außerdem könnte untersucht werden, ob sich ein Oversampling des Bildes vor der Berechnung der quadratischen Gradienten, wie es Köthe (2003) vorschlägt, vorteilhaft auf die Punktdetektionen auswirkt. Wichtig für Anwendungen in der Praxis wäre auch eine deutliche Geschwindigkeitssteigerung. Abhängig von Bildgröße, Anzahl detektierter Punktkandidaten und Diskretisierungsdichte des Skalenraums kann die Detektion für ein Bild der Größe (800 x 800pel) bei eingeschalteter Subpixelschätzung auf einem 2,4 GHz Computer 15 Minuten dauern. Die meiste Zeit beanspruchen dabei die Faltungen und die kubische Interpolation bei der Subpixelschätzung. Die Zeit für die Faltungen könnte durch einen Übergang auf eine Pyramidendarstellung des Bildes im Skalenraum reduziert werden.

    @MastersThesis{pilger2009automatische,
    title = {Automatische Bestimmung skalierungsinvarianter Fenster f\"ur markante Bildpunkte},
    author = {Pilger, Marko},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2009},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Inform. Timo Dickscheid},
    type = {Diploma Thesis},
    abstract = {Wir haben basierend auf dem Interestoperator von F\"orstner und G\"ulch einen skaleninvarianten Operator in Matlab implementiert, der m\"oglichst pr\"azise lokalisierbare Kantenschnittpunkte und ihre Skalen aus Bildern extrahiert. Dazu wurden C++-Bibliotheken zur Rauschsch\"atzung und zur schnellen Berechnung von Faltungen nach der Methode von Deriche f\"ur Matlab verf\"ugbar gemacht. Leider hat sich herausgestellt, dass die Faltungen mit dem Deriche-Filter f\"ur unsere spezielle Anwendung nicht geeignet ist: Es entstehen Artefakte in unserer Optimierungsfunktion, so dass eine zuverl\"assige Auswertung nicht gew\"ahrleistet ist. Indem wir unsere Funktionen durch Faltungen im Frequenzbereich berechnet haben, konnten wir zun\"achst auf Testbildern Kantenschnittpunkte mit entsprechenden Skalen extrahieren. Perfekte Skaleninvarianz bei Ma{\ss}stabs\"anderung des Bildes konnten wir in einem Experiment nicht nachweisen: die detektierte Skala eines Kantenschnittpunktes wuchs im Experiment nicht schnell genug mit dem gr\"o{\ss}er werdenden Bildma{\ss}stab mit. Dennoch erzielten wir auf realen Bildern gute Ergebnisse und detektierten auf zwei Bildern, die sich durch bekannte geometrische oder radiometrische Transformationen unterscheiden, prozentual \"ahnlich viele korrespondierende Punkte und Skalen wie existierende skaleninvariante Interestoperatoren. Gemessen an der absoluten Zahl der Detektionen liegt unser Operator weit hinter dem SIFT-Operator und dem Harris-Laplace Operator - beide entdecken auf realen Bildern meist mehr als doppelt so viele Punkte wie unser Operator. Allerdings kann unser Operator auf einen weiteren Typus von Interestpunkten erweitert werden, das sind Zentren kreissymmetrischer Bildmerkmale, oder allgemeiner auch auf spiralartige Merkmale. Damit kann in Zukunft m\"oglicherweise das Manko der geringen Anzahl an Detektionen \"uberwunden werden. 
Ohne einen Schwellwert detektiert unser Operator auch zuf\"allig verteilte Punkte und Skalen in homogenen Bildbereichen. Wir haben gezeigt, dass es sinnvoll ist, ein Homogenit\"atsma{\ss} zu benutzen, um Detektionen auf homogenen Bildbereichen zu unterdr\"ucken, und dennoch auch nicht so gut lokalisierte Punkte, die aber zu einer Bildorientierung beitragen k\"onnen, zu erhalten. Unser Operator l\"asst im derzeitigen Entwicklungsstadium noch Raum f\"ur Erweiterungen: neben der schon erw\"ahnten Einbeziehung weiterer Punktmerkmale kann bei Farbbildern die Information aller drei Kan\"ale in die Detektion mit einbezogen werden, \"ahnlich wie bei Fuchs (1997), ohne das Bild unter Informationsverlust auf einen Helligkeitskanal zu reduzieren. Au{\ss}erdem k\"onnte untersucht werden, ob sich ein Oversampling des Bildes vor der Berechnung der quadratischen Gradienten, wie es K\"othe (2003) vorschl\"agt, vorteilhaft auf die Punktdetektionen auswirkt. Wichtig f\"ur Anwendungen in der Praxis w\"are auch eine deutliche Geschwindigkeitssteigerung. Abh\"angig von Bildgr\"o{\ss}e, Anzahl
    detektierter Punktkandidaten und Diskretisierungsdichte des Skalenraums kann die Detektion f\"ur ein Bild der Gr\"o{\ss}e (800 x 800pel) bei eingeschalteter Subpixelsch\"atzung auf einem 2,4 GHz Computer 15 Minuten dauern. Die meiste Zeit beanspruchen dabei die Faltungen und die kubische Interpolation bei der Subpixelsch\"atzung. Die Zeit f\"ur die Faltungen k\"onnte durch einen \"Ubergang auf eine Pyramidendarstellung des Bildes im Skalenraum reduziert werden.},
    }

  • R. Roscher and W. Förstner, “Multiclass Bounded Logistic Regression – Efficient Regularization with Interior Point Method,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2009-02, 2009.
    [BibTeX] [PDF]

    Logistic regression has been widely used in classification tasks for many years. Its optimization in case of linear separable data has received extensive study due to the problem of a monotone likelihood. This paper presents a new approach, called bounded logistic regression (BLR), by solving the logistic regression as a convex optimization problem with constraints. The paper tests the accuracy of BLR by evaluating nine well-known datasets and compares it to the closely related support vector machine approach (SVM).

    @TechReport{roscher2009multiclass,
    title = {Multiclass Bounded Logistic Regression -- Efficient Regularization with Interior Point Method},
    author = {Roscher, Ribana and F\"orstner, Wolfgang},
    institution = {Department of Photogrammetry, University of Bonn},
    year = {2009},
    number = {TR-IGG-P-2009-02},
    abstract = {Logistic regression has been widely used in classification tasks for many years. Its optimization in case of linear separable data has received extensive study due to the problem of a monotone likelihood. This paper presents a new approach, called bounded logistic regression (BLR), by solving the logistic regression as a convex optimization problem with constraints. The paper tests the accuracy of BLR by evaluating nine well-known datasets and compares it to the closely related support vector machine approach (SVM).},
    url = {https://www.ipb.uni-bonn.de/pdfs/Roscher2009Multiclass.pdf},
    }

  • J. Schmittwilken, M. Y. Yang, W. Förstner, and L. Plümer, “Integration of conditional random fields and attribute grammars for range data interpretation of man-made objects,” Annals of GIS, vol. 15, iss. 2, p. 117–126, 2009. doi:10.1080/19475680903464696
    [BibTeX] [PDF]

    A new concept for the integration of low- and high-level reasoning for the interpretation of images of man-made objects is described. The focus is on the 3D reconstruction of facades, especially the transition area between buildings and the surrounding ground. The aim is the identification of semantically meaningful objects such as stairs, entrances, and windows. A low-level module based on random sample consensus (RANSAC) algorithm generates planar polygonal patches. Conditional random fields (CRFs) are used for their classification, based on local neighborhood and priors from the grammar. An attribute grammar is used to represent semantic knowledge including object partonomy and observable geometric constraints. The AND-OR tree-based parser uses the precision of the classified patches to control the reconstruction process and to optimize the sampling mechanism of RANSAC. Although CRFs are close to data, attribute grammars make the high-level structure of objects explicit and translate semantic knowledge in observable geometric constraints. Our approach combines top-down and bottom-up reasoning by integrating CRF and attribute grammars and thus exploits the complementary strengths of these methods.

    @Article{schmittwilken2009integration,
    title = {Integration of conditional random fields and attribute grammars for range data interpretation of man-made objects},
    author = {Schmittwilken, J\"org and Yang, Michael Ying and F\"orstner, Wolfgang and Pl\"umer, Lutz},
    journal = {Annals of GIS},
    year = {2009},
    number = {2},
    pages = {117--126},
    volume = {15},
    abstract = {A new concept for the integration of low- and high-level reasoning for the interpretation of images of man-made objects is described. The focus is on the 3D reconstruction of facades, especially the transition area between buildings and the surrounding ground. The aim is the identification of semantically meaningful objects such as stairs, entrances, and windows. A low-level module based on random sample consensus (RANSAC) algorithm generates planar polygonal patches. Conditional random fields (CRFs) are used for their classification, based on local neighborhood and priors from the grammar. An attribute grammar is used to represent semantic knowledge including object partonomy and observable geometric constraints. The AND-OR tree-based parser uses the precision of the classified patches to control the reconstruction process and to optimize the sampling mechanism of RANSAC. Although CRFs are close to data, attribute grammars make the high-level structure of objects explicit and translate semantic knowledge in observable geometric constraints. Our approach combines top-down and bottom-up reasoning by integrating CRF and attribute grammars and thus exploits the complementary strengths of these methods.},
    doi = {10.1080/19475680903464696},
    url = {https://www.ipb.uni-bonn.de/pdfs/Schmittwilken2009Integration.pdf},
    }

  • A. Schneider, J. Sturm, C. Stachniss, M. Reisert, H. Burkhardt, and W. Burgard, “Object Identification with Tactile Sensors Using Bag-of-Features,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2009.
    [BibTeX] [PDF]
    [none]
    @InProceedings{schneider2009,
    title = {Object Identification with Tactile Sensors Using Bag-of-Features},
    author = {A. Schneider and J. Sturm and C. Stachniss and M. Reisert and H. Burkhardt and W. Burgard},
    booktitle = iros,
    year = {2009},
    abstract = {[none]},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/wurm09iros.pdf},
    internal-note = {NOTE(review): url filename 'wurm09iros.pdf' does not match this entry's authors/key -- verify the link target},
    }

  • R. Schultz, “Orientierung einer Kamera in einer Legolandszene,” Bachelor Thesis, 2009.
    [BibTeX]

    Diese Arbeit untersucht ein Verfahren zur Bestimmung der äußeren Orientierung einer Kamera. Für viele Anwendungen in der Photogrammetrie ist es interessant, die äußere Orientierung der Kamera mit geringem Aufwand schätzen zu können. Die äußere Orientierung beschreibt die räumliche Lage der Kamera im Objektkoordinatensystem und lässt sich über die Fluchtpunkte bestimmen. Die Fluchtpunkte lassen sich in einer Legolandszene durch parallele Objektkanten schätzen. In einer Legolandszene bestehen alle Objekte aus Polyedern, die ausschließlich rechte Winkel haben. Hierbei sind die Polyeder parallel zueinander angeordnet. Legolandszenen sind eine Vereinfachung realer Bilder. Sie sollen dem Erlernen des Erkennens von Strukturen, in diesem Falle von Objektkanten dienen. Ziel ist es, eine Methode zu entwickeln, mit deren Hilfe im Bild Objektkanten, die zum gleichen Fluchtpunkt führen, gefunden werden können. Auf Grundlage dieser Kanten kann die äußere Orientierung der Kamera bestimmt werden. Es existiert ein Verfahren zur Bestimmung der äußeren Orientierung der Kamera, unter der Voraussetzung, dass die innere Orientierung bekannt ist. Dieses Verfahren wurde an der Universität Bonn von Prof. Förstner entwickelt. Aufgabe der Bachelorarbeit ist es, dieses Verfahren bezüglich seiner Kantenwahl zu verbessern. Es wurden in den Bildern Kanten segmentiert, unter welchen Kantenpaare manuell dahingehend untersucht wurden, ob sie zum gleichen Fluchtpunkt führen. Diese Datenmenge wurde in eine Test- und Trainingsmenge unterteilt. Die Daten der Trainingsmenge wurden verwendet, um anhand von geometrischen Eigenschaften zu untersuchen, ob ein Kantenpaar zum gleichen Fluchtpunkt führt. Es wurden der Abstand und der Winkel zwischen zwei Kanten sowie deren Überlappung untersucht. Weiterhin wurde zu den extrahierten Kanten eine Dreiecksvermaschung durch eine bedingte Delaunay- Triangulierung konstruiert, mit deren Hilfe ein Kantenzuordnungsverfahren entwickelt wurde. 
Diese geometrischen Eigenschaften wurden vorerst einzeln und später in Kombination mittels eines Entscheidungsbaumes untersucht. Die für die Eigenschaften ermittelten Kriterien wurden mit den Daten der Testmenge überprüft. Bei den untersuchten Daten erwies sich ein Winkel zwischen 13 Grad und 19 Grad als effektiv. Hiermit wurden 58 % der theoretisch maximalen Utility durch fehlerfreie Klassifikation erreicht, im Kontrast zu 10 % des ursprünglichen Verfahrens.

    @MastersThesis{schultz2009orientierung,
    title = {Orientierung einer Kamera in einer Legolandszene},
    author = {Schultz, Rebekka},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2009},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Inform. Timo Dickscheid},
    type = {Bachelor Thesis},
    abstract = {Diese Arbeit untersucht ein Verfahren zur Bestimmung der \"au{\ss}eren Orientierung einer Kamera. F\"ur viele Anwendungen in der Photogrammetrie ist es interessant, die \"au{\ss}ere Orientierung der Kamera mit geringem Aufwand sch\"atzen zu k\"onnen. Die \"au{\ss}ere Orientierung beschreibt die r\"aumliche Lage der Kamera im Objektkoordinatensystem und l\"asst sich \"uber die Fluchtpunkte bestimmen. Die Fluchtpunkte lassen sich in einer Legolandszene durch parallele Objektkanten sch\"atzen. In einer Legolandszene bestehen alle Objekte aus Polyedern, die ausschlie{\ss}lich rechte Winkel haben. Hierbei sind die Polyeder parallel zueinander angeordnet. Legolandszenen sind eine Vereinfachung realer Bilder. Sie sollen dem Erlernen des Erkennens von Strukturen, in diesem Falle von Objektkanten dienen. Ziel ist es, eine Methode zu entwickeln, mit deren Hilfe im Bild Objektkanten, die zum gleichen Fluchtpunkt f\"uhren, gefunden werden k\"onnen. Auf Grundlage dieser Kanten kann die \"au{\ss}ere Orientierung der Kamera bestimmt werden. Es existiert ein Verfahren zur Bestimmung der \"au{\ss}eren Orientierung der Kamera, unter der Voraussetzung, dass die innere Orientierung bekannt ist. Dieses Verfahren wurde an der Universit\"at Bonn von Prof. F\"orstner entwickelt. Aufgabe der Bachelorarbeit ist es, dieses Verfahren bez\"uglich seiner Kantenwahl zu verbessern. Es wurden in den Bildern Kanten segmentiert, unter welchen Kantenpaare manuell dahingehend untersucht wurden, ob sie zum gleichen Fluchtpunkt f\"uhren. Diese Datenmenge wurde in eine Test- und Trainingsmenge unterteilt. Die Daten der Trainingsmenge wurden verwendet, um anhand von geometrischen Eigenschaften zu untersuchen, ob ein Kantenpaar zum gleichen Fluchtpunkt f\"uhrt. Es wurden der Abstand und der Winkel zwischen zwei Kanten sowie deren \"Uberlappung untersucht. 
Weiterhin wurde zu den extrahierten Kanten eine Dreiecksvermaschung durch eine bedingte Delaunay- Triangulierung konstruiert, mit deren Hilfe ein Kantenzuordnungsverfahren entwickelt wurde. Diese geometrischen Eigenschaften wurden vorerst einzeln und sp\"ater in Kombination mittels eines Entscheidungsbaumes untersucht. Die f\"ur die Eigenschaften ermittelten Kriterien wurden mit den Daten der Testmenge \"uberpr\"uft. Bei den untersuchten Daten erwies sich ein Winkel zwischen 13 Grad und 19 Grad als effektiv. Hiermit wurden 58 \% der theoretisch maximalen Utility durch fehlerfreie Klassifikation erreicht, im Kontrast zu 10 \% des urspr\"unglichen Verfahrens.},
    }

  • C. Stachniss, “Spatial Modeling and Robot Navigation,” Habilitation, 2009.
    [BibTeX] [PDF]
    [none]
    @phdthesis{stachniss2009,
    author = {C. Stachniss},
    title = {Spatial Modeling and Robot Navigation},
    school = {University of Freiburg, Department of Computer Science},
    type = {Habilitation},
    year = {2009},
    abstract = {[none]},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/stachniss-habil.pdf},
    timestamp = {2014.04.24},
    }

  • C. Stachniss, Robotic Mapping and Exploration, Springer, 2009, vol. 55.
    [BibTeX]
    [none]
    @book{stachniss2009a,
    author = {C. Stachniss},
    title = {Robotic Mapping and Exploration},
    publisher = {Springer},
    series = springerstaradvanced,
    volume = {55},
    year = {2009},
    isbn = {978-3-642-01096-5},
    abstract = {[none]},
    timestamp = {2014.04.24},
    }

  • C. Stachniss, O. Martinez Mozos, and W. Burgard, “Efficient Exploration of Unknown Indoor Environments using a Team of Mobile Robots,” Annals of Mathematics and Artificial Intelligence, vol. 52, p. 205ff, 2009.
    [BibTeX]
    [none]
    @Article{stachniss2009b,
    title = {Efficient Exploration of Unknown Indoor Environments using a Team of Mobile Robots},
    author = {Stachniss, C. and Martinez Mozos, O. and Burgard, W.},
    journal = {Annals of Mathematics and Artificial Intelligence},
    year = {2009},
    pages = {205ff},
    volume = {52},
    number = {2},
    abstract = {[none]},
    timestamp = {2014.04.24},
    }

  • C. Stachniss, C. Plagemann, and A. J. Lilienthal, “Gas Distribution Modeling using Sparse Gaussian Process Mixtures,” Autonomous Robots, vol. 26, p. 187ff, 2009.
    [BibTeX]
    [none]
    @Article{stachniss2009c,
    title = {Gas Distribution Modeling using Sparse Gaussian Process Mixtures},
    author = {Stachniss, C. and Plagemann, C. and Lilienthal, A.J.},
    journal = auro,
    year = {2009},
    pages = {187ff},
    volume = {26},
    number = {2},
    abstract = {[none]},
    timestamp = {2014.04.24},
    }

  • R. Steffen, “Visual SLAM from image sequences acquired by unmanned aerial vehicles,” PhD Thesis, 2009.
    [BibTeX]

    Die Verwendung der Triangulation zur Lösung des Problems der gleichzeitigen Lokalisierung und Kartierung findet seit Jahren ihren Eingang in die Entwicklung autonomer Systeme. Aufgrund von Echtzeitanforderungen dieser Systeme erreichen rekursive Schätzverfahren, insbesondere Kalmanfilter basierte Ansätze, große Beliebtheit. Bedauerlicherweise, treten dabei durch die Nichtlinearität der Triangulation einige Effekte auf, welche die Konsistenz und Genauigkeit der Lösung hinsichtlich der geschätzten Parameter maßgeblich beeinflussen. In der Literatur existieren dazu einige interessante Lösungsansätze, um diese genauigkeitsrelevanten Effekte zu minimieren. Die Motivation dieser Arbeit ist die These, dass die Kalmanfilter basierte Lösung der Triangulation zur Lokalisierung und Kartierung aus Bildfolgen von unbemannten Drohnen realisierbar ist. Im Gegensatz zur klassischen Aero-Triangulation treten dadurch zusätzliche Aspekte in den Vordergrund, die in dieser Arbeit beleuchtet werden. Der erste Beitrag dieser Arbeit besteht in der Herleitung eines generellen Verfahrens zum rekursiven Verbessern im Kalmanfilter mit impliziten Beobachtungsgleichungen. Wir zeigen, dass die klassischen Verfahren im Kalmanfilter eine Spezialisierung unseres Ansatzes darstellen. Im zweite Beitrag erweitern wir die klassische Modellierung für ein Einkameramodell im Kalmanfilter und formulieren linear berechenbare Bewegungsmodelle. Neben verschiedenen Verfahren zur Initialisierung von Neupunkten im Kalmanfilter aus der Literatur stellen wir in einem dritten Hauptbeitrag ein neues Verfahren vor. Am Beispiel von Bildfolgen eines unbemannten Flugobjektes zeigen wir in dieser Arbeit als vierten Beitrag, welche Genauigkeit zur Lokalisierung und Kartierung durch Triangulation möglich ist. 
Schließlich wird anhand von empirischen Untersuchungen unter Verwendung simulierter und realer Daten einer Bildfolge eines photogrammetrischen Streifens gezeigt und verglichen, welchen Einfluß die Initialisierungsmethoden für Neupunkte im Kalmanfilter haben und welche Genauigkeiten für diese Szenarien erreichbar sind.

    @PhDThesis{steffen2009visual,
    title = {Visual SLAM from image sequences acquired by unmanned aerial vehicles},
    author = {Steffen, Richard},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2009},
    abstract = {Die Verwendung der Triangulation zur L\"osung des Problems der gleichzeitigen Lokalisierung und Kartierung findet seit Jahren ihren Eingang in die Entwicklung autonomer Systeme. Aufgrund von Echtzeitanforderungen dieser Systeme erreichen rekursive Sch\"atzverfahren, insbesondere Kalmanfilter basierte Ans\"atze, gro{\ss}e Beliebtheit. Bedauerlicherweise, treten dabei durch die Nichtlinearit\"at der Triangulation einige Effekte auf, welche die Konsistenz und Genauigkeit der L\"osung hinsichtlich der gesch\"atzten Parameter ma{\ss}geblich beeinflussen. In der Literatur existieren dazu einige interessante L\"osungsans\"atze, um diese genauigkeitsrelevanten Effekte zu minimieren. Die Motivation dieser Arbeit ist die These, dass die Kalmanfilter basierte L\"osung der Triangulation zur Lokalisierung und Kartierung aus Bildfolgen von unbemannten Drohnen realisierbar ist. Im Gegensatz zur klassischen Aero-Triangulation treten dadurch zus\"atzliche Aspekte in den Vordergrund, die in dieser Arbeit beleuchtet werden. Der erste Beitrag dieser Arbeit besteht in der Herleitung eines generellen Verfahrens zum rekursiven Verbessern im Kalmanfilter mit impliziten Beobachtungsgleichungen. Wir zeigen, dass die klassischen Verfahren im Kalmanfilter eine Spezialisierung unseres Ansatzes darstellen. Im zweite Beitrag erweitern wir die klassische Modellierung f\"ur ein Einkameramodell im Kalmanfilter und formulieren linear berechenbare Bewegungsmodelle. Neben verschiedenen Verfahren zur Initialisierung von Neupunkten im Kalmanfilter aus der Literatur stellen wir in einem dritten Hauptbeitrag ein neues Verfahren vor. Am Beispiel von Bildfolgen eines unbemannten Flugobjektes zeigen wir in dieser Arbeit als vierten Beitrag, welche Genauigkeit zur Lokalisierung und Kartierung durch Triangulation m\"oglich ist. 
Schlie{\ss}lich wird anhand von empirischen Untersuchungen unter Verwendung simulierter und realer Daten einer Bildfolge eines photogrammetrischen Streifens gezeigt und verglichen, welchen Einflu{\ss} die Initialisierungsmethoden f\"ur Neupunkte im Kalmanfilter haben und welche Genauigkeiten f\"ur diese Szenarien erreichbar sind.},
    }

  • H. Strasdat, C. Stachniss, and W. Burgard, “Which Landmark is Useful? Learning Selection Policies for Navigation in Unknown Environments,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Kobe, Japan, 2009.
    [BibTeX]
    [none]
    @inproceedings{strasdat2009,
    author = {H. Strasdat and Stachniss, C. and Burgard, W.},
    title = {Which Landmark is Useful? Learning Selection Policies for Navigation in Unknown Environments},
    booktitle = icra,
    address = {Kobe, Japan},
    year = {2009},
    abstract = {[none]},
    timestamp = {2014.04.24},
    }

  • J. Sturm, V. Pradeep, C. Stachniss, C. Plagemann, K. Konolige, and W. Burgard, “Learning Kinematic Models for Articulated Objects,” in Proc. of the Intl. Conf. on Artificial Intelligence (IJCAI), Pasadena, CA, USA, 2009.
    [BibTeX]
    [none]
    @InProceedings{sturm2009a,
    title = {Learning Kinematic Models for Articulated Objects},
    author = {J. Sturm and V. Pradeep and Stachniss, C. and C. Plagemann and K. Konolige and Burgard, W.},
    booktitle = ijcai,
    year = {2009},
    address = {Pasadena, CA, USA},
    abstract = {[none]},
    timestamp = {2014.04.24},
    }

  • J. Sturm, C. Stachniss, V. Pradeep, C. Plagemann, K. Konolige, and W. Burgard, “Learning Kinematic Models for Articulated Objects,” in Online Proc. of the Learning Workshop (Snowbird), Clearwater, FL, USA, 2009.
    [BibTeX]
    [none]
    @InProceedings{sturm2009,
    title = {Learning Kinematic Models for Articulated Objects},
    author = {J. Sturm and Stachniss, C. and V. Pradeep and C. Plagemann and K. Konolige and Burgard, W.},
    booktitle = {Online Proc. of the Learning Workshop (Snowbird)},
    year = {2009},
    address = {Clearwater, FL, USA},
    abstract = {[none]},
    timestamp = {2014.04.24},
    }

  • J. Sturm, C. Stachniss, V. Pradeep, C. Plagemann, K. Konolige, and W. Burgard, “Towards Understanding Articulated Objects,” in Workshop Integrating Mobility and Manipulation at Robotics: Science and Systems (RSS), Seattle, WA, USA, 2009.
    [BibTeX]
    [none]
    @InProceedings{sturm2009b,
    title = {Towards Understanding Articulated Objects},
    author = {J. Sturm and Stachniss, C. and V. Pradeep and C. Plagemann and K. Konolige and Burgard, W.},
    booktitle = {Workshop Integrating Mobility and Manipulation at Robotics: Science and Systems (RSS)},
    year = {2009},
    address = {Seattle, WA, USA},
    abstract = {[none]},
    timestamp = {2014.04.24},
    }

  • J. R. Sveinsson, B. Waske, and J. A. Benediktsson, “Speckle reduction of TerraSAR-X imagery using TV segmentation,” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS), 2009. doi:10.1109/IGARSS.2009.5417412
    [BibTeX]

    The nonsubsampled contourlet transform (NSCT) is a new image representation approach that has sparser representation at both spatial and directional resolution and thus captures smooth contours in images. On the other hand, wavelet transform has sparser representation of homogeneous areas. In this paper, we are going to use the three combinations of undecimated wavelet and nonsubsampled contourlet transforms that was used in for denoising of TerraSAR-X images. Two of the methods use the undecimated wavelet transform to de-noise homogeneous areas and the nonsubsampled contourlet transform to denoise areas with edges. The segmentation between homogeneous areas and areas with edges is done by using total variation segmentation. The third method is a linear averaging of the two denoising methods. A thresholding in the wavelet and contourlet domain is done by non-linear functions which are adapted for each selected subband. The non-linear functions are based on sigmoid functions. Simulation results suggested that these denoising schemes achieve good and clean images.

    @inproceedings{sveinsson2009speckle,
    author = {Sveinsson, J.R. and Waske, Bj\"orn and Benediktsson, J.A.},
    title = {Speckle reduction of TerraSAR-X imagery using TV segmentation},
    booktitle = {IEEE International Geoscience and Remote Sensing Symposium (IGARSS)},
    year = {2009},
    doi = {10.1109/IGARSS.2009.5417412},
    abstract = {The nonsubsampled contourlet transform (NSCT) is a new image representation approach that has sparser representation at both spatial and directional resolution and thus captures smooth contours in images. On the other hand, wavelet transform has sparser representation of homogeneous areas. In this paper, we are going to use the three combinations of undecimated wavelet and nonsubsampled contourlet transforms that was used in for denoising of TerraSAR-X images. Two of the methods use the undecimated wavelet transform to de-noise homogeneous areas and the nonsubsampled contourlet transform to denoise areas with edges. The segmentation between homogeneous areas and areas with edges is done by using total variation segmentation. The third method is a linear averaging of the two denoising methods. A thresholding in the wavelet and contourlet domain is done by non-linear functions which are adapted for each selected subband. The non-linear functions are based on sigmoid functions. Simulation results suggested that these denoising schemes achieve good and clean images.},
    keywords = {TV segmentation;TerraSAR-X imagery;directional resolution;image contours;image denoising;image representation;image segmentation;linear averaging;nonlinear functions;nonsubsampled contourlet transforms;sigmoid functions;spatial resolution;speckle reduction;total variation segmentation;undecimated wavelet transforms;feature extraction;geophysical image processing;geophysical techniques;image denoising;image representation;image resolution;image segmentation;radar imaging;remote sensing by radar;synthetic aperture radar;wavelet transforms;},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • T. Udelhoven, S. van der Linden, B. Waske, M. Stellmes, and L. Hoffmann, “Hypertemporal Classification of Large Areas Using Decision Fusion,” IEEE Geoscience and Remote Sensing Letters, vol. 6, iss. 3, p. 592–596, 2009. doi:10.1109/LGRS.2009.2021960
    [BibTeX]

    A novel multiannual land-cover-classification scheme for classifying hypertemporal image data is suggested, which is based on a supervised decision fusion (DF) approach. This DF approach comprises two steps: First, separate support vector machines (SVMs) are trained for normalized difference vegetation index (NDVI) time-series and mean annual temperature values of three consecutive years. In the second step, the information of the preliminary continuous SVM outputs, which represent posterior probabilities of the class assignments, is fused using a second-level SVM classifier. We tested the approach using the 10-day maximum-value NDVI composites from the “Mediterranean Extended Daily one-km Advanced Very High Resolution Radiometer Data Set” (MEDOKADS). The approach increases the classification accuracy and robustness compared with another DF method (simple majority voting) and with a single SVM expert that is trained for the same multiannual periods. The results clearly demonstrate that DF is a reliable technique for large-area mapping using hypertemporal data sets.

    @Article{udelhoven2009hypertemporal,
    title = {Hypertemporal Classification of Large Areas Using Decision Fusion},
    author = {Udelhoven, Thomas and van der Linden, Sebastian and Waske, Bj\"orn and Stellmes, Marion and Hoffmann, Lucien},
    journal = {IEEE Geoscience and Remote Sensing Letters},
    year = {2009},
    month = jul,
    number = {3},
    pages = {592--596},
    volume = {6},
    abstract = {A novel multiannual land-cover-classification scheme for classifying hypertemporal image data is suggested, which is based on a supervised decision fusion (DF) approach. This DF approach comprises two steps: First, separate support vector machines (SVMs) are trained for normalized difference vegetation index (NDVI) time-series and mean annual temperature values of three consecutive years. In the second step, the information of the preliminary continuous SVM outputs, which represent posterior probabilities of the class assignments, is fused using a second-level SVM classifier. We tested the approach using the 10-day maximum-value NDVI composites from the "Mediterranean Extended Daily one-km Advanced Very High Resolution Radiometer Data Set" (MEDOKADS). The approach increases the classification accuracy and robustness compared with another DF method (simple majority voting) and with a single SVM expert that is trained for the same multiannual periods. The results clearly demonstrate that DF is a reliable technique for large-area mapping using hypertemporal data sets.},
    doi = {10.1109/LGRS.2009.2021960},
    issn = {1545-598X},
    owner = {waske},
    tc = {2},
    timestamp = {2012.09.04},
    ut = {WOS:000267764800048},
    z8 = {0},
    z9 = {2},
    zb = {0},
    }

  • S. Valero, J. Chanussot, J. A. Benediktsson, H. Talbot, and B. Waske, “Directional mathematical morphology for the detection of the road network in Very High Resolution remote sensing images,” in 16th IEEE International Conf. on Image Processing (ICIP), 2009. doi:10.1109/ICIP.2009.5414344
    [BibTeX]

    This paper presents a new method for extracting roads in Very High Resolution remotely sensed images based on advanced directional morphological operators. The proposed approach introduces the use of Path Openings and Closings in order to extract structural pixel information. These morphological operators remain flexible enough to fit rectilinear and slightly curved structures since they do not depend on the choice of a structural element shape and hence outperform standard approaches using rotating rectangular structuring elements. The method consists in building a granulometry chain using Path Openings and Closing to perform Morphological Profiles. For each pixel, the Morphological Profile constitutes the feature vector on which our road extraction is based.

    @inproceedings{valero2009directional,
    author = {Valero, S. and Chanussot, J. and Benediktsson, J.A. and Talbot, H. and Waske, Bj\"orn},
    title = {Directional mathematical morphology for the detection of the road network in Very High Resolution remote sensing images},
    booktitle = {16th IEEE International Conf. on Image Processing (ICIP)},
    year = {2009},
    issn = {1522-4880},
    doi = {10.1109/ICIP.2009.5414344},
    abstract = {This paper presents a new method for extracting roads in Very High Resolution remotely sensed images based on advanced directional morphological operators. The proposed approach introduces the use of Path Openings and Closings in order to extract structural pixel information. These morphological operators remain flexible enough to fit rectilinear and slightly curved structures since they do not depend on the choice of a structural element shape and hence outperform standard approaches using rotating rectangular structuring elements. The method consists in building a granulometry chain using Path Openings and Closing to perform Morphological Profiles. For each pixel, the Morphological Profile constitutes the feature vector on which our road extraction is based.},
    keywords = {directional mathematical morphology;morphological profiles;path closings;path openings;rectilinear structures;road network detection;slightly curved structures;structural pixel information;very high resolution remote sensing images;geophysical image processing;geophysical techniques;mathematical morphology;remote sensing;roads;},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • X. Wang, B. Waske, and J. A. Benediktsson, “Ensemble methods for spectral-spatial classification of urban hyperspectral data,” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS), 2009. doi:10.1109/IGARSS.2009.5417534
    [BibTeX]

    Classification of hyperspectral data with high spatial resolution from urban areas is investigated. The approach is an extension of existing approaches, using both spectral and spatial information for classification. The spatial information is derived by mathematical morphology and principal components of the hyperspectral data set, generating a set of different morphological profiles. The whole data set is classified by the Random Forest algorithm. However, the computational complexity as well as the increased dimensionality and redundancy of data sets based on morphological profiles are potential drawbacks. Thus, in the presented study, feature selection is applied, using nonparametric weighted feature extraction and the variable importance of the random forests. The proposed approach is applied to ROSIS data from an urban area. The experimental results demonstrate that a feature reduction is useful in terms of accuracy. Moreover, the proposed approach also shows excellent results with a limited training set.

    @InProceedings{wang2009ensemble,
    title = {Ensemble methods for spectral-spatial classification of urban hyperspectral data},
    author = {Wang, Xin-Lu and Waske, Bj\"orn and Benediktsson, J. A.},
    booktitle = {IEEE International Geoscience and Remote Sensing Symposium (IGARSS)},
    year = {2009},
    abstract = {Classification of hyperspectral data with high spatial resolution from urban areas is investigated. The approach is an extension of existing approaches, using both spectral and spatial information for classification. The spatial information is derived by mathematical morphology and principal components of the hyperspectral data set, generating a set of different morphological profiles. The whole data set is classified by the Random Forest algorithm. However, the computational complexity as well as the increased dimensionality and redundancy of data sets based on morphological profiles are potential drawbacks. Thus, in the presented study, feature selection is applied, using nonparametric weighted feature extraction and the variable importance of the random forests. The proposed approach is applied to ROSIS data from an urban area. The experimental results demonstrate that a feature reduction is useful in terms of accuracy. Moreover, the proposed approach also shows excellent results with a limited training set.},
    doi = {10.1109/IGARSS.2009.5417534},
    keywords = {ROSIS data;computational complexity;data dimensionality;data redundancy;ensemble methods;feature selection;hyperspectral data classification;mathematical morphology;nonparametric weighted feature extraction;principal component analysis;random forest algorithm;spatial information classification;spectral information classification;urban hyperspectral data;decision trees;feature extraction;geophysical image processing;image classification;principal component analysis;remote sensing;},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • B. Waske, J. A. Benediktsson, K. Arnason, and J. R. Sveinsson, “Mapping of hyperspectral AVIRIS data using machine-learning algorithms,” Canadian Journal of Remote Sensing, vol. 35, p. 106–116, 2009. doi:10.5589/m09-018
    [BibTeX]

    Hyperspectral imaging provides detailed spectral and spatial information from the land cover that enables a precise differentiation between various surface materials. On the other hand, the performance of traditional and widely used statistical classification methods is often limited in this context, and thus alternative methods are required. In the study presented here, the performance of two machine-learning techniques, namely support vector machines (SVMs) and random forests (RFs), is investigated and the classification results are compared with those from well-known methods (i.e., maximum likelihood classifier and spectral angle mapper). The classifiers are applied to an Airborne Visible/Infrared Imaging Spectrometer (AVIRIS) dataset that was acquired near the Hekla volcano in Iceland. The results clearly show the advantages of the two proposed classifier algorithms in terms of accuracy. They significantly outperform the other methods and achieve overall accuracies of approximately 90%. Although SVM and RF show some diversity in the classification results, the global performance of the two classifiers is very similar. Thus, both methods can be considered attractive for the classification of hyperspectral data.

    @Article{waske2009mapping,
    title = {Mapping of hyperspectral AVIRIS data using machine-learning algorithms},
    author = {Waske, Bj\"orn and Benediktsson, Jon Atli and Arnason, Kolbeinn and Sveinsson, Johannes R.},
    journal = {Canadian Journal of Remote Sensing},
    year = {2009},
    pages = {106--116},
    volume = {35},
    abstract = {Hyperspectral imaging provides detailed spectral and spatial information from the land cover that enables a precise differentiation between various surface materials. On the other hand, the performance of traditional and widely used statistical classification methods is often limited in this context, and thus alternative methods are required. In the study presented here, the performance of two machine-learning techniques, namely support vector machines (SVMs) and random forests (RFs), is investigated and the classification results are compared with those from well-known methods (i.e., maximum likelihood classifier and spectral angle mapper). The classifiers are applied to an Airborne Visible/Infrared Imaging Spectrometer (AVIRIS) dataset that was acquired near the Hekla volcano in Iceland. The results clearly show the advantages of the two proposed classifier algorithms in terms of accuracy. They significantly outperform the other methods and achieve overall accuracies of approximately 90%. Although SVM and RF show some diversity in the classification results, the global performance of the two classifiers is very similar. Thus, both methods can be considered attractive for the classification of hyperspectral data.},
    doi = {10.5589/m09-018},
    issn = {1712-7971},
    owner = {waske},
    timestamp = {2012.09.04},
    }

  • B. Waske, J. A. Benediktsson, and J. R. Sveinsson, “Fusion of multisource data sets from agricultural areas for improved land cover classification,” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS), 2009. doi:10.1109/IGARSS.2009.5417536
    [BibTeX]

    An approach for spectral-spatial classification of multisource remote sensing data from agricultural areas is addressed. Mathematical morphology is used to derive the spatial information from the data sets. The different data sources (i.e., SAR and multispectral) are classified by support vector machines (SVM). Afterwards, the SVM outputs are transferred to probability measurements. These probability values are combined by different fusion strategies, to derive the final classification result. Comparing the results based on mathematical morphology the total accuracy increased by 6% compared to the pure-pixel classification results. Moreover the transfer of the SVM outputs into probability values and the subsequent fusion further increases the classification accuracy, resulting in an accuracy of 78.5%.

    @InProceedings{waske2009fusion,
    title = {Fusion of multisource data sets from agricultural areas for improved land cover classification},
    author = {Waske, Bj\"orn and Benediktsson, J. A. and Sveinsson, J. R.},
    booktitle = {IEEE International Geoscience and Remote Sensing Symposium (IGARSS)},
    year = {2009},
    abstract = {An approach for spectral-spatial classification of multisource remote sensing data from agricultural areas is addressed. Mathematical morphology is used to derive the spatial information from the data sets. The different data sources (i.e., SAR and multispectral) are classified by support vector machines (SVM). Afterwards, the SVM outputs are transferred to probability measurements. These probability values are combined by different fusion strategies, to derive the final classification result. Comparing the results based on mathematical morphology the total accuracy increased by 6% compared to the pure-pixel classification results. Moreover the transfer of the SVM outputs into probability values and the subsequent fusion further increases the classification accuracy, resulting in an accuracy of 78.5%.},
    doi = {10.1109/IGARSS.2009.5417536},
    keywords = {SAR remote sensing data;SVM;agricultural land cover classification;mathematical morphology;multisource data sets;multisource remote sensing data;multispectral remote sensing data;probability measurements;pure-pixel classification;spectral-spatial classification;support vector machines;geophysical image processing;geophysical techniques;image classification;mathematical morphology;remote sensing by radar;support vector machines;synthetic aperture radar;terrain mapping;vegetation mapping;},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • B. Waske and M. Braun, “Classifier ensembles for land cover mapping using multitemporal SAR imagery,” ISPRS Journal of Photogrammetry and Remote Sensing, vol. 64, iss. 5, p. 450–457, 2009. doi:10.1016/j.isprsjprs.2009.01.003
    [BibTeX]

    SAR data are almost independent from weather conditions, and thus are well suited for mapping of seasonally changing variables such as land cover. In regard to recent and upcoming missions, multitemporal and multi-frequency approaches become even more attractive. In the present study, classifier ensembles (i.e., boosted decision tree and random forests) are applied to multi-temporal C-band SAR data, from different study sites and years. A detailed accuracy assessment shows that classifier ensembles, in particularly random forests, outperform standard approaches like a single decision tree and a conventional maximum likelihood classifier by more than 10% independently from the site and year. They reach up to almost 84% of overall accuracy in rural areas with large plots. Visual interpretation confirms the statistical accuracy assessment and reveals that also typical random noise is considerably reduced. In addition the results demonstrate that random forests are less sensitive to the number of training samples and perform well even with only a small number. Random forests are computationally highly efficient and are hence considered very well suited for land cover classifications of future multifrequency and multitemporal stacks of SAR imagery. (C) 2009 International Society for Photogrammetry and Remote Sensing, Inc. (ISPRS). Published by Elsevier B.V. All rights reserved.

    @Article{waske2009classifier,
    title = {Classifier ensembles for land cover mapping using multitemporal SAR imagery},
    author = {Waske, Bj\"orn and Braun, Matthias},
    journal = {ISPRS Journal of Photogrammetry and Remote Sensing},
    year = {2009},
    month = sep,
    number = {5},
    pages = {450--457},
    volume = {64},
    abstract = {SAR data are almost independent from weather conditions, and thus are well suited for mapping of seasonally changing variables such as land cover. In regard to recent and upcoming missions, multitemporal and multi-frequency approaches become even more attractive. In the present study, classifier ensembles (i.e., boosted decision tree and random forests) are applied to multi-temporal C-band SAR data, from different study sites and years. A detailed accuracy assessment shows that classifier ensembles, in particularly random forests, outperform standard approaches like a single decision tree and a conventional maximum likelihood classifier by more than 10% independently from the site and year. They reach up to almost 84% of overall accuracy in rural areas with large plots. Visual interpretation confirms the statistical accuracy assessment and reveals that also typical random noise is considerably reduced. In addition the results demonstrate that random forests are less sensitive to the number of training samples and perform well even with only a small number. Random forests are computationally highly efficient and are hence considered very well suited for land cover classifications of future multifrequency and multitemporal stacks of SAR imagery. (C) 2009 International Society for Photogrammetry and Remote Sensing, Inc. (ISPRS). Published by Elsevier B.V. All rights reserved.},
    doi = {10.1016/j.isprsjprs.2009.01.003},
    issn = {0924-2716},
    owner = {waske},
    timestamp = {2012.09.04},
    }

  • B. Waske, M. Chi, J. A. Benediktsson, S. van der Linden, and B. Koetz, “Geospatial Technology for Earth Observation,” in Geospatial Technology for Earth Observation, D. Li, J. Shan, and J. Gong, Eds., Springer US, 2009, p. 203–233. doi:10.1007/978-1-4419-0050-0_8
    [BibTeX]

    During the last decades the manner how the Earth is being observed was revolutionized. Earth Observation (EO) systems became a valuable and powerful tool to monitor the Earth and had significant impact on the acquisition and analysis of environmental data (Rosenquist et al. 2003). Currently, EO data play a major role in supporting decision-making and surveying compliance of several multilateral environmental treaties, such as the Kyoto Protocol, the Convention on Biological Diversity, or the European initiative Global Monitoring for Environment and Security, GMES (Peter 2004, Rosenquist et al. 2003, Backhaus and Beule 2005). However, the need for such long-term monitoring of the Earth’s surface requires the standardized and coordinated use of global EO data sets, which has led, e.g., to the international Global Earth Observation System of Systems (GEOSS) initiative as well as to the Global Climate Observation System (GCOS) implementation plan (GCOS 2004, GEO 2005). The evolving EO technologies together with the requirements and standards arising from their exploitation demand increasingly improving algorithms, especially in the field of land cover classification

    @InCollection{waske2009geospatial,
    title = {Algorithms and Applications for Land Cover Classification - A Review},
    author = {Waske, Bj\"orn and Chi, Mingmin and Benediktsson, Jon Atli and van der Linden, Sebastian and Koetz, Benjamin},
    booktitle = {Geospatial Technology for Earth Observation},
    editor = {Li, Deren and Shan, Jie and Gong, Jianya},
    pages = {203--233},
    publisher = {Springer US},
    year = {2009},
    abstract = {During the last decades the manner how the Earth is being observed was revolutionized. Earth Observation (EO) systems became a valuable and powerful tool to monitor the Earth and had significant impact on the acquisition and analysis of environmental data (Rosenquist et al. 2003). Currently, EO data play a major role in supporting decision-making and surveying compliance of several multilateral environmental treaties, such as the Kyoto Protocol, the Convention on Biological Diversity, or the European initiative Global Monitoring for Environment and Security, GMES (Peter 2004, Rosenquist et al. 2003, Backhaus and Beule 2005). However, the need for such long-term monitoring of the Earth's surface requires the standardized and coordinated use of global EO data sets, which has led, e.g., to the international Global Earth Observation System of Systems (GEOSS) initiative as well as to the Global Climate Observation System (GCOS) implementation plan (GCOS 2004, GEO 2005). The evolving EO technologies together with the requirements and standards arising from their exploitation demand increasingly improving algorithms, especially in the field of land cover classification},
    affiliation = {Faculty of Electrical and Computer Engineering, University of Iceland, 107 Reykjavik, Iceland},
    doi = {10.1007/978-1-4419-0050-0_8},
    isbn = {978-1-4419-0050-0},
    keywords = {Earth and Environmental Science},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • B. Waske, M. Fauvel, J. A. Benediktsson, and J. Chanussot, “Machine Learning Techniques in Remote Sensing Data Analysis,” in Kernel Methods for Remote Sensing Data Analysis, G. Camps-Valls and L. Bruzzone, Eds., John Wiley & Sons, Ltd, 2009, p. 1–24. doi:10.1002/9780470748992.ch1
    [BibTeX]

    Several applications have been developed in the field of remote sensing image analysis during the last decades. Besides well-known statistical approaches, many recent methods are based on techniques taken from the field of machine learning. A major aim of machine learning algorithms in remote sensing is supervised classification, which is perhaps the most widely used image classification approach. In this chapter a brief introduction to machine learning and the different paradigms in remote sensing is given. Moreover this chapter briefly discusses the use of recent developments in supervised classification techniques such as neural networks, support vector machines and multiple classifier systems.

    @InCollection{waske2009machine,
    title = {Machine Learning Techniques in Remote Sensing Data Analysis},
    author = {Waske, Bj\"orn and Fauvel, Mathieu and Benediktsson, Jon Atli and Chanussot, Jocelyn},
    booktitle = {Kernel Methods for Remote Sensing Data Analysis},
    editor = {Camps-Valls, Gustavo and Bruzzone, Lorenzo},
    pages = {1--24},
    publisher = {John Wiley \& Sons, Ltd},
    year = {2009},
    abstract = {Several applications have been developed in the field of remote sensing image analysis during the last decades. Besides well-known statistical approaches, many recent methods are based on techniques taken from the field of machine learning. A major aim of machine learning algorithms in remote sensing is supervised classification, which is perhaps the most widely used image classification approach. In this chapter a brief introduction to machine learning and the different paradigms in remote sensing is given. Moreover this chapter briefly discusses the use of recent developments in supervised classification techniques such as neural networks, support vector machines and multiple classifier systems.},
    doi = {10.1002/9780470748992.ch1},
    isbn = {9780470748992},
    keywords = {machine learning techniques in remote sensing data analysis, machine learning algorithms in remote sensing and supervised classification, remote sensing challenges, machine learning (ML) - artificial intelligence area and learning from data, remote sensing paradigms, feature extraction and feature selection and dimensionality reduction, Tasseled Cap Transformation, ISODATA (iterative self-organizing data analysis), neural networks (NN) in pattern recognition and remote sensing context, development in field of (supervised) classification machine learning concepts},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • B. Waske, S. van der Linden, J. A. Benediktsson, A. Rabe, and P. Hostert, “Impact of different morphological profiles on the classification accuracy of urban hyperspectral data,” in First Workshop on Hyperspectral Image and Signal Processing: Evolution in Remote Sensing (WHISPERS), 2009. doi:10.1109/WHISPERS.2009.5289078
    [BibTeX]

    We present a detailed study on the classification of urban hyperspectral data with morphological profiles (MP). Although such a spectral-spatial classification approach may significantly increase achieved accuracy, the computational complexity as well as the increased dimensionality and redundancy of such data sets are potential drawbacks. This can be overcome by feature selection. Moreover it is useful to derive detailed information on the contribution of different components from MP to the classification accuracy by evaluating these subsets. We apply a wrapper approach for feature selection based on support vector machines (SVM) with sequential feature forward selection (FFS) search strategy to two hyperspectral data sets that contain the first principal components (PC) and various corresponding MP from an urban area. In doing so, we identify feature subsets of increasing size that perform best in terms of kappa for the given setup. Results clearly demonstrate that maximum classification accuracies are achieved already on small feature subsets with few morphological profiles.

    @InProceedings{waske2009impact,
    title = {Impact of different morphological profiles on the classification accuracy of urban hyperspectral data},
    author = {Waske, Bj\"orn and van der Linden, S. and Benediktsson, J. A. and Rabe, A. and Hostert, P.},
    booktitle = {First Workshop on Hyperspectral Image and Signal Processing: Evolution in Remote Sensing (WHISPERS)},
    year = {2009},
    abstract = {We present a detailed study on the classification of urban hyperspectral data with morphological profiles (MP). Although such a spectral-spatial classification approach may significantly increase achieved accuracy, the computational complexity as well as the increased dimensionality and redundancy of such data sets are potential drawbacks. This can be overcome by feature selection. Moreover it is useful to derive detailed information on the contribution of different components from MP to the classification accuracy by evaluating these subsets. We apply a wrapper approach for feature selection based on support vector machines (SVM) with sequential feature forward selection (FFS) search strategy to two hyperspectral data sets that contain the first principal components (PC) and various corresponding MP from an urban area. In doing so, we identify feature subsets of increasing size that perform best in terms of kappa for the given setup. Results clearly demonstrate that maximum classification accuracies are achieved already on small feature subsets with few morphological profiles.},
    doi = {10.1109/WHISPERS.2009.5289078},
    keywords = {FFS search;computational complexity;feature forward selection;hyperspectral image;mathematical morphology;morphological profile;principal component;spectral-spatial classification;support vector machine;urban hyperspectral data classification;wrapper approach;feature extraction;image classification;mathematical morphology;principal component analysis;support vector machines;},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • S. Wenzel and W. Förstner, “The Role of Sequences for Incremental Learning,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2009-04, 2009.
    [BibTeX] [PDF]

    This report points out the role of sequences of samples for training an incremental learning method. We define characteristics of incremental learning methods to describe the influence of sample ordering on the performance of a learned model. Different types of experiments evaluate these properties for two different datasets and two different incremental learning methods. We show how to find sequences of classes for training just based on the data to get always best possible error rates. This is based on the estimation of Bayes error bounds.

    @TechReport{wenzel2009role,
    author      = {Wenzel, Susanne and F\"orstner, Wolfgang},
    title       = {The Role of Sequences for Incremental Learning},
    institution = {Department of Photogrammetry, University of Bonn},
    number      = {TR-IGG-P-2009-04},
    month       = oct,
    year        = {2009},
    abstract    = {This report points out the role of sequences of samples for training an incremental learning method. We define characteristics of incremental learning methods to describe the influence of sample ordering on the performance of a learned model. Different types of experiments evaluate these properties for two different datasets and two different incremental learning methods. We show how to find sequences of classes for training just based on the data to get always best possible error rates. This is based on the estimation of Bayes error bounds.},
    url         = {https://www.ipb.uni-bonn.de/pdfs/Wenzel2009Role.pdf},
    }

  • K. M. Wurm, R. Kuemmerle, C. Stachniss, and W. Burgard, “Improving Robot Navigation in Structured Outdoor Environments by Identifying Vegetation from Laser Data,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2009.
    [BibTeX] [PDF]
    [none]
    @InProceedings{wurm2009,
    title = {Improving Robot Navigation in Structured Outdoor Environments by Identifying Vegetation from Laser Data},
    author = {Wurm, K. M. and Kuemmerle, R. and Stachniss, C. and Burgard, W.},
    booktitle = iros,
    year = {2009},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/wurm09iros.pdf},
    }

  • M. Y. Yang, “Multiregion Level-set Segmentation of Synthetic Aperture Radar Images,” in IEEE International Conf. on Image Processing, Cairo, 2009, p. 1717–1720. doi:10.1109/ICIP.2009.5413378
    [BibTeX] [PDF]

    Due to the presence of speckle, segmentation of SAR images is generally acknowledged as a difficult problem. A large effort has been done in order to cope with the influence of speckle noise on image segmentation such as edge detection or direct global segmentation. Recent works address this problem by using statistical image representation and deformable models. We suggest a novel variational approach to SAR image segmentation, which consists of minimizing a functional containing an original observation term derived from maximum a posteriori (MAP) estimation framework and a Gamma image representation. The minimization is carried out efficiently by a new multiregion method which embeds a simple partition assumption directly in curve evolution to guarantee a partition of the image domain from an arbitrary initial partition. Experiments on both synthetic and real images show the effectiveness of the proposed method.

    @InProceedings{yang2009multiregion,
    author    = {Yang, Michael Ying},
    title     = {Multiregion Level-set Segmentation of Synthetic Aperture Radar Images},
    booktitle = {IEEE International Conf. on Image Processing},
    address   = {Cairo},
    pages     = {1717--1720},
    year      = {2009},
    abstract  = {Due to the presence of speckle, segmentation of SAR images is generally acknowledged as a difficult problem. A large effort has been done in order to cope with the influence of speckle noise on image segmentation such as edge detection or direct global segmentation. Recent works address this problem by using statistical image representation and deformable models. We suggest a novel variational approach to SAR image segmentation, which consists of minimizing a functional containing an original observation term derived from maximum a posteriori (MAP) estimation framework and a Gamma image representation. The minimization is carried out efficiently by a new multiregion method which embeds a simple partition assumption directly in curve evolution to guarantee a partition of the image domain from an arbitrary initial partition. Experiments on both synthetic and real images show the effectiveness of the proposed method.},
    doi       = {10.1109/ICIP.2009.5413378},
    url       = {https://www.ipb.uni-bonn.de/pdfs/Yang2009Multiregion.pdf},
    }

  • Y. Yang, “Remote sensing image registration via active contour model,” International Journal of Electronics and Communications, vol. 65, p. 227–234, 2009. doi:10.1016/j.aeue.2008.01.003
    [BibTeX]

    Image registration is the process by which we determine a transformation that provides the most accurate match between two images. The search for the matching transformation can be automated with the use of a suitable metric, but it can be very time-consuming and tedious. In this paper, we introduce a registration algorithm that combines active contour segmentation together with mutual information. Our approach starts with a segmentation procedure. It is formed by a novel geometric active contour, which incorporates edge knowledge, namely Edgeflow, into active contour model. Two edgemap images filled with closed contours are obtained. After ruling out mismatched curves, we use mutual information (MI) as a similarity measure to register two edgemap images. Experimental results are provided to illustrate the performance of the proposed registration algorithm using both synthetic and multisensor images. Quantitative error analysis is also provided and several images are shown for subjective evaluation.

    @Article{yang2009remote,
    author   = {Yang, Ying},
    title    = {Remote sensing image registration via active contour model},
    journal  = {International Journal of Electronics and Communications},
    volume   = {65},
    pages    = {227--234},
    year     = {2009},
    abstract = {Image registration is the process by which we determine a transformation that provides the most accurate match between two images. The search for the matching transformation can be automated with the use of a suitable metric, but it can be very time-consuming and tedious. In this paper, we introduce a registration algorithm that combines active contour segmentation together with mutual information. Our approach starts with a segmentation procedure. It is formed by a novel geometric active contour, which incorporates edge knowledge, namely Edgeflow, into active contour model. Two edgemap images filled with closed contours are obtained. After ruling out mismatched curves, we use mutual information (MI) as a similarity measure to register two edgemap images. Experimental results are provided to illustrate the performance of the proposed registration algorithm using both synthetic and multisensor images. Quantitative error analysis is also provided and several images are shown for subjective evaluation.},
    doi      = {10.1016/j.aeue.2008.01.003},
    }

2008

  • C. Beder and R. Steffen, “Incremental estimation without specifying a-priori covariance matrices for the novel parameters,” in VLMP Workshop on CVPR, Anchorage, USA, 2008. doi:10.1109/CVPRW.2008.4563139
    [BibTeX] [PDF]

    We will present a novel incremental algorithm for the task of online least-squares estimation. Our approach aims at combining the accuracy of least-squares estimation and the fast computation of recursive estimation techniques like the Kalman filter. Analyzing the structure of least-squares estimation we devise a novel incremental algorithm, which is able to introduce new unknown parameters and observations into an estimation simultaneously and is equivalent to the optimal overall estimation in case of linear models. It constitutes a direct generalization of the well-known Kalman filter allowing to augment the state vector inside the update step. In contrast to classical recursive estimation techniques no artificial initial covariance for the new unknown parameters is required here. We will show, how this new algorithm allows more flexible parameter estimation schemes especially in the case of scene and motion reconstruction from image sequences. Since optimality is not guaranteed in the non-linear case we will also compare our incremental estimation scheme to the optimal bundle adjustment on a real image sequence. It will be shown that competitive results are achievable using the proposed technique.

    @InProceedings{beder2008incremental,
    author    = {Beder, Christian and Steffen, Richard},
    title     = {Incremental estimation without specifying a-priori covariance matrices for the novel parameters},
    booktitle = {VLMP Workshop on CVPR},
    address   = {Anchorage, USA},
    year      = {2008},
    abstract  = {We will present a novel incremental algorithm for the task of online least-squares estimation. Our approach aims at combining the accuracy of least-squares estimation and the fast computation of recursive estimation techniques like the Kalman filter. Analyzing the structure of least-squares estimation we devise a novel incremental algorithm, which is able to introduce new unknown parameters and observations into an estimation simultaneously and is equivalent to the optimal overall estimation in case of linear models. It constitutes a direct generalization of the well-known Kalman filter allowing to augment the state vector inside the update step. In contrast to classical recursive estimation techniques no artificial initial covariance for the new unknown parameters is required here. We will show, how this new algorithm allows more flexible parameter estimation schemes especially in the case of scene and motion reconstruction from image sequences. Since optimality is not guaranteed in the non-linear case we will also compare our incremental estimation scheme to the optimal bundle adjustment on a real image sequence. It will be shown that competitive results are achievable using the proposed technique.},
    doi       = {10.1109/CVPRW.2008.4563139},
    url       = {https://www.ipb.uni-bonn.de/pdfs/Beder2008Incremental.pdf},
    }

  • J. A. Benediktsson, X. Ceamanos Garcia, B. Waske, J. Chanussot, J. R. Sveinsson, and M. Fauvel, “Ensemble Methods for Classification of Hyperspectral Data,” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS), 2008. doi:10.1109/IGARSS.2008.4778793
    [BibTeX]

    The classification of hyperspectral data is addressed using a classifier ensemble based on Support Vector Machines (SVM). First of all, the hyperspectral data set is decomposed into few sources according to the spectral bands correlation. Then, each source is treated separately and classified by an SVM classifier. Finally, all outputs are used as inputs for the final decision fusion, performed by an additional SVM classifier. The results of experiments clearly show that the proposed SVM-based decision fusion outperforms a single SVM classifier in terms of overall accuracies.

    @InProceedings{benediktsson2008ensemble,
    title = {Ensemble Methods for Classification of Hyperspectral Data},
    author = {Benediktsson, Jon Atli and Ceamanos Garcia, X. and Waske, Bj\"orn and Chanussot, J. and Sveinsson, J. R. and Fauvel, M.},
    booktitle = {IEEE International Geoscience and Remote Sensing Symposium (IGARSS)},
    year = {2008},
    abstract = {The classification of hyperspectral data is addressed using a classifier ensemble based on Support Vector Machines (SVM). First of all, the hyperspectral data set is decomposed into few sources according to the spectral bands correlation. Then, each source is treated separately and classified by an SVM classifier. Finally, all outputs are used as inputs for the final decision fusion, performed by an additional SVM classifier. The results of experiments clearly show that the proposed SVM-based decision fusion outperforms a single SVM classifier in terms of overall accuracies.},
    doi = {10.1109/IGARSS.2008.4778793},
    keywords = {Gaussian maximum likelihood method;SVM classifier;Support Vector Machines;decision fusion;ensemble classifier method;hyperspectral data classification;multisensor image classification;pattern recognition;spectral band correlation;geophysical techniques;geophysics computing;image classification;image processing;maximum likelihood estimation;pattern recognition;remote sensing;support vector machines},
    timestamp = {2012.09.05},
    }

  • T. Dickscheid, T. Läbe, and W. Förstner, “Benchmarking Automatic Bundle Adjustment Results,” in 21st Congress of the International Society for Photogrammetry and Remote Sensing (ISPRS), Beijing, China, 2008, p. 7–12, Part B3a.
    [BibTeX] [PDF]

    In classical photogrammetry, point observations are manually determined by an operator for performing the bundle adjustment of a sequence of images. In such cases, a comparison of different estimates is usually carried out with respect to the estimated 3D object points. Today, a broad range of automatic methods are available for extracting and matching point features across images, even in the case of widely separated views and under strong deformations. This allows for fully automatic solutions to the relative orientation problem, and even to the bundle triangulation in case that manually measured control points are available. However, such systems often contain random subprocedures like RANSAC for eliminating wrong correspondences, yielding different 3D points but hopefully similar orientation parameters. This causes two problems for the evaluation: First, the randomness of the algorithm has an influence on its stability, and second, we are constrained to compare the orientation parameters instead of the 3D points. We propose a method for benchmarking automatic bundle adjustments which takes these constraints into account and uses the orientation parameters directly. Given sets of corresponding orientation parameters, we require our benchmark test to address their consistency of the form deviation and the internal precision and their precision level related to the precision of a reference data set. Besides comparing different bundle adjustment methods, the approach may be used to safely evaluate effects of feature operators, matching strategies, control parameters and other design decisions for a particular method. The goal of this paper is to derive appropriate measures to cover these aspects, describe a coherent benchmarking scheme and show the feasibility of the approach using real data.

    @InProceedings{dickscheid2008benchmarking,
      author    = {Dickscheid, Timo and L\"abe, Thomas and F\"orstner, Wolfgang},
      title     = {Benchmarking Automatic Bundle Adjustment Results},
      booktitle = {21st Congress of the International Society for Photogrammetry and Remote Sensing (ISPRS)},
      address   = {Beijing, China},
      year      = {2008},
      pages     = {7--12, Part B3a},
      abstract  = {In classical photogrammetry, point observations are manually determined by an operator for performing the bundle adjustment of a sequence of images. In such cases, a comparison of different estimates is usually carried out with respect to the estimated 3D object points. Today, a broad range of automatic methods are available for extracting and matching point features across images, even in the case of widely separated views and under strong deformations. This allows for fully automatic solutions to the relative orientation problem, and even to the bundle triangulation in case that manually measured control points are available. However, such systems often contain random subprocedures like RANSAC for eliminating wrong correspondences, yielding different 3D points but hopefully similar orientation parameters. This causes two problems for the evaluation: First, the randomness of the algorithm has an influence on its stability, and second, we are constrained to compare the orientation parameters instead of the 3D points. We propose a method for benchmarking automatic bundle adjustments which takes these constraints into account and uses the orientation parameters directly. Given sets of corresponding orientation parameters, we require our benchmark test to address their consistency of the form deviation and the internal precision and their precision level related to the precision of a reference data set. Besides comparing different bundle adjustment methods, the approach may be used to safely evaluate effects of feature operators, matching strategies, control parameters and other design decisions for a particular method. The goal of this paper is to derive appropriate measures to cover these aspects, describe a coherent benchmarking scheme and show the feasibility of the approach using real data.},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Dickscheid2008Benchmarking.pdf},
    }

  • M. Drauschke, “Description of Stable Regions IPM,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2008-03, 2008.
    [BibTeX] [PDF]

    The Stable Regions Image Processing Module is a low-level region detector. It delivers image parts of interest without any further interpretation. These image parts are all regions of an image which do not change much over a certain range in scale space of the image. The output of this IPM is a list of polygons of any shape and their rectangular bounding boxes, which both are saved into an xml-file.

    @TechReport{drauschke2008description,
    title = {Description of {Stable Regions} {IPM}},
    author = {Drauschke, Martin},
    institution = {Department of Photogrammetry, University of Bonn},
    year = {2008},
    month = mar,
    number = {TR-IGG-P-2008-03},
    abstract = {The Stable Regions Image Processing Module is a low-level region detector. It delivers image parts of interest without any further interpretation. These image parts are all regions of an image which do not change much over a certain range in scale space of the image. The output of this IPM is a list of polygons of any shape and their rectangular bounding boxes, which both are saved into an xml-file.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Drauschke2008Description.pdf},
    }

  • M. Drauschke, “Feature Subset Selection with Adaboost and ADTboost,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2008-04, 2008.
    [BibTeX] [PDF]

    This technical report presents feature subset selection methods for two boosting classification frameworks: Adaboost and ADTboost.

    @TechReport{drauschke2008feature,
    title = {Feature Subset Selection with {Adaboost} and {ADTboost}},
    author = {Drauschke, Martin},
    institution = {Department of Photogrammetry, University of Bonn},
    year = {2008},
    month = mar,
    number = {TR-IGG-P-2008-04},
    abstract = {This technical report presents feature subset selection methods for two boosting classification frameworks: Adaboost and ADTboost.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Drauschke2008Feature.pdf},
    }

  • M. Drauschke, “Multi-class ADTboost,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2008-06, 2008.
    [BibTeX] [PDF]

    This technical report gives a short review on boosting with alternating decision trees (ADTboost), which has been proposed by Freund & Mason (1999) and refined by De Comite et al. (2001). This approach is designed for two-class problems, and we extend it towards multi-class classification. The advantage of a multi-class boosting algorithm is its usage in scene interpretation with various kinds of objects. In these cases, two-class approaches will lead to several one class versus background (the other classes) classifications, where we must solve inappropriate results like “always background” or “two or more valid classes” for a sample.

    @TechReport{drauschke2008multi,
    title = {Multi-class {ADTboost}},
    author = {Drauschke, Martin},
    institution = {Department of Photogrammetry, University of Bonn},
    year = {2008},
    month = aug,
    number = {TR-IGG-P-2008-06},
    abstract = {This technical report gives a short review on boosting with alternating decision trees (ADTboost), which has been proposed by Freund \& Mason (1999) and refined by De Comite et al. (2001). This approach is designed for two-class problems, and we extend it towards multi-class classification. The advantage of a multi-class boosting algorithm is its usage in scene interpretation with various kinds of objects. In these cases, two-class approaches will lead to several one class versus background (the other classes) classifications, where we must solve inappropriate results like "always background" or "two or more valid classes" for a sample.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Drauschke2008Multi.pdf},
    }

  • M. Drauschke, “Verbesserung des Multi-Dodgings mittels bikubischer Interpolation,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2008-07, 2008.
    [BibTeX] [PDF]

    Aufgabenstellung: Digitalisierte 16-Bit-Luftbilder sollen automatisch verbessert werden. Dazu haben wir in (1) und (2) den Multi-Dodging-Ansatz vorgeschlagen. In diesem Verfahren wird ein Bild in sich nicht überlappende Ausschnitte (Patches) zerlegt. Dann wird in jedem dieser Bildausschnitte eine Histogrammverebnung durchgeführt. Da dieses Vorgehen die Patchgrenzen im verbesserten Bild hinterlässt, wurde abschließend zwischen den Patches bilinear interpoliert. In dieser Arbeit wird untersucht, ob die Verwendung einer bikubischen Interpolation an Stelle der bilinearen zu besseren Ergebnissen führt.

    @TechReport{drauschke2008verbesserung,
    title = {Verbesserung des {Multi-Dodgings} mittels bikubischer {Interpolation}},
    author = {Drauschke, Martin},
    institution = {Department of Photogrammetry, University of Bonn},
    year = {2008},
    number = {TR-IGG-P-2008-07},
    abstract = {Aufgabenstellung: Digitalisierte 16-Bit-Luftbilder sollen automatisch verbessert werden. Dazu haben wir in (1) und (2) den Multi-Dodging-Ansatz vorgeschlagen. In diesem Verfahren wird ein Bild in sich nicht \"uberlappende Ausschnitte (Patches) zerlegt. Dann wird in jedem dieser Bildausschnitte eine Histogrammverebnung durchgef\"uhrt. Da dieses Vorgehen die Patchgrenzen im verbesserten Bild hinterl\"asst, wurde abschlie{\ss}end zwischen den Patches bilinear interpoliert. In dieser Arbeit wird untersucht, ob die Verwendung einer bikubischen Interpolation an Stelle der bilinearen zu besseren Ergebnissen f\"uhrt.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Drauschke2008Verbesserung.pdf},
    }

  • M. Drauschke and W. Förstner, “Comparison of Adaboost and ADTboost for Feature Subset Selection,” in PRIS 2008, Barcelona, Spain, 2008, p. 113–122.
    [BibTeX] [PDF]

    This paper addresses the problem of feature selection within classification processes. We present a comparison of a feature subset selection with respect to two boosting methods, Adaboost and ADTboost. In our evaluation, we have focused on three different criteria: the classification error and the efficiency of the process depending on the number of most appropriate features and the number of training samples. Therefore, we discuss both techniques and sketch their functionality, where we restrict both boosting approaches to linear weak classifiers. We propose a feature subset selection method, which we evaluate on synthetic and on benchmark data sets.

    @InProceedings{drauschke2008comparison,
    title = {Comparison of {Adaboost} and {ADTboost} for Feature Subset Selection},
    author = {Drauschke, Martin and F\"orstner, Wolfgang},
    booktitle = {PRIS 2008},
    year = {2008},
    address = {Barcelona, Spain},
    pages = {113--122},
    abstract = {This paper addresses the problem of feature selection within classification processes. We present a comparison of a feature subset selection with respect to two boosting methods, Adaboost and ADTboost. In our evaluation, we have focused on three different criteria: the classification error and the efficiency of the process depending on the number of most appropriate features and the number of training samples. Therefore, we discuss both techniques and sketch their functionality, where we restrict both boosting approaches to linear weak classifiers. We propose a feature subset selection method, which we evaluate on synthetic and on benchmark data sets.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Drauschke2008Comparison.pdf},
    }

  • M. Drauschke and W. Förstner, “Selecting appropriate features for detecting buildings and building parts,” in 21st Congress of the International Society for Photogrammetry and Remote Sensing (ISPRS), Beijing, China, 2008, p. 447–452 Part B3b-1.
    [BibTeX] [PDF]

    The paper addresses the problem of feature selection during classification of image regions within the context of interpreting images showing highly structured objects such as buildings. We present a feature selection scheme that is connected with the classification framework Adaboost, cf. (Schapire and Singer, 1999). We constricted our weak learners on threshold classification on a single feature. Our experiments showed that the classification with Adaboost is based on relatively small subsets of features. Thus, we are able to find sets of appropriate features. We present our results on manually annotated and automatically segmented regions from facade images of the eTRIMS data base, where our focus were the object classes facade, roof, windows and window panes.

    @InProceedings{drauschke2008selecting,
    title = {Selecting appropriate features for detecting buildings and building parts},
    author = {Drauschke, Martin and F\"orstner, Wolfgang},
    booktitle = {21st Congress of the International Society for Photogrammetry and Remote Sensing (ISPRS)},
    year = {2008},
    address = {Beijing, China},
    pages = {447--452, Part B3b-1},
    abstract = {The paper addresses the problem of feature selection during classification of image regions within the context of interpreting images showing highly structured objects such as buildings. We present a feature selection scheme that is connected with the classification framework Adaboost, cf. (Schapire and Singer, 1999). We constricted our weak learners on threshold classification on a single feature. Our experiments showed that the classification with Adaboost is based on relatively small subsets of features. Thus, we are able to find sets of appropriate features. We present our results on manually annotated and automatically segmented regions from facade images of the eTRIMS data base, where our focus were the object classes facade, roof, windows and window panes.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Drauschke2008Selecting.pdf},
    }

  • B. Frank, M. Becker, C. Stachniss, M. Teschner, and W. Burgard, “Learning Cost Functions for Mobile Robot Navigation in Environments with Deformable Objects,” in Workshop on Path Planning on Cost Maps at the IEEE Int. Conf. on Robotics & Automation (ICRA), Pasadena, CA, USA, 2008.
    [BibTeX] [PDF]
    [none]
    @InProceedings{frank2008,
      author    = {Frank, B. and Becker, M. and Stachniss, C. and Teschner, M. and Burgard, W.},
      title     = {Learning Cost Functions for Mobile Robot Navigation in Environments with Deformable Objects},
      booktitle = icrawsplanning,
      address   = {Pasadena, CA, USA},
      year      = {2008},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/frank08icraws.pdf},
    }

  • B. Frank, M. Becker, C. Stachniss, M. Teschner, and W. Burgard, “Efficient Path Planning for Mobile Robots in Environments with Deformable Objects,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Pasadena, CA, USA, 2008.
    [BibTeX] [PDF]
    [none]
    @InProceedings{frank2008a,
      author    = {Frank, B. and Becker, M. and Stachniss, C. and Teschner, M. and Burgard, W.},
      title     = {Efficient Path Planning for Mobile Robots in Environments with Deformable Objects},
      booktitle = icra,
      address   = {Pasadena, CA, USA},
      year      = {2008},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/frank08icra.pdf},
    }

  • G. Grisetti, D. Lordi Rizzini, C. Stachniss, E. Olson, and W. Burgard, “Online Constraint Network Optimization for Efficient Maximum Likelihood Map Learning,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Pasadena, CA, USA, 2008.
    [BibTeX] [PDF]
    [none]
    @InProceedings{grisetti2008,
      author    = {Grisetti, G. and Lordi Rizzini, D. and Stachniss, C. and Olson, E. and Burgard, W.},
      title     = {Online Constraint Network Optimization for Efficient Maximum Likelihood Map Learning},
      booktitle = icra,
      address   = {Pasadena, CA, USA},
      year      = {2008},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/grisetti08icra.pdf},
    }

  • L. Jensen, “Automatische Detektion von Bombentrichtern,” Bachelor Thesis, 2008.
    [BibTeX] [PDF]

    Der Kampfmittelbeseitigungsdienst der Bezirksregierung Arnsberg nutzt Luftbilder aus dem Zweiten Weltkrieg zur Detektion von Blindgängern. Aufgrund der großen Anzahl an Bildern (über 300000) ist die Suche sehr aufwändig. Die Arbeit der Auswerter könnte erleichtert werden, wenn sie eine Karte hätten, auf der die Dichte der Bombardierung dargestellt ist. Um diese Karte zu erstellen, ist ein Verfahren notwendig, das die Bombentrichter auf den Bildern automatisch detektiert. Dieses wurde in der vorliegenden Bachelorarbeit realisiert. Da die Trichter sich in ihrer Gestalt und Größe stark unterscheiden, muss ein Ansatz zur Detektion gewählt werden, der mit diesen Variationen umgehen kann. Der Algorithmus führt eine Kandidatensuche mittels Kreuzkorrelation des Bildes mit einem repräsentativen Trichter-Template in verschiedenen Größen durch und klassifiziert die gefundenen Kandidaten anschließend. Die Klassifizierung erfolgt mit Hilfe der Wahrscheinlichkeitsdichte der Verteilungen der Klassen Trichter und Hintergrund. Um die Verteilungsparameter zu schätzen, ist die Dimensionsreduktion des Merkmalsraums der Trainingsdaten mit einer Hauptkomponentenanalyse (PCA) und einer linearen Diskriminanzanalyse nach Fisher (LDA) und anschließender Projektion in den Unterraum notwendig. In dieser Arbeit wurde das Verfahren mit einer Trichterklasse implementiert, es kann aber gut auf verschiedene Trichterklassen erweitert werden. Der Algorithmus zur Bombentrichterdetektion wurde in Matlab implementiert. Nach der Vorverarbeitung des Bildmaterials mussten zur Erstellung des Templates zunächst Trainingsbilder annotiert werden. Außerdem waren bei der Umsetzung verschiedene Parameter, wie z.B. die Templategrößen zur Kandidatensuche, die Dimension des PCA-Raums und die Bildausschnittsgröße bei der Klassifikation zu bestimmen. Zur Beurteilung der Ergebnisse wurde der Algorithmus auf den Trainingsbildern getestet und die Ergebnisse mit den Referenzdaten verglichen. 
Je nachdem ob vier oder fünf Templategrößen verwendet werden, können mit dem erstellten Template etwa 75% oder 80% der Trichter erfasst werden. Nach der Klassifikation werden mit dem implementierten Algorithmus je nach Konfiguration zwischen 70% und 64% der Trichter detektiert, dabei ist die Relevanz allerdings sehr gering. Maximal sind etwa 31% der als Trichter klassifizierten Bildausschnitte auch tatsächlich Bombentrichter. Bei der Analyse der false positives auf Testbildern ergab sich, dass bestimmte Bildstrukturen, wie Hausdächer, Schattenwurf an Straßen, Texturen in Feldern oder Waldstrukturen immer wieder fälschlicherweise als Trichter klassifiziert werden. Bei der Untersuchung der nicht detektierten Bombentrichter konnten Trichterklassen abgeleitet werden, die mit dem erstellten Template nicht detektiert werden. Mit den Testbildern wurde außerdem die Möglichkeit untersucht, die Bilder mit Hilfe der Bombentrichterdetektion in die Kategorien schwache, mittlere und starke Bombardierung einzuordnen. Hierbei wurden 73% der Bilder der richtigen Kategorie zugeordnet. Bei einer Steigerung der Relevanz und der Annotation weiterer Testbilder ist eine bessere Einordnung zu erwarten. Insgesamt liegt mit dieser Arbeit ein vielversprechender Ansatz zur Bombentrichterdetektion mit großer Erweiterungsmöglichkeit vor.

    @MastersThesis{jensen2008automatische,
    title = {Automatische Detektion von Bombentrichtern},
    author = {Jensen, Laura},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2008},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Inform. Martin Drauschke},
    type = {Bachelor Thesis},
    abstract = {Der Kampfmittelbeseitigungsdienst der Bezirksregierung Arnsberg nutzt Luftbilder aus dem Zweiten Weltkrieg zur Detektion von Blindg\"angern. Aufgrund der gro{\ss}en Anzahl an Bildern (\"uber 300000) ist die Suche sehr aufw\"andig. Die Arbeit der Auswerter k\"onnte erleichtert werden, wenn sie eine Karte h\"atten, auf der die Dichte der Bombardierung dargestellt ist. Um diese Karte zu erstellen, ist ein Verfahren notwendig, das die Bombentrichter auf den Bildern automatisch detektiert. Dieses wurde in der vorliegenden Bachelorarbeit realisiert. Da die Trichter sich in ihrer Gestalt und Gr\"o{\ss}e stark unterscheiden, muss ein Ansatz zur Detektion gew\"ahlt werden, der mit diesen Variationen umgehen kann. Der Algorithmus f\"uhrt eine Kandidatensuche mittels Kreuzkorrelation des Bildes mit einem repr\"asentativen Trichter-Template in verschiedenen Gr\"o{\ss}en durch und klassifiziert die gefundenen Kandidaten anschlie{\ss}end. Die Klassifizierung erfolgt mit Hilfe der Wahrscheinlichkeitsdichte der Verteilungen der Klassen Trichter und Hintergrund. Um die Verteilungsparameter zu sch\"atzen, ist die Dimensionsreduktion des Merkmalsraums der Trainingsdaten mit einer Hauptkomponentenanalyse (PCA) und einer linearen Diskriminanzanalyse nach Fisher (LDA) und anschlie{\ss}ender Projektion in den Unterraum notwendig. In dieser Arbeit wurde das Verfahren mit einer Trichterklasse implementiert, es kann aber gut auf verschiedene Trichterklassen erweitert werden. Der Algorithmus zur Bombentrichterdetektion wurde in Matlab implementiert. Nach der Vorverarbeitung des Bildmaterials mussten zur Erstellung des Templates zun\"achst Trainingsbilder annotiert werden. Au{\ss}erdem waren bei der Umsetzung verschiedene Parameter, wie z.B. die Templategr\"o{\ss}en zur Kandidatensuche, die Dimension des PCA-Raums und die Bildausschnittsgr\"o{\ss}e bei der Klassifikation zu bestimmen. 
Zur Beurteilung der Ergebnisse wurde der Algorithmus auf den Trainingsbildern getestet und die Ergebnisse mit den Referenzdaten verglichen. Je nachdem ob vier oder f\"unf Templategr\"o{\ss}en verwendet werden, k\"onnen mit dem erstellten Template etwa 75\% oder 80\% der Trichter erfasst werden. Nach der Klassifikation werden mit dem implementierten Algorithmus je nach Konfiguration zwischen 70\% und 64\% der Trichter detektiert, dabei ist die Relevanz allerdings sehr gering. Maximal sind etwa 31\% der als Trichter klassifizierten Bildausschnitte auch tats\"achlich Bombentrichter. Bei der Analyse der false positives auf Testbildern ergab sich, dass bestimmte Bildstrukturen, wie Hausd\"acher, Schattenwurf an Stra{\ss}en, Texturen in Feldern oder Waldstrukturen immer wieder f\"alschlicherweise als Trichter klassifiziert werden. Bei der Untersuchung der nicht detektierten Bombentrichter konnten Trichterklassen abgeleitet werden, die mit dem erstellten Template nicht detektiert werden. Mit den Testbildern wurde au{\ss}erdem die M\"oglichkeit untersucht, die Bilder mit Hilfe der
    Bombentrichterdetektion in die Kategorien schwache, mittlere und starke Bombardierung einzuordnen. Hierbei wurden 73\% der Bilder der richtigen Kategorie zugeordnet. Bei einer Steigerung der Relevanz und der Annotation weiterer Testbilder ist eine bessere Einordnung zu erwarten. Insgesamt liegt mit dieser Arbeit ein vielversprechender Ansatz zur Bombentrichterdetektion mit gro{\ss}er Erweiterungsm\"oglichkeit vor.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Jensen2008Automatische.pdf},
    }

  • L. Jensen, “Schattenentfernung aus Farbbildern mit dem Retinex-Algorithmus,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2008-01, 2008.
    [BibTeX]