@inproceedings{zeppelzauer_automatic_2018,
  address   = {Yokohama, Japan},
  title     = {Automatic {Prediction} of {Building} {Age} from {Photographs}},
  isbn      = {978-1-4503-5046-4},
  url       = {https://arxiv.org/pdf/1804.02205},
  doi       = {10/ghpp2k},
  language  = {en},
  urldate   = {2018-10-10},
  booktitle = {Proceedings of the {ACM} {International} {Conference} on {Multimedia} {Retrieval} ({ICMR} '18)},
  publisher = {ACM Press},
  author    = {Zeppelzauer, Matthias and Despotovic, Miroslav and Sakeena, Muntaha and Koch, David and Döller, Mario},
  year      = {2018},
  note      = {Projekt: ImmBild Projekt: ImmoAge},
  keywords  = {Center for Artificial Intelligence, Computer Vision, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Schriftpublikation, Visual Computing, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed},
  pages     = {126--134},
}

@inproceedings{zeppelzauer_sonicontrol_2018,
  address       = {Seoul, South Korea},
  title         = {{SoniControl} - {A} {Mobile} {Ultrasonic} {Firewall}},
  url           = {https://arxiv.org/abs/1807.07617},
  doi           = {10/gh377f},
  abstract      = {The exchange of data between mobile devices in the near-ultrasonic frequency band is a new promising technology for near field communication (NFC) but also raises a number of privacy concerns. We present the first ultrasonic firewall that reliably detects ultrasonic communication and provides the user with effective means to prevent hidden data exchange. This demonstration showcases a new media-based communication technology ("data over audio") together with its related privacy concerns. It enables users to (i) interactively test out and experience ultrasonic information exchange and (ii) shows how to protect oneself against unwanted tracking.},
  urldate       = {2018-10-10},
  booktitle     = {Proceedings of the {ACM} {International} {Conference} on {Multimedia}},
  publisher     = {ACM Press},
  author        = {Zeppelzauer, Matthias and Ringot, Alexis and Taurer, Florian},
  year          = {2018},
  eprint        = {1807.07617},
  archiveprefix = {arXiv},
  keywords      = {Acoustic Cookies, Acoustic Firewall, Acoustic Tracking, Center for Artificial Intelligence, Computer Science - Cryptography and Security, Computer Science - Multimedia, FH SP Cyber Security, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Ultrasound Data Transmission, Wiss. Beitrag, best, best-aringot, peer-reviewed},
}

@article{zeppelzauer_automated_2013,
  title    = {Automated detection of elephants in wildlife video},
  volume   = {2013},
  issn     = {1687-5281},
  url      = {https://doi.org/10.1186/1687-5281-2013-46},
  doi      = {10.1186/1687-5281-2013-46},
  abstract = {Biologists often have to investigate large amounts of video in behavioral studies of animals. These videos are usually not sufficiently indexed which makes the finding of objects of interest a time-consuming task. We propose a fully automated method for the detection and tracking of elephants in wildlife video which has been collected by biologists in the field. The method dynamically learns a color model of elephants from a few training images. Based on the color model, we localize elephants in video sequences with different backgrounds and lighting conditions. We exploit temporal clues from the video to improve the robustness of the approach and to obtain spatial and temporal consistent detections. The proposed method detects elephants (and groups of elephants) of different sizes and poses performing different activities. The method is robust to occlusions (e.g., by vegetation) and correctly handles camera motion and different lighting conditions. Experiments show that both near- and far-distant elephants can be detected and tracked reliably. The proposed method enables biologists efficient and direct access to their video collections which facilitates further behavioral and ecological studies. The method does not make hard constraints on the species of elephants themselves and is thus easily adaptable to other animal species.},
  number   = {1},
  journal  = {EURASIP Journal on Image and Video Processing},
  author   = {Zeppelzauer, Matthias},
  month    = aug,
  year     = {2013},
  keywords = {Center for Artificial Intelligence, Computer Vision, Extern, FH SP Data Analytics \& Visual Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Wiss. Beitrag},
  pages    = {46},
}

@article{bernard_vial_2018,
  title     = {{VIAL} – {A} {Unified} {Process} for {Visual}-{Interactive} {Labeling}},
  volume    = {34},
  copyright = {Springer, Berlin, Heidelberg},
  issn      = {1432-2315},
  url       = {https://bit.ly/2My1Yrt},
  doi       = {10/gd5hr3},
  abstract  = {The assignment of labels to data instances is a fundamental prerequisite for many machine learning tasks. Moreover, labeling is a frequently applied process in visual-interactive analysis approaches and visual analytics. However, the strategies for creating labels usually differ between these two fields. This raises the question whether synergies between the different approaches can be attained. In this paper, we study the process of labeling data instances with the user in the loop, from both the machine learning and visual-interactive perspective. Based on a review of differences and commonalities, we propose the ’Visual-Interactive Labeling‘ (VIAL) process that unifies both approaches. We describe the six major steps of the process and discuss their specific challenges. Additionally, we present two heterogeneous usage scenarios from the novel VIAL perspective, one on metric distance learning and one on object detection in videos. Finally, we discuss general challenges to VIAL and point out necessary work for the realization of future VIAL approaches.},
  journal   = {The Visual Computer},
  author    = {Bernard, Jürgen and Zeppelzauer, Matthias and Sedlmair, Michael and Aigner, Wolfgang},
  year      = {2018},
  note      = {Projekt: KAVA-Time Projekt: IntelliGait Projekt: CARMA},
  keywords  = {Active Learning, Candidate Selection, Center for Artificial Intelligence, Creative Industries, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Interactive Labeling, Labeling Strategies, Machine Learning, Media Computing Group, Visual Interactive Labeling, best, best-mzeppelzauer, information visualization},
  pages     = {1189--1207},
}

@article{stoger_visualizing_2012,
  title    = {Visualizing {Sound} {Emission} of {Elephant} {Vocalizations}: {Evidence} for {Two} {Rumble} {Production} {Types}},
  volume   = {7},
  url      = {http://dx.plos.org/10.1371/journal.pone.0048907},
  doi      = {10.1371/journal.pone.0048907},
  number   = {11},
  journal  = {PLoS ONE},
  author   = {Stöger, A. and Heimann, G. and Zeppelzauer, Matthias and Ganswindt, A. and Hensman, S. and Charlton, B.},
  year     = {2012},
  keywords = {Acoustic Camera, Aggregated LPC Spectral Features, Audio classification, Center for Artificial Intelligence, Content-based Audio Retrieval, Extern, FH SP Data Analytics \& Visual Computing, Formant Analysis, Infrasonic Sound Analysis, Institut für Creative Media Technologies, LPC, LPC Spectrogram, Linear Predictive Coding, Machine Learning, Media Computing Group, Sound Classification, Sound Recognition, Wiss. Beitrag, best-mzeppelzauer, pattern recognition},
  pages    = {e48907},
}

@inproceedings{zielinski_persistence_2019,
  address       = {Macao, China},
  title         = {Persistence {Bag}-of-{Words} for {Topological} {Data} {Analysis}},
  url           = {http://arxiv.org/abs/1802.04852},
  doi           = {10/ghpp7z},
  urldate       = {2018-10-10},
  booktitle     = {Proceedings of the {International} {Joint} {Conference} on {Artificial} {Intelligence} 2019},
  author        = {Zieliński, Bartosz and Lipiński, Michał and Juda, Mateusz and Zeppelzauer, Matthias and Dłotko, Paweł},
  year          = {2019},
  eprint        = {1802.04852},
  archiveprefix = {arXiv},
  keywords      = {Artificial Intelligence, Center for Artificial Intelligence, Computer Vision, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Mathematics - Algebraic Topology, Media Computing Group, Statistics, Vortrag, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed},
  pages         = {6},
}

@article{zeppelzauer_multimodal_2016,
  title    = {Multimodal classification of events in social media},
  issn     = {0262-8856},
  url      = {https://arxiv.org/pdf/1601.00599},
  doi      = {10/ghpp2q},
  abstract = {A large amount of social media hosted on platforms like Flickr and Instagram is related to social events. The task of social event classification refers to the distinction of event and non-event-related contents as well as the classification of event types (e.g. sports events and concerts). In this paper, we provide an extensive study of textual, visual, as well as multimodal representations for social event classification. We investigate the strengths and weaknesses of the modalities and study the synergy effects between the modalities. Experimental results obtained with our multimodal representation outperform state-of-the-art methods and provide a new baseline for future research.},
  journal  = {Image and Vision Computing},
  author   = {Zeppelzauer, Matthias and Schopfhauser, Daniel},
  year     = {2016},
  keywords = {2016, Center for Artificial Intelligence, Computer Vision, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Multimodal retrieval, Publikationstyp Schriftpublikation, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed},
}

@article{zeppelzauer_study_2018,
  title    = {A {Study} on {Topological} {Descriptors} for the {Analysis} of {3D} {Surface} {Texture}},
  volume   = {167},
  issn     = {1077-3142},
  url      = {https://arxiv.org/pdf/1710.10662},
  doi      = {10/ghpp2h},
  abstract = {Methods from computational topology are becoming more and more popular in computer vision and have shown to improve the state-of-the-art in several tasks. In this paper, we investigate the applicability of topological descriptors in the context of 3D surface analysis for the classification of different surface textures. We present a comprehensive study on topological descriptors, investigate their robustness and expressiveness and compare them with state-of-the-art methods. Results show that class-specific information is reflected well in topological descriptors. The investigated descriptors can directly compete with non-topological descriptors and capture orthogonal information. Moreover they improve the state-of-the-art in combination with non-topological descriptors.},
  journal  = {Journal on Computer Vision and Image Understanding (CVIU)},
  author   = {Zeppelzauer, Matthias and Zielinski, Bartosz and Juda, Mateusz and Seidl, Markus},
  year     = {2018},
  note     = {Projekt: PITOTI 3D},
  keywords = {3D surface classification, Center for Artificial Intelligence, Computer Vision, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Surface texture analysis, Visual Computing, Wiss. Beitrag, best, best-lbseidl, best-mzeppelzauer, peer-reviewed, persistence diagram, persistence image, persistent homology, surface representation, surface topology analysis},
  pages    = {74--88},
}

@article{bernard_jurgen_comparing_2017,
  title    = {Comparing {Visual}-{Interactive} {Labeling} with {Active} {Learning}: {An} {Experimental} {Study}},
  volume   = {24},
  issn     = {1077-2626},
  url      = {http://eprints.cs.univie.ac.at/5257/1/bernard2017labeling.pdf},
  doi      = {10/gcqb3r},
  number   = {1},
  journal  = {IEEE Transactions on Visualization and Computer Graphics (TVCG)},
  author   = {Bernard, Jürgen and Hutter, Marco and Zeppelzauer, Matthias and Fellner, Dieter and Sedlmair, Michael},
  year     = {2017},
  keywords = {2017, Center for Artificial Intelligence, Creative Industries, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Schriftpublikation, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed},
}

@inproceedings{zaharieva_social_2015,
  address   = {Shanghai, China},
  title     = {Social {Event} {Mining} in {Large} {Photo} {Collections}},
  abstract  = {A significant part of publicly available photos on the Internet depicts a variety of different social events. In order to organize this steadily growing media content and to make it easily accessible, novel indexing methods are required. Essential research questions in this context concern the efficient detection (clustering), classification, and retrieval of social events in large media collections. In this paper we explore two aspects of social events mining. First, the initial clustering of a given photo collection into single events and, second, the retrieval of relevant social events based on user queries. For both aspects we employ commonly available metadata information, such as user, time, GPS data, and user-generated textual descriptions. Performed evaluations in the context of social event detection demonstrate the strong generalization ability of our approach and the potential of contextual data such as time, user, and location. Experiments with social event retrieval clearly indicate the open challenge of mapping between previously detected event clusters and heterogeneous user queries.},
  booktitle = {Proceedings of the {International} {Conference} on {Multimedia} {Retrieval}},
  publisher = {ACM Press},
  author    = {Zaharieva, Maia and Zeppelzauer, Matthias and Del Fabro, Manfred and Schopfhauser, Daniel},
  month     = mar,
  year      = {2015},
  keywords  = {2015, Center for Artificial Intelligence, Computer Vision, Creative Industries, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Pattern recognition, Publikationstyp Schriftpublikation, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed, visual computing},
}

@incollection{mitrovic_features_2010,
  title     = {Features for {Content}-{Based} {Audio} {Retrieval}},
  volume    = {78},
  isbn      = {978-0-12-381019-9},
  url       = {http://www.sciencedirect.com/science/article/pii/S0065245810780037},
  booktitle = {Advances in {Computers}},
  address   = {Burlington},
  publisher = {Academic Press},
  author    = {Mitrović, Dalibor and Zeppelzauer, Matthias and Breiteneder, Christian},
  year      = {2010},
  keywords  = {Audio Descriptors, Audio Features, Audio classification, Center for Artificial Intelligence, Content-based Audio Features, Content-based Audio Retrieval, Extern, FH SP Data Analytics \& Visual Computing, Feature Design, Feature Taxonomy, Institut für Creative Media Technologies, Media Computing Group, Sound Recognition, Wiss. Beitrag, best-mzeppelzauer, cepstral features, pattern recognition, spectral features, survey, temporal features},
}