@article{slijepcevic_explaining_2022, title = {Explaining {Machine} {Learning} {Models} for {Clinical} {Gait} {Analysis}}, volume = {3}, copyright = {CC-BY-NC-SA}, issn = {2691-1957}, url = {https://doi.org/10.1145/3474121}, doi = {10.1145/3474121}, number = {2}, journal = {ACM Transactions on Computing for Healthcare}, author = {Slijepcevic, Djordje and Horst, Fabian and Lapuschkin, Sebastian and Horsak, Brian and Raberger, Anna-Maria and Kranzl, Andreas and Samek, Wojciech and Breiteneder, Christian and Schöllhorn, Wolfgang and Zeppelzauer, Matthias}, year = {2022}, note = {Projekt: I3D Projekt: ReMoCapLab Projekt: DHLab}, keywords = {2020, Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, DHLab, Department Gesundheit, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Media Computing Group, SP CDHSI Motor Rehabilitation, Studiengang Physiotherapie, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed}, pages = {14:1--14:27}, } @article{wagner_kavagait_2018, title = {{KAVAGait}: {Knowledge}-{Assisted} {Visual} {Analytics} for {Clinical} {Gait} {Analysis}}, volume = {25}, url = {https://doi.org/10.1109/TVCG.2017.2785271}, doi = {10.1109/TVCG.2017.2785271}, abstract = {In 2014, more than 10 million people in the US were affected by an ambulatory disability. Thus, gait rehabilitation is a crucial part of health care systems. The quantification of human locomotion enables clinicians to describe and analyze a patient’s gait performance in detail and allows them to base clinical decisions on objective data. These assessments generate a vast amount of complex data which need to be interpreted in a short time period. 
We conducted a design study in cooperation with gait analysis experts to develop a novel Knowledge-Assisted Visual Analytics solution for clinical Gait analysis (KAVAGait). KAVAGait allows the clinician to store and inspect complex data derived during clinical gait analysis. The system incorporates innovative and interactive visual interface concepts, which were developed based on the needs of clinicians. Additionally, an explicit knowledge store (EKS) allows externalization and storage of implicit knowledge from clinicians. It makes this information available for others, supporting the process of data inspection and clinical decision making. We validated our system by conducting expert reviews, a user study, and a case study. Results suggest that KAVAGait is able to support a clinician during clinical practice by visualizing complex gait data and providing knowledge of other clinicians.}, number = {3}, journal = {IEEE Transactions on Visualization and Computer Graphics (TVCG)}, author = {Wagner, Markus and Slijepcevic, Djordje and Horsak, Brian and Rind, Alexander and Zeppelzauer, Matthias and Aigner, Wolfgang}, year = {2018}, note = {Projekt: KAVA-Time Projekt: IntelliGait Projekt: CARMA Projekt: DHLab}, keywords = {Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, DHLab, Design Study, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Digital Technologies, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Healthcare, Human Gait Analysis, Human-Computer Interaction, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Media Computing Group, SP CDHSI Motor Rehabilitation, SP IGW Clinical \& Healthcare Research, Visual analytics, Wiss. 
Beitrag, best, best-bhorsak, best-lbaigner, best-lbwagnerm, best-mzeppelzauer, information visualization, knowledge generation, peer-reviewed}, pages = {1528--1542}, } @article{slijepcevic_automatic_2018, title = {Automatic {Classification} of {Functional} {Gait} {Disorders}}, volume = {5}, issn = {2168-2194}, url = {https://arxiv.org/abs/1712.06405}, doi = {10/ghz24w}, number = {22}, urldate = {2017-12-21}, journal = {IEEE Journal of Biomedical and Health Informatics}, author = {Slijepcevic, Djordje and Zeppelzauer, Matthias and Raberger, Anna-Maria and Schwab, Caterine and Schuller, Michael and Baca, Arnold and Breiteneder, Christian and Horsak, Brian}, year = {2018}, note = {Projekt: IntelliGait Projekt: CARMA Projekt: DHLab}, keywords = {Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, DHLab, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Media Computing Group, SP CDHSI Motor Rehabilitation, SP IGW Clinical \& Healthcare Research, Studiengang Physiotherapie, Wiss. 
Beitrag, best, best-bhorsak, best-mzeppelzauer, peer-reviewed}, pages = {1653--1661}, } @article{horsak_gaitrec_2020, title = {{GaitRec}, a large-scale ground reaction force dataset of healthy and impaired gait}, volume = {7:143}, copyright = {CC BY}, url = {https://www.nature.com/articles/s41597-020-0481-z}, doi = {10/gh372d}, number = {1}, journal = {Scientific Data}, author = {Horsak, Brian and Slijepcevic, Djordje and Raberger, Anna-Maria and Schwab, Caterine and Worisch, Marianne and Zeppelzauer, Matthias}, year = {2020}, note = {Projekt: I3D Projekt: IntelliGait Projekt: DHLab}, keywords = {2019, Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, DHLab, Eintrag überprüfen, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Green OA, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Media Computing Group, Open Access, SP CDHSI Motor Rehabilitation, SP IGW Clinical \& Healthcare Research, Studiengang Physiotherapie, Wiss. 
Beitrag, best, best-bhorsak, best-mzeppelzauer, peer-reviewed, submitted}, pages = {1--8}, } @inproceedings{zeppelzauer_automatic_2018, address = {Yokohama, Japan}, title = {Automatic {Prediction} of {Building} {Age} from {Photographs}}, isbn = {978-1-4503-5046-4}, url = {https://arxiv.org/pdf/1804.02205}, doi = {10/ghpp2k}, language = {en}, urldate = {2018-10-10}, booktitle = {Proceedings of the {ACM} {International} {Conference} on {Multimedia} {Retrieval} ({ICMR} '18)}, publisher = {ACM Press}, author = {Zeppelzauer, Matthias and Despotovic, Miroslav and Sakeena, Muntaha and Koch, David and Döller, Mario}, year = {2018}, note = {Projekt: ImmBild Projekt: ImmoAge}, keywords = {Center for Artificial Intelligence, Computer Vision, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Schriftpublikation, Visual Computing, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed}, pages = {126--134}, } @inproceedings{koch_visual_2018, address = {Yokohama, Japan}, title = {Visual {Estimation} of {Building} {Condition} with {Patch}-level {ConvNets}}, isbn = {978-1-4503-5797-5}, url = {http://dl.acm.org/citation.cfm?doid=3210499.3210526}, doi = {10/ghpp2m}, language = {en}, urldate = {2018-10-10}, booktitle = {Proceedings of the 2018 {ACM} {Workshop} on {Multimedia} for {Real} {Estate} {Tech} - {RETech}'18}, publisher = {ACM Press}, author = {Koch, David and Despotovic, Miroslav and Sakeena, Muntaha and Döller, Mario and Zeppelzauer, Matthias}, year = {2018}, note = {Projekt: ImmBild Projekt: ImmoAge}, keywords = {Center for Artificial Intelligence, Computer Vision, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Wiss. 
Beitrag, peer-reviewed}, pages = {12--17}, } @inproceedings{zeppelzauer_sonicontrol_2018, address = {Seoul, South Korea}, title = {{SoniControl} - {A} {Mobile} {Ultrasonic} {Firewall}}, url = {https://arxiv.org/abs/1807.07617}, doi = {10/gh377f}, abstract = {The exchange of data between mobile devices in the near-ultrasonic frequency band is a new promising technology for near field communication (NFC) but also raises a number of privacy concerns. We present the first ultrasonic firewall that reliably detects ultrasonic communication and provides the user with effective means to prevent hidden data exchange. This demonstration showcases a new media-based communication technology ("data over audio") together with its related privacy concerns. It enables users to (i) interactively test out and experience ultrasonic information exchange and (ii) shows how to protect oneself against unwanted tracking.}, urldate = {2018-10-10}, booktitle = {Proceedings of the {ACM} {International} {Conference} on {Multimedia}}, publisher = {ACM Press}, author = {Zeppelzauer, Matthias and Ringot, Alexis and Taurer, Florian}, year = {2018}, note = {arXiv: 1807.07617}, keywords = {Acoustic Cookies, Acoustic Firewall, Acoustic Tracking, Center for Artificial Intelligence, Computer Science - Cryptography and Security, Computer Science - Multimedia, FH SP Cyber Security, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Ultrasound Data Transmission, Wiss. 
Beitrag, best, best-aringot, peer-reviewed}, } @article{despotovic_prediction_2019, title = {Prediction and analysis of heating energy demand for detached houses by computer vision}, volume = {193}, issn = {0360-5442}, url = {https://www.sciencedirect.com/science/article/pii/S0378778818336430?via%3Dihub}, doi = {10/fsxn}, abstract = {Exterior images of real estate contain a large number of visual clues which allow conclusions about the heating energy demand (HED) of a building. Up to now, HED has been determined by specially trained experts such as architects, civil engineers, etc. either on the basis of consumption data or estimated demand values. In this article, we present a novel approach to determine the HED of detached houses. Our suggested approach is based solely on the visual appearance and assumes that exterior images of a building contain a variety of information that allows inferences about the HED of a building. For this, we use the powerful techniques of image analysis and computer vision which are already successfully used in different domains like surveillance, image search, and robotics. The results show that our approach works well and in addition to the HED, the construction period of a building can also be determined. Our algorithm achieves a classification accuracy of 62\% for HED and 57\% for construction age epoch.}, journal = {Energy \& Buildings}, author = {Despotovic, Miroslav and Koch, David and Leiber, Sascha and Döller, Mario and Sakeena, Muntaha and Zeppelzauer, Matthias}, year = {2019}, note = {Projekt: ImmBild Projekt: ImmoAge}, keywords = {Center for Artificial Intelligence, Computer Vision, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Visual Computing, Wiss. 
Beitrag, best, peer-reviewed}, pages = {29--35}, } @article{zeppelzauer_automated_2013, title = {Automated detection of elephants in wildlife video}, volume = {2013}, issn = {1687-5281}, url = {https://doi.org/10.1186/1687-5281-2013-46}, doi = {10/f3snb6}, abstract = {Biologists often have to investigate large amounts of video in behavioral studies of animals. These videos are usually not sufficiently indexed which makes the finding of objects of interest a time-consuming task. We propose a fully automated method for the detection and tracking of elephants in wildlife video which has been collected by biologists in the field. The method dynamically learns a color model of elephants from a few training images. Based on the color model, we localize elephants in video sequences with different backgrounds and lighting conditions. We exploit temporal clues from the video to improve the robustness of the approach and to obtain spatial and temporal consistent detections. The proposed method detects elephants (and groups of elephants) of different sizes and poses performing different activities. The method is robust to occlusions (e.g., by vegetation) and correctly handles camera motion and different lighting conditions. Experiments show that both near- and far-distant elephants can be detected and tracked reliably. The proposed method enables biologists efficient and direct access to their video collections which facilitates further behavioral and ecological studies. The method does not make hard constraints on the species of elephants themselves and is thus easily adaptable to other animal species.}, number = {1}, journal = {EURASIP Journal on Image and Video Processing}, author = {Zeppelzauer, Matthias}, month = aug, year = {2013}, keywords = {Center for Artificial Intelligence, Computer Vision, Extern, FH SP Data Analytics \& Visual Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Wiss. 
Beitrag}, pages = {46}, } @article{slijepcevic_kanonymity_2021, title = {k-{Anonymity} in {Practice}: {How} {Generalisation} and {Suppression} {Affect} {Machine} {Learning} {Classifiers}}, volume = {111}, copyright = {Open Access}, issn = {0167-4048}, url = {https://doi.org/10.1016/j.cose.2021.102488}, doi = {10.1016/j.cose.2021.102488}, journal = {Computers \& Security}, author = {Slijepčević, Djordje and Henzl, Maximilian and Klausner, Lukas Daniel and Dam, Tobias and Kieseberg, Peter and Zeppelzauer, Matthias}, month = oct, year = {2021}, keywords = {Center for Artificial Intelligence, FH SP Cyber Security, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Institut für IT Sicherheitsforschung, SP IT Sec Applied Security \& Data Science, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed}, pages = {19}, } @article{bernard_proseco_2021, title = {{ProSeCo}: {Visual} analysis of class separation measures and dataset characteristics}, volume = {96}, copyright = {Open Access}, issn = {0097-8493}, url = {https://www.sciencedirect.com/science/article/pii/S0097849321000406}, doi = {10.1016/j.cag.2021.03.004}, abstract = {Class separation is an important concept in machine learning and visual analytics. We address the visual analysis of class separation measures for both high-dimensional data and its corresponding projections into 2D through dimensionality reduction (DR) methods. Although a plethora of separation measures have been proposed, it is difficult to compare class separation between multiple datasets with different characteristics, multiple separation measures, and multiple DR methods. We present ProSeCo, an interactive visualization approach to support comparison between up to 20 class separation measures and up to 4 DR methods, with respect to any of 7 dataset characteristics: dataset size, dataset dimensions, class counts, class size variability, class size skewness, outlieriness, and real-world vs. 
synthetically generated data. ProSeCo supports (1) comparing across measures, (2) comparing high-dimensional to dimensionally-reduced 2D data across measures, (3) comparing between different DR methods across measures, (4) partitioning with respect to a dataset characteristic, (5) comparing partitions for a selected characteristic across measures, and (6) inspecting individual datasets in detail. We demonstrate the utility of ProSeCo in two usage scenarios, using datasets [1] posted at https://osf.io/epcf9/.}, journal = {Computers \& Graphics}, author = {Bernard, Jürgen and Hutter, Marco and Zeppelzauer, Matthias and Sedlmair, Michael and Munzner, Tamara}, year = {2021}, keywords = {Center for Artificial Intelligence, Computers and Graphics, FH SP Data Analytics \& Visual Computing, Formatting, Forschungsgruppe Media Computing, Guidelines, Institut für Creative Media Technologies, Wiss. Beitrag, best-mzeppelzauer, peer-reviewed}, pages = {48--60}, } @article{zielinski_persistence_2021, title = {Persistence {Codebooks} for {Topological} {Data} {Analysis}}, volume = {54}, copyright = {Open Access}, issn = {0269-2821}, url = {https://rdcu.be/b6ENZ}, doi = {10.1007/s10462-020-09897-4}, journal = {Journal of Artificial Intelligence Review}, author = {Zielinski, Bartosz and Lipinski, Michal and Juda, Mateusz and Zeppelzauer, Matthias and Dlotko, Pawel}, year = {2021}, keywords = {3D surface classification, Center for Artificial Intelligence, Computer Vision, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Green OA, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Open Access, Surface texture analysis, Visual Computing, Wiss. 
Beitrag, best, best-mzeppelzauer, peer-reviewed, persistence diagram, persistence image, persistent homology, surface representation, surface topology analysis}, pages = {1969--2009}, } @article{bernard_jurgen_taxonomy_2021, title = {A {Taxonomy} of {Property} {Measures} to {Unify} {Active} {Learning} and {Human}-centered {Approaches} to {Data} {Labeling}}, volume = {11}, copyright = {Open Access}, issn = {2160-6455}, url = {https://dl.acm.org/doi/abs/10.1145/3439333}, doi = {10/gnt2wf}, number = {3-4}, journal = {ACM Transactions on Interactive Intelligent Systems (TiiS)}, author = {Bernard, Jürgen and Hutter, Marco and Sedlmair, Michael and Zeppelzauer, Matthias and Munzner, Tamara}, year = {2021}, note = {Projekt: BigDataAnalytics Projekt: I3D Projekt: PlantAI}, keywords = {2020, Center for Artificial Intelligence, Department Medien und Digitale Technologien, Eintrag überprüfen, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Media Computing Group, Publikationstyp Schriftpublikation, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed}, pages = {1--42}, } @article{bernard_vial_2018, title = {{VIAL} – {A} {Unified} {Process} for {Visual}-{Interactive} {Labeling}}, volume = {34}, copyright = {Springer, Berlin, Heidelberg}, issn = {1432-2315}, url = {https://bit.ly/2My1Yrt}, doi = {10/gd5hr3}, abstract = {The assignment of labels to data instances is a fundamental prerequisite for many machine learning tasks. Moreover, labeling is a frequently applied process in visual-interactive analysis approaches and visual analytics. However, the strategies for creating labels usually differ between these two fields. This raises the question whether synergies between the different approaches can be attained. In this paper, we study the process of labeling data instances with the user in the loop, from both the machine learning and visual-interactive perspective. 
Based on a review of differences and commonalities, we propose the ’Visual-Interactive Labeling‘ (VIAL) process that unifies both approaches. We describe the six major steps of the process and discuss their specific challenges. Additionally, we present two heterogeneous usage scenarios from the novel VIAL perspective, one on metric distance learning and one on object detection in videos. Finally, we discuss general challenges to VIAL and point out necessary work for the realization of future VIAL approaches.}, number = {1189}, journal = {The Visual Computer}, author = {Bernard, Jürgen and Zeppelzauer, Matthias and Sedlmair, Michael and Aigner, Wolfgang}, year = {2018}, note = {Projekt: KAVA-Time Projekt: IntelliGait Projekt: CARMA}, keywords = {Active Learning, Candidate Selection, Center for Artificial Intelligence, Creative Industries, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Interactive Labeling, Labeling Strategies, Machine Learning, Media Computing Group, Visual Interactive Labeling, best, best-mzeppelzauer, information visualization}, pages = {16}, } @article{stoger_visualizing_2012, title = {Visualizing {Sound} {Emission} of {Elephant} {Vocalizations}: {Evidence} for {Two} {Rumble} {Production} {Types}}, volume = {7}, url = {http://dx.plos.org/10.1371/journal.pone.0048907}, number = {11:e48907}, journal = {Plos One}, author = {Stöger, A. and Heimann, G. and Zeppelzauer, Matthias and Ganswindt, A. and Hensman, S. and Charlton, B.}, year = {2012}, keywords = {Acoustic Camera, Aggregated LPC Spectral Features, Audio classification, Center for Artificial Intelligence, Content-based Audio Retrieval, Extern, FH SP Data Analytics \& Visual Computing, Formant Analysis, Infrasonic Sound Analysis, Institut für Creative Media Technologies, LPC, LPC Spectrogram, Linear Predictive Coding, Machine Learning, Media Computing Group, Sound Classification, Sound Recognition, Wiss. 
Beitrag, best-mzeppelzauer, pattern recognition, ⛔ No DOI found}, } @inproceedings{zielinski_persistence_2019, address = {Macao, China}, title = {Persistence {Bag}-of-{Words} for {Topological} {Data} {Analysis}}, url = {http://arxiv.org/abs/1802.04852}, doi = {10/ghpp7z}, urldate = {2018-10-10}, booktitle = {Proceedings of the {International} {Joint} {Conference} on {Artificial} {Intelligence} 2019}, author = {Zielinski, Bartosz and Lipinski, Michal and Juda, Mateusz and Zeppelzauer, Matthias and Dlotko, Pawel}, year = {2019}, note = {arXiv: 1802.04852}, keywords = {Artificial Intelligence, Center for Artificial Intelligence, Computer Vision, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Mathematics - Algebraic Topology, Media Computing Group, Statistics, Vortrag, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed}, pages = {6}, } @inproceedings{bernard_towards_2018, address = {Brno, Czech Republic}, title = {Towards {User}-{Centered} {Active} {Learning} {Algorithms}}, volume = {37}, url = {http://doi.wiley.com/10.1111/cgf.13406}, doi = {10/gdw79h}, language = {en}, urldate = {2018-10-10}, booktitle = {Computer {Graphics} {Forum}}, author = {Bernard, Jürgen and Zeppelzauer, Matthias and Lehmann, Markus and Müller, Martin and Sedlmair, Michael}, year = {2018}, keywords = {Center for Artificial Intelligence, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Visual analytics, Wiss. 
Beitrag, best, best-mzeppelzauer, peer-reviewed}, pages = {121--132}, } @article{zaharieva_cross-platform_2015, title = {Cross-{Platform} {Social} {Event} {Detection}}, volume = {22}, issn = {1070-986X}, url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7045414&tag=1}, doi = {10/gh3773}, abstract = {It is becoming more and more popular to share personal experiences on platforms such as Flickr and YouTube. Uploaded media is usually described by both technical and user-generated metadata that is commonly used for their access and retrieval. Thus, a crucial aspect in this context is the quality and reliability of provided metadata. The mining of media across sharing platforms bears the additional challenge about potential differences in the maintained metadata. In order to provide a baseline for further research, we perform a thorough evaluation of the usefulness of available metadata in the context of social event detection in both single media repository scenario and across different platforms.}, number = {3}, journal = {IEEE Multimedia}, author = {Zaharieva, Maia and Del Fabro, Manfred and Zeppelzauer, Matthias}, month = jan, year = {2015}, keywords = {2015, Center for Artificial Intelligence, Computer Vision, Creative Industries, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Pattern recognition, Publikationstyp Schriftpublikation, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed}, pages = {14}, } @article{zeppelzauer_multimodal_2016, title = {Multimodal classification of events in social media}, issn = {0262-8856}, url = {https://arxiv.org/pdf/1601.00599}, doi = {10/ghpp2q}, abstract = {Abstract A large amount of social media hosted on platforms like Flickr and Instagram is related to social events. 
The task of social event classification refers to the distinction of event and non-event-related contents as well as the classification of event types (e.g. sports events and concerts). In this paper, we provide an extensive study of textual, visual, as well as multimodal representations for social event classification. We investigate the strengths and weaknesses of the modalities and study the synergy effects between the modalities. Experimental results obtained with our multimodal representation outperform state-of-the-art methods and provide a new baseline for future research.}, journal = {Image and Vision Computing}, author = {Zeppelzauer, Matthias and Schopfhauser, Daniel}, year = {2016}, keywords = {2016, Center for Artificial Intelligence, Computer Vision, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Multimodal retrieval, Publikationstyp Schriftpublikation, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed}, } @article{zeppelzauer_study_2018, title = {A {Study} on {Topological} {Descriptors} for the {Analysis} of {3D} {Surface} {Texture}}, volume = {167}, issn = {1077-3142}, url = {https://arxiv.org/pdf/1710.10662}, doi = {10/ghpp2h}, abstract = {Methods from computational topology are becoming more and more popular in computer vision and have shown to improve the state-of-the-art in several tasks. In this paper, we investigate the applicability of topological descriptors in the context of 3D surface analysis for the classification of different surface textures. We present a comprehensive study on topological descriptors, investigate their robustness and expressiveness and compare them with state-of-the-art methods. Results show that class-specific information is reflected well in topological descriptors. 
The investigated descriptors can directly compete with non-topological descriptors and capture orthogonal information. Moreover they improve the state-of-the-art in combination with non-topological descriptors.}, journal = {Journal on Computer Vision and Image Understanding (CVIU)}, author = {Zeppelzauer, Matthias and Zielinski, Bartosz and Juda, Mateusz and Seidl, Markus}, year = {2018}, note = {Projekt: PITOTI 3D}, keywords = {3D surface classification, Center for Artificial Intelligence, Computer Vision, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Surface texture analysis, Visual Computing, Wiss. Beitrag, best, best-lbseidl, best-mzeppelzauer, peer-reviewed, persistence diagram, persistence image, persistent homology, surface representation, surface topology analysis}, pages = {74--88}, } @article{bernard_jurgen_comparing_2017, title = {Comparing {Visual}-{Interactive} {Labeling} with {Active} {Learning}: {An} {Experimental} {Study}}, volume = {24}, issn = {1077-2626}, url = {http://eprints.cs.univie.ac.at/5257/1/bernard2017labeling.pdf}, doi = {10/gcqb3r}, number = {1}, journal = {IEEE Transactions on Visualization and Computer Graphics (TVCG)}, author = {Bernard, Jürgen and Hutter, Marco and Zeppelzauer, Matthias and Fellner, Dieter and Sedlmair, Michael}, year = {2017}, keywords = {2017, Center for Artificial Intelligence, Creative Industries, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Schriftpublikation, Wiss. 
Beitrag, best, best-mzeppelzauer, peer-reviewed}, } @inproceedings{salvador_cultural_2015, address = {Boston, Massachusetts, United States}, title = {Cultural {Event} {Recognition} with {Visual} {ConvNets} and {Temporal} {Models}}, url = {http://arxiv.org/abs/1504.06567}, abstract = {This paper presents our contribution to the ChaLearn Challenge 2015 on Cultural Event Classification. The challenge in this task is to automatically classify images from 50 different cultural events. Our solution is based on the combination of visual features extracted from convolutional neural networks with temporal information using a hierarchical classifier scheme. We extract visual features from the last three fully connected layers of both CaffeNet (pretrained with ImageNet) and our fine tuned version for the ChaLearn challenge. We propose a late fusion strategy that trains a separate low-level SVM on each of the extracted neural codes. The class predictions of the low-level SVMs form the input to a higher level SVM, which gives the final event scores. We achieve our best result by adding a temporal refinement step into our classification scheme, which is applied directly to the output of each low-level SVM. Our approach penalizes high classification scores based on visual features when their time stamp does not match well an event-specific temporal distribution learned from the training and validation data. 
Our system achieved the second best result in the ChaLearn Challenge 2015 on Cultural Event Classification with a mean average precision of 0.767 on the test set.}, booktitle = {Proceedings of the {CVPR} {Workshop} {ChaLearn} {Looking} at {People} 2015}, publisher = {IEEE}, author = {Salvador, Amaia and Zeppelzauer, Matthias and Manchón-Vizuente, Daniel and Calafell, Andrea and Giró-i-Nieto, Xavier}, month = apr, year = {2015}, keywords = {2015, Center for Artificial Intelligence, Computer Vision, Creative Industries, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Präsentation, Publikationstyp Schriftpublikation, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed, visual computing}, } @inproceedings{zaharieva_social_2015, address = {Shanghai, China}, title = {Social {Event} {Mining} in {Large} {Photo} {Collections}}, abstract = {A significant part of publicly available photos on the Internet depicts a variety of different social events. In order to organize this steadily growing media content and to make it easily accessible, novel indexing methods are required. Essential research questions in this context concern the efficient detection (clustering), classification, and retrieval of social events in large media collections. In this paper we explore two aspects of social events mining. First, the initial clustering of a given photo collection into single events and, second, the retrieval of relevant social events based on user queries. For both aspects we employ commonly available metadata information, such as user, time, GPS data, and user-generated textual descriptions. Performed evaluations in the context of social event detection demonstrate the strong generalization ability of our approach and the potential of contextual data such as time, user, and location. 
Experiments with social event retrieval clearly indicate the open challenge of mapping between previously detected event clusters and heterogeneous user queries.}, booktitle = {Proceedings of the {International} {Conference} on {Multimedia} {Retrieval}}, publisher = {ACM Press}, author = {Zaharieva, Maia and Zeppelzauer, Matthias and Del Fabro, Manfred and Schopfhauser, Daniel}, month = mar, year = {2015}, keywords = {2015, Center for Artificial Intelligence, Computer Vision, Creative Industries, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Pattern recognition, Publikationstyp Schriftpublikation, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed, visual computing}, } @incollection{mitrovic_features_2010, title = {Features for {Content}-{Based} {Audio} {Retrieval}}, volume = {78}, isbn = {978-0-12-381019-9}, url = {http://www.sciencedirect.com/science/article/pii/S0065245810780037}, booktitle = {Advances in {Computers}}, publisher = {Burlington: Academic Press}, author = {Mitrović, Dalibor and Zeppelzauer, Matthias and Breiteneder, C.}, year = {2010}, keywords = {Audio Descriptors, Audio Features, Audio classification, Center for Artificial Intelligence, Content-based Audio Features, Content-based Audio Retrieval, Extern, FH SP Data Analytics \& Visual Computing, Feature Design, Feature Taxonomy, Institut für Creative Media Technologies, Media Computing Group, Sound Recognition, Wiss. Beitrag, best-mzeppelzauer, cepstral features, pattern recognition, spectral features, survey, temporal features}, pages = {71--150}, }