@article{slijepcevic_input_2020, title = {Input {Representations} and {Classification} {Strategies} for {Automated} {Human} {Gait} {Analysis}}, volume = {76}, issn = {0966-6362}, doi = {10/ghz24x}, journal = {Gait \& Posture}, author = {Slijepcevic, Djordje and Zeppelzauer, Matthias and Schwab, Caterine and Raberger, Anna-Maria and Breiteneder, Christian and Horsak, Brian}, year = {2020}, note = {Projekt: IntelliGait Projekt: I3D Projekt: ReMoCap-Lab Projekt: DHLab}, keywords = {2020, Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, Department Gesundheit, Eintrag überprüfen, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Green OA, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Media Computing Group, Open Access, SP CDHSI Motor Rehabilitation, SP IGW Clinical \& Healthcare Research, Studiengang Physiotherapie, Wiss. 
Beitrag, best, best-bhorsak, peer-reviewed}, pages = {198--203}, } @inproceedings{slijepcevic_usefulness_2019, address = {Vienna, Austria}, title = {On the usefulness of statistical parameter mapping for feature selection in automated gait classification}, booktitle = {Book of {Abstracts} of the 25th {Conference} of the {European} {Society} of {Biomechanics} ({ESB})}, author = {Slijepcevic, Djordje and Raberger, Anna-Maria and Zeppelzauer, Matthias and Dumphart, Bernhard and Breiteneder, Christian and Horsak, Brian}, year = {2019}, note = {Projekt: IntelliGait Projekt: DHLab}, keywords = {Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Digital Health, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Media Computing Group, SP IGW Clinical \& Healthcare Research, Studiengang Physiotherapie, Vortrag, Wiss. Beitrag, best, best-bhorsak, peer-reviewed, ⛔ No DOI found}, pages = {1}, } @article{wagner_kavagait_2018, title = {{KAVAGait}: {Knowledge}-{Assisted} {Visual} {Analytics} for {Clinical} {Gait} {Analysis}}, volume = {25}, url = {https://doi.org/10.1109/TVCG.2017.2785271}, doi = {10/ghppzn}, abstract = {In 2014, more than 10 million people in the US were affected by an ambulatory disability. Thus, gait rehabilitation is a crucial part of health care systems. The quantification of human locomotion enables clinicians to describe and analyze a patient’s gait performance in detail and allows them to base clinical decisions on objective data. These assessments generate a vast amount of complex data which need to be interpreted in a short time period. We conducted a design study in cooperation with gait analysis experts to develop a novel Knowledge-Assisted Visual Analytics solution for clinical Gait analysis (KAVAGait). 
KAVAGait allows the clinician to store and inspect complex data derived during clinical gait analysis. The system incorporates innovative and interactive visual interface concepts, which were developed based on the needs of clinicians. Additionally, an explicit knowledge store (EKS) allows externalization and storage of implicit knowledge from clinicians. It makes this information available for others, supporting the process of data inspection and clinical decision making. We validated our system by conducting expert reviews, a user study, and a case study. Results suggest that KAVAGait is able to support a clinician during clinical practice by visualizing complex gait data and providing knowledge of other clinicians.}, number = {3}, journal = {IEEE Transactions on Visualization and Computer Graphics (TVCG)}, author = {Wagner, Markus and Slijepcevic, Djordje and Horsak, Brian and Rind, Alexander and Zeppelzauer, Matthias and Aigner, Wolfgang}, year = {2018}, note = {Projekt: KAVA-Time Projekt: IntelliGait Projekt: CARMA Projekt: DHLab}, keywords = {Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, DHLab, Design Study, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Digital Technologies, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Healthcare, Human Gait Analysis, Human-Computer Interaction, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Media Computing Group, SP CDHSI Motor Rehabilitation, SP IGW Clinical \& Healthcare Research, Visual analytics, Wiss. 
Beitrag, best, best-bhorsak, best-lbaigner, best-lbwagnerm, best-mzeppelzauer, information visualization, knowledge generation, peer-reviewed}, pages = {1528--1542}, } @article{slijepcevic_automatic_2018, title = {Automatic {Classification} of {Functional} {Gait} {Disorders}}, volume = {5}, issn = {2168-2194}, url = {https://arxiv.org/abs/1712.06405}, doi = {10/ghz24w}, number = {22}, urldate = {2017-12-21}, journal = {IEEE Journal of Biomedical and Health Informatics}, author = {Slijepcevic, Djordje and Zeppelzauer, Matthias and Raberger, Anna-Maria and Schwab, Caterine and Schuller, Michael and Baca, Arnold and Breiteneder, Christian and Horsak, Brian}, year = {2018}, note = {Projekt: IntelliGait Projekt: CARMA Projekt: DHLab}, keywords = {Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, DHLab, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Media Computing Group, SP CDHSI Motor Rehabilitation, SP IGW Clinical \& Healthcare Research, Studiengang Physiotherapie, Wiss. 
Beitrag, best, best-bhorsak, best-mzeppelzauer, peer-reviewed}, pages = {1653--1661}, } @inproceedings{schwab_intelligait_2018, address = {Hamburg, Deutschland}, title = {{IntelliGait}: {Automatische} {Gangmusteranalyse} für die robuste {Erkennung} von {Gangstörungen}}, booktitle = {Tagungsband des 2ten {GAMMA} {Kongress} ({Gesellschaft} für die {Analyse} {Menschlicher} {Motorik} in ihrer klinischen {Anwendung})}, author = {Schwab, Caterine and Slijepcevic, Djordje and Zeppelzauer, Matthias and Raberger, Anna-Maria and Dumphart, Bernhard and Baca, Arnold and Breiteneder, Christian and Horsak, Brian}, year = {2018}, note = {Projekt: IntelliGait Projekt: CARMA Projekt: DHLab}, keywords = {Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, Creative Industries, DHLab, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Media Computing Group, Pattern recognition, SP CDHSI Motor Rehabilitation, SP IGW Clinical \& Healthcare Research, Studiengang Physiotherapie, Wiss. 
Beitrag, best, best-bhorsak, peer-reviewed, ⛔ No DOI found}, } @inproceedings{slijepcevic_ground_2017, address = {Trondheim, Norway}, title = {Ground reaction force measurements for gait classification tasks: {Effects} of different {PCA}-based representations}, volume = {57}, url = {http://www.gaitposture.com/article/S0966-6362(17)30712-9/pdf}, doi = {10.1016/j.gaitpost.2017}, booktitle = {Gait \& {Posture} {Supplement}}, author = {Slijepcevic, Djordje and Horsak, Brian and Schwab, Caterine and Raberger, Anna-Maria and Schüller, Michael and Baca, Arnold and Breiteneder, Christian and Zeppelzauer, Matthias}, year = {2017}, note = {Projekt: IntelliGait Projekt: DHLab}, keywords = {2017, Biofeedback, Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, Creative Industries, DHLab, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Gait Analysis, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Media Computing Group, Pattern recognition, Publikationstyp Präsentation, Publikationstyp Schriftpublikation, SP CDHSI Motor Rehabilitation, SP IGW Clinical \& Healthcare Research, Studiengang Physiotherapie, Wiss. 
Beitrag, best, best-bhorsak, peer-reviewed, project\_carma, project\_intelligait, ⚠️ Invalid DOI}, pages = {4--5}, } @misc{zeppelzauer_project_2018, address = {Linz, Austria}, type = {Exhibition}, title = {Project {SoniTalk}}, abstract = {Presentation of the Project SoniControl at ARS Electronica Festival 2018}, language = {English}, author = {Zeppelzauer, Matthias and Ringot, Alexis and Taurer, Florian}, month = sep, year = {2018}, keywords = {2017, Center for Artificial Intelligence, Department Medien und Digitale Technologien, Department Technologie, FH SP Cyber Security, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Invited Talk, Machine Learning, Media Computing Group, Präsentation, Wiss. Beitrag}, } @book{seidl_fmt_2018, title = {{FMT} 2018 - {Proceedings} of the 11th {Forum} {Media} {Technology} and 4th {All} {Around} {Audio} {Symposium}}, url = {http://ceur-ws.org/Vol-2299/}, publisher = {CEUR-WS.org}, editor = {Seidl, Markus and Moser, Thomas and Blumenstein, Kerstin and Zeppelzauer, Matthias and Iber, Michael}, year = {2018}, keywords = {Center for Artificial Intelligence, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Digital Technologies, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Media Computing Group, Wiss. 
Beitrag, peer-reviewed}, } @misc{zeppelzauer_content-based_2018, address = {Potsdam, Germany}, type = {Invited talk}, title = {Content-{Based} {Analysis} of {Stylistic} {Features} in {Archive} {Documentaries}}, language = {English}, author = {Zeppelzauer, Matthias}, month = apr, year = {2018}, keywords = {2018, Center for Artificial Intelligence, Computer Vision, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Invited Talk, Machine Learning, Media Computing Group, Präsentation, Publikationstyp Vortrag, Wiss. Beitrag}, } @misc{zeppelzauer_visual_2018, address = {Krakow, Poland}, type = {Invited talk}, title = {Visual {Pattern} {Discovery} by {Deep} {Learning} for the {Extraction} of {Building} {Characteristics}}, language = {English}, author = {Zeppelzauer, Matthias}, month = dec, year = {2018}, keywords = {2018, Center for Artificial Intelligence, Computer Vision, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Invited Talk, Machine Learning, Media Computing Group, Präsentation, Publikationstyp Vortrag, Wiss. Beitrag}, } @misc{zeppelzauer_sonicontrol_2018, address = {Seoul, South Korea}, type = {Demo {Talk}}, title = {{SoniControl} - {A} {Mobile} {Ultrasonic} {Firewall}}, url = {http://arxiv.org/abs/1807.07617}, abstract = {The exchange of data between mobile devices in the near-ultrasonic frequency band is a new promising technology for near field communication (NFC) but also raises a number of privacy concerns. We present the first ultrasonic firewall that reliably detects ultrasonic communication and provides the user with effective means to prevent hidden data exchange. 
This demonstration showcases a new media-based communication technology ("data over audio") together with its related privacy concerns. It enables users to (i) interactively test out and experience ultrasonic information exchange and (ii) shows how to protect oneself against unwanted tracking.}, urldate = {2018-10-10}, author = {Zeppelzauer, Matthias and Ringot, Alexis and Taurer, Florian}, month = jul, year = {2018}, note = {arXiv: 1807.07617}, keywords = {2018, Center for Artificial Intelligence, Computer Science - Cryptography and Security, Computer Science - Multimedia, Department Medien und Digitale Technologien, Department Technologie, FH SP Cyber Security, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Präsentation, Wiss. Beitrag}, } @misc{zeppelzauer_automatic_2018, address = {Yokohama, Japan}, title = {Automatic {Prediction} of {Building} {Age} from {Photographs}}, url = {http://dl.acm.org/citation.cfm?doid=3206025.3206060}, language = {en}, urldate = {2018-10-10}, author = {Zeppelzauer, Matthias}, year = {2018}, note = {Projekt: ImmBild}, keywords = {2018, Center for Artificial Intelligence, Computer Vision, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Präsentation, Wiss. 
Beitrag, peer-reviewed}, } @misc{zeppelzauer_sonicontrol_2018-1, address = {Munich, Germany}, type = {Keynote}, title = {{SoniControl} – {Die} erste {Ultraschall}-{Firewall} für {Handys}}, language = {English}, author = {Zeppelzauer, Matthias}, month = jun, year = {2018}, keywords = {2018, Center for Artificial Intelligence, Department Medien und Digitale Technologien, Department Technologie, FH SP Cyber Security, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Invited Talk, Machine Learning, Media Computing Group, Präsentation, Publikationstyp Vortrag, Wiss. Beitrag}, } @misc{zeppelzauer_principles_2018, address = {Amsterdam, Netherlands}, type = {Invited talk}, title = {On the principles of feature extraction for visual analysis in media history: a brief introduction}, language = {English}, author = {Zeppelzauer, Matthias}, month = jul, year = {2018}, keywords = {2018, Center for Artificial Intelligence, Computer Vision, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Invited Talk, Machine Learning, Media Computing Group, Präsentation, Publikationstyp Vortrag, Wiss. Beitrag}, } @misc{zeppelzauer_persistence_2018, address = {IST Austria, Klosterneuburg, Austria}, type = {Conference {Talk}}, title = {Persistence {Codebooks} for {Topological} {Data} {Analysis}}, language = {English}, author = {Zeppelzauer, Matthias}, month = jun, year = {2018}, keywords = {2019, Center for Artificial Intelligence, Computer Vision, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Invited Talk, Machine Learning, Media Computing Group, Präsentation, Publikationstyp Vortrag, Wiss. 
Beitrag, peer-reviewed}, } @misc{zeppelzauer_intelligait_2018, address = {St. Pölten, Austria}, type = {Invited talk}, title = {{IntelliGait} - {Automatic} gait pattern analysis for robust classification of functional deficits}, language = {English}, author = {Zeppelzauer, Matthias}, month = jun, year = {2018}, keywords = {2018, Center for Artificial Intelligence, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Invited Talk, Machine Learning, Media Computing Group, Präsentation, Publikationstyp Vortrag, Wiss. Beitrag}, } @misc{zeppelzauer_visual_2018-1, address = {Yokohama, Japan}, title = {Visual {Estimation} of {Building} {Condition} with {Patch}-level {ConvNets}}, url = {http://dl.acm.org/citation.cfm?doid=3210499.3210526}, language = {en}, urldate = {2018-10-10}, author = {Zeppelzauer, Matthias}, year = {2018}, note = {Projekt: ImmBild Projekt: ImmoAge}, keywords = {2018, Center for Artificial Intelligence, Computer Vision, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Vortrag, Wiss. Beitrag}, } @misc{zeppelzauer_automatic_2018-1, address = {Köln, Deutschland}, type = {Invited talk}, title = {Automatic {Approaches} for the {Analysis}, {Retrieval}, and {Annotation} of {Film} and {Video}}, language = {English}, author = {Zeppelzauer, Matthias}, month = jan, year = {2018}, keywords = {2018, Center for Artificial Intelligence, Computer Vision, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Invited Talk, Machine Learning, Media Computing Group, Präsentation, Publikationstyp Vortrag, Wiss. 
Beitrag}, } @misc{zeppelzauer_ultrasonic_2018, address = {Wien, Austria}, type = {Invited talk}, title = {Ultrasonic {Communication} {Risks} and {Chances} of a {Novel} {Technology}}, abstract = {The ultrasonic frequency band represents a novel and so far hardly used channel for the communication of different devices, such as mobile phones, computers, TVs, and personal assistants like Google Chromecast. Ultrasonic communication is a promising technology since it requires only a standard loudspeaker and a microphone (as built into our phones) for communication. While offering a number of opportunities for innovative services (e.g. in the domain of Internet of Things), the technology, however, also bears a number risks. Companies like Silverpush employ ultrasonic data exchange to track users across devices and to collect information about their behavoir without their knowledge. In my talk I will present the novel technology of ultrasonic communication, show how it works and which risks and chances are linked to it. Additionally, I will present the project SoniControl which aims at the development of an ultrasonic firewall to protect the privacy of users as well as the project SoniTalk which aims at developing a safe and privacy-oriented protocol for ultrasonic communication.}, language = {English}, author = {Zeppelzauer, Matthias}, month = jan, year = {2018}, note = {Projekt: SoCo}, keywords = {2018, Center for Artificial Intelligence, Department Medien und Digitale Technologien, Department Technologie, FH SP Cyber Security, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Invited Talk, Machine Learning, Media Computing Group, Präsentation, Publikationstyp Vortrag, Wiss. 
Beitrag}, } @book{aigner_fmt_2017, title = {{FMT} 2017 - {Proceedings} of the 10th {Forum} {Media} {Technology} and 3rd {All} {Around} {Audio} {Symposium}}, url = {http://ceur-ws.org/Vol-2009/}, publisher = {CEUR-WS.org}, editor = {Aigner, Wolfgang and Moser, Thomas and Blumenstein, Kerstin and Zeppelzauer, Matthias and Iber, Michael and Schmiedl, Grischa}, year = {2017}, keywords = {2017, Audio Design, Center for Artificial Intelligence, Center for Digital Health Innovation, Computer Science, Data Modeling, Digital Media Experience, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Digital Technologies, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Media Computing Group, Media Technology, Mobile Applications, Publikationstyp Schriftpublikation, Wiss. Beitrag, data analysis, information visualization, peer-reviewed, usability}, } @misc{zeppelzauer_retrieval_2017, address = {Vienna University of Technology, Austria}, type = {Habilitation {Proposition}}, title = {Retrieval {Methods} for {Multimodal} {Media} {Data}}, abstract = {Habilitation Proposition Talk at Vienna University of Technology}, language = {English}, author = {Zeppelzauer, Matthias}, month = may, year = {2017}, keywords = {2017, Center for Artificial Intelligence, Computer Vision, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Invited Talk, Machine Learning, Media Computing Group, Präsentation, Wiss. 
Beitrag}, } @misc{zeppelzauer_acoustic_2017, address = {Novomatic Forum, Vienna, Austria}, type = {Invited talk}, title = {Acoustic {Ultrasound} {Tracking}}, abstract = {Keynote on ultrasound tracking at the Fachkonferenz The Mobile Enterprise}, language = {English}, author = {Zeppelzauer, Matthias}, month = jun, year = {2017}, note = {Projekt: SoCo}, keywords = {2017, Center for Artificial Intelligence, Department Medien und Digitale Technologien, Department Technologie, FH SP Cyber Security, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Invited Talk, Machine Learning, Media Computing Group, Präsentation, Wiss. Beitrag}, } @misc{zeppelzauer_automated_2017, address = {University Regensburg, Germany}, type = {Invited talk}, title = {Automated {Analysis}, {Retrieval} and {Annotation} of {Video} and {Film}}, abstract = {Presentation of content-based image and video analysis methods for automated film analysis at University of Regensburg}, language = {English}, author = {Zeppelzauer, Matthias}, month = jul, year = {2017}, keywords = {2017, Center for Artificial Intelligence, Computer Vision, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Invited Talk, Machine Learning, Media Computing Group, Präsentation, Wiss. 
Beitrag}, } @book{aigner_fmt_2016, title = {{FMT} 2016 - {Proceedings} of the 9th {Forum} {Media} {Technology} and 2nd {All} {Around} {Audio} {Symposium}}, isbn = {978-1-326-88118-4}, url = {http://ceur-ws.org/Vol-1734/}, publisher = {CEUR-WS.org}, editor = {Aigner, Wolfgang and Schmiedl, Grischa and Blumenstein, Kerstin and Zeppelzauer, Matthias and Iber, Michael}, month = nov, year = {2016}, keywords = {2016, Center for Artificial Intelligence, Center for Digital Health Innovation, Creative Industries, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Media Computing Group, Publikationstyp Schriftpublikation, Wiss. Beitrag, peer-reviewed}, } @inproceedings{poier_3d-pitoti_2017, address = {New York, NY, USA}, series = {{CBMI} '17}, title = {The {3D}-{Pitoti} {Dataset}: {A} {Dataset} for {High}-resolution {3D} {Surface} {Segmentation}}, isbn = {978-1-4503-5333-5}, url = {https://arxiv.org/pdf/1610.01944}, doi = {10/ghpp2j}, booktitle = {Proceedings of the 15th {International} {Workshop} on {Content}-{Based} {Multimedia} {Indexing}}, publisher = {ACM}, author = {Poier, Georg and Seidl, Markus and Zeppelzauer, Matthias and Reinbacher, Christian and Schaich, Martin and Bellandi, Giovanna and Marretta, Alberto and Bischof, Horst}, month = nov, year = {2017}, note = {event-place: Florence, Italy}, keywords = {2017, 3D Surface Segmentation, Center for Artificial Intelligence, Computer Vision, Dataset, Department Medien und Digitale Technologien, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Petroglyphs, Publikationstyp Schriftpublikation, Segmentation, Wiss. 
Beitrag, peer-reviewed}, pages = {5:1--5:7}, } @inproceedings{zeppelzauer_automatic_2018-2, address = {Yokohama, Japan}, title = {Automatic {Prediction} of {Building} {Age} from {Photographs}}, isbn = {978-1-4503-5046-4}, url = {https://arxiv.org/pdf/1804.02205}, doi = {10/ghpp2k}, language = {en}, urldate = {2018-10-10}, booktitle = {Proceedings of the {ACM} {International} {Conference} on {Multimedia} {Retrieval} ({ICMR} '18)}, publisher = {ACM Press}, author = {Zeppelzauer, Matthias and Despotovic, Miroslav and Sakeena, Muntaha and Koch, David and Döller, Mario}, year = {2018}, note = {Projekt: ImmBild Projekt: ImmoAge}, keywords = {Center for Artificial Intelligence, Computer Vision, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Schriftpublikation, Visual Computing, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed}, pages = {126--134}, } @misc{zeppelzauer_project_2017, address = {Linz, Austria}, type = {Exhibition}, title = {Project {SoniControl}}, abstract = {Presentation of the Project SoniControl at ARS Electronica Festival 2017}, language = {English}, author = {Zeppelzauer, Matthias}, month = sep, year = {2017}, note = {Projekt: SoCo}, keywords = {2017, Center for Artificial Intelligence, Department Medien und Digitale Technologien, Department Technologie, FH SP Cyber Security, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Invited Talk, Machine Learning, Media Computing Group, Präsentation, Wiss. Beitrag}, } @misc{zeppelzauer_ultrasonic_2017, address = {St. 
Pölten, Austria}, type = {Invited talk}, title = {Ultrasonic {Communication} {Risks} and {Chances} of a {Novel} {Technology}}, abstract = {The ultrasonic frequency band represents a novel and so far hardly used channel for the communication of different devices, such as mobile phones, computers, TVs, and personal assistants like Google Chromecast. Ultrasonic communication is a promising technology since it requires only a standard loudspeaker and a microphone (as built into our phones) for communication. While offering a number of opportunities for innovative services (e.g. in the domain of Internet of Things), the technology, however, also bears a number risks. Companies like Silverpush employ ultrasonic data exchange to track users across devices and to collect information about their behavoir without their knowledge. In my talk I will present the novel technology of ultrasonic communication, show how it works and which risks and chances are linked to it. Additionally, I will present the project SoniControl which aims at the development of an ultrasonic firewall to protect the privacy of users as well as the project SoniTalk which aims at developing a safe and privacy-oriented protocol for ultrasonic communication.}, language = {English}, author = {Zeppelzauer, Matthias}, year = {2017}, note = {Projekt: SoCo}, keywords = {2017, Center for Artificial Intelligence, Department Medien und Digitale Technologien, Department Technologie, FH SP Cyber Security, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Invited Talk, Machine Learning, Media Computing Group, Präsentation, Wiss. 
Beitrag}, } @misc{zeppelzauer_project_2017-1, address = {Vienna, Austria}, type = {Invited talk}, title = {Project {SoniTalk}}, url = {https://www.fhstp.ac.at/de/newsroom/news/erfolg-fuer-sonitalk-beim-netidee-community-event?filter=awards}, abstract = {Project presentation of SoniTalk Project}, language = {English}, author = {Zeppelzauer, Matthias and Ringot, Alexis}, year = {2017}, note = {Projekt: SoniTalk}, keywords = {2018, Center for Artificial Intelligence, Department Medien und Digitale Technologien, Department Technologie, FH SP Cyber Security, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Invited Talk, Machine Learning, Media Computing Group, Präsentation, Publikationstyp Präsentation, Wiss. Beitrag}, } @inproceedings{slijepcevic_towards_2018, address = {Prague, Czech Republic}, title = {Towards an optimal combination of input signals and derived representations for gait classification based on ground reaction force measurements.}, volume = {65}, doi = {10/gh38wn}, booktitle = {Gait \& {Posture} {Supplement}}, author = {Slijepcevic, Djordje and Zeppelzauer, Matthias and Schwab, Caterine and Raberger, Anna-Maria and Dumphart, Bernhard and Baca, Arnold and Breiteneder, Christian and Horsak, Brian}, year = {2018}, note = {Projekt: IntelliGait Projekt: CARMA Projekt: DHLab}, keywords = {Biomechanics, Center for Artificial Intelligence, Center for Digital Health and Social Innovation, Classification, DHLab, FH SP Data Analytics \& Visual Computing, Feature Representations, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Gait Recognition, Human Gait Analysis, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Media Computing Group, PCA, SP CDHSI Motor Rehabilitation, SP IGW Clinical \& Healthcare Research, SVM, Wiss. 
Beitrag, best, best-bhorsak, pattern recognition, peer-reviewed}, } @inproceedings{koch_visual_2018, address = {Yokohama, Japan}, title = {Visual {Estimation} of {Building} {Condition} with {Patch}-level {ConvNets}}, isbn = {978-1-4503-5797-5}, url = {http://dl.acm.org/citation.cfm?doid=3210499.3210526}, doi = {10/ghpp2m}, language = {en}, urldate = {2018-10-10}, booktitle = {Proceedings of the 2018 {ACM} {Workshop} on {Multimedia} for {Real} {Estate} {Tech} - {RETech}'18}, publisher = {ACM Press}, author = {Koch, David and Despotovic, Miroslav and Sakeena, Muntaha and Döller, Mario and Zeppelzauer, Matthias}, year = {2018}, note = {Projekt: ImmBild Projekt: ImmoAge}, keywords = {Center for Artificial Intelligence, Computer Vision, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Wiss. Beitrag, peer-reviewed}, pages = {12--17}, } @inproceedings{zeppelzauer_sonicontrol_2018-2, address = {Seoul, South Korea}, title = {{SoniControl} - {A} {Mobile} {Ultrasonic} {Firewall}}, url = {https://arxiv.org/abs/1807.07617}, doi = {10/gh377f}, abstract = {The exchange of data between mobile devices in the near-ultrasonic frequency band is a new promising technology for near field communication (NFC) but also raises a number of privacy concerns. We present the first ultrasonic firewall that reliably detects ultrasonic communication and provides the user with effective means to prevent hidden data exchange. This demonstration showcases a new media-based communication technology ("data over audio") together with its related privacy concerns. 
It enables users to (i) interactively test out and experience ultrasonic information exchange and (ii) shows how to protect oneself against unwanted tracking.}, urldate = {2018-10-10}, booktitle = {Proceedings of the {ACM} {International} {Conference} on {Multimedia}}, publisher = {ACM Press}, author = {Zeppelzauer, Matthias and Ringot, Alexis and Taurer, Florian}, year = {2018}, note = {arXiv: 1807.07617}, keywords = {Acoustic Cookies, Acoustic Firewall, Acoustic Tracking, Center for Artificial Intelligence, Computer Science - Cryptography and Security, Computer Science - Multimedia, FH SP Cyber Security, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Ultrasound Data Transmission, Wiss. Beitrag, best, best-aringot, peer-reviewed}, } @article{despotovic_prediction_2019, title = {Prediction and analysis of heating energy demand for detached houses by computer vision}, volume = {193}, issn = {0360-5442}, url = {https://www.sciencedirect.com/science/article/pii/S0378778818336430?via%3Dihub}, doi = {10/fsxn}, abstract = {Exterior images of real estate contain a large number of visual clues which allow conclusions about the heating energy demand (HED) of a building. Up to now, HED has been determined by specially trained experts such as architects, civil engineers, etc. either on the basis of consumption data or estimated demand values. In this article, we present a novel approach to determine the HED of detached houses. Our suggested approach is based solely on the visual appearance and assumes that exterior images of a building contain a variety of information that allows inferences about the HED of a building. For this, we use the powerful techniques of image analysis and computer vision which are already successfully used in different domains like surveillance, image search, and robotics. 
The results show that our approach works well and in addition to the HED, the construction period of a building can also be determined. Our algorithm achieves a classification accuracy of 62\% for HED and 57\% for construction age epoch.}, journal = {Energy \& Buildings}, author = {Despotovic, Miroslav and Koch, David and Leiber, Sascha and Döller, Mario and Sakeena, Muntaha and Zeppelzauer, Matthias}, year = {2019}, note = {Projekt: ImmBild Projekt: ImmoAge}, keywords = {Center for Artificial Intelligence, Computer Vision, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Visual Computing, Wiss. Beitrag, best, peer-reviewed}, pages = {29--35}, } @article{bernard_vial_2018, title = {{VIAL} – {A} {Unified} {Process} for {Visual}-{Interactive} {Labeling}}, volume = {34}, copyright = {Springer, Berlin, Heidelberg}, issn = {1432-2315}, url = {https://bit.ly/2My1Yrt}, doi = {10/gd5hr3}, abstract = {The assignment of labels to data instances is a fundamental prerequisite for many machine learning tasks. Moreover, labeling is a frequently applied process in visual-interactive analysis approaches and visual analytics. However, the strategies for creating labels usually differ between these two fields. This raises the question whether synergies between the different approaches can be attained. In this paper, we study the process of labeling data instances with the user in the loop, from both the machine learning and visual-interactive perspective. Based on a review of differences and commonalities, we propose the ’Visual-Interactive Labeling‘ (VIAL) process that unifies both approaches. We describe the six major steps of the process and discuss their specific challenges. Additionally, we present two heterogeneous usage scenarios from the novel VIAL perspective, one on metric distance learning and one on object detection in videos. 
Finally, we discuss general challenges to VIAL and point out necessary work for the realization of future VIAL approaches.}, number = {1189}, journal = {The Visual Computer}, author = {Bernard, Jürgen and Zeppelzauer, Matthias and Sedlmair, Michael and Aigner, Wolfgang}, year = {2018}, note = {Projekt: KAVA-Time Projekt: IntelliGait Projekt: CARMA}, keywords = {Active Learning, Candidate Selection, Center for Artificial Intelligence, Creative Industries, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Interactive Labeling, Labeling Strategies, Machine Learning, Media Computing Group, Visual Interactive Labeling, best, best-mzeppelzauer, information visualization}, pages = {16}, } @article{koch_real_2020, title = {Real {Estate} {Image} {Analysis} - {A} {Literature} {Review}}, volume = {27(2)}, issn = {0927-7544}, url = {https://www.tandfonline.com/doi/pdf/10.22300/0927-7544.27.2.269?needAccess=true}, doi = {10/gnt2wg}, journal = {Journal of Real Estate Literature}, author = {Koch, David and Despotovic, Miroslav and Sascha, Leiber and Sakeena, Muntaha and Döller, Mario and Zeppelzauer, Matthias}, year = {2020}, note = {Projekt: ImmBild Projekt: ImmoAge}, keywords = {Center for Artificial Intelligence, Computer Vision, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Wiss. Beitrag, peer-reviewed}, pages = {269--300}, } @inproceedings{bernard_jurgen_learning_2018, address = {Brno, Czech Republic}, title = {Learning from the {Best}–{Visual} {Analysis} of a {Quasi}-{Optimal} {Data} {Labeling} {Strategy}}, abstract = {An overarching goal of active learning strategies is to reduce the human effort when labeling datasets and training machine learning methods. 
In this work, we focus on the analysis of a (theoretical) quasi-optimal, ground-truth-based strategy for labeling instances, which we refer to as the upper limit of performance (ULoP). Our long-term goal is to improve existing active learning strategies and to narrow the gap between current strategies and the outstanding performance of ULoP. In an observational study conducted on five datasets, we leverage visualization methods to better understand how and why ULoP selects instances. Results show that the strategy of ULoP is not constant (as in most state-of-the-art active learning strategies) but changes within the labeling process. We identify three phases that are common to most observed labeling processes, partitioning the labeling process into (1) a Discovery Phase, (2) a Consolidation Phase, and (3) a Fine Tuning Phase.}, booktitle = {Proceedings of the {Eurographics} {Conference} on {Visualization} ({EuroVis})}, author = {Bernard, Jürgen and Hutter, Marco and Lehmann, Markus and Müller, Martin and Zeppelzauer, Matthias and Sedlmair, Michael}, year = {2018}, keywords = {Center for Artificial Intelligence, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Visual analytics, Wiss. 
Beitrag, peer-reviewed, ⛔ No DOI found}, pages = {4}, } @inproceedings{bernard_jurgen_unified_2017, address = {Barcelona, Spain}, title = {A {Unified} {Process} for {Visual}-{Interactive} {Labeling}}, booktitle = {In {Proceedings} of the 8th {International} {EuroVis} {Workshop} on {Visual} {Analytics}}, author = {Bernard, Jürgen and Zeppelzauer, Matthias and Sedlmair, Michael and Hutter, Marco}, year = {2017}, note = {Projekt: KAVA-Time}, keywords = {2017, Center for Artificial Intelligence, Creative Industries, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Präsentation, Publikationstyp Schriftpublikation, Visual analytics, Wiss. Beitrag, peer-reviewed, ⛔ No DOI found}, } @inproceedings{muhr_towards_2017, address = {St. Pölten, Austria}, title = {Towards {Automated} {Real} {Estate} {Assessment} from {Satellite} {Images} with {CNNs}}, volume = {2009}, booktitle = {In {Proceedings} of the 10th {Forum} {Media} {Technology} ({FMT})}, publisher = {CEUR Workshop Proceedings}, author = {Muhr, Valentin and Despotovic, Miroslav and Koch, David and Döller, Mario and Zeppelzauer, Matthias}, year = {2017}, note = {Projekt: ImmBild Projekt: InfraBase}, keywords = {2017, Center for Artificial Intelligence, Computer Vision, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Präsentation, Publikationstyp Schriftpublikation, Wiss. 
Beitrag, best paper award, peer-reviewed, ⛔ No DOI found}, pages = {14--23}, } @inproceedings{kirchknopf_detection_2018, address = {Sophia Antipolis, France}, title = {Detection of {Road} {Passability} from {Social} {Media} and {Satellite} {Images}}, abstract = {This paper presents the contribution of Team MC-FHSTP to the multimedia satellite task at the MediaEval 2018 benchmark. We present two methods, one for the estimation of the passability of roads from social media images due to flooding and one method that estimates passability from satellite images. We present the results obtained in the benchmark for both methods.}, booktitle = {{CEUR} {Proceedings} of the {MediaEval} 2018 {Workshop}}, author = {Kirchknopf, Armin and Slijepcevic, Djordje and Zeppelzauer, Matthias and Seidl, Markus}, year = {2018}, keywords = {Center for Artificial Intelligence, Computer Vision, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Vortrag, Wiss. Beitrag, ⛔ No DOI found}, pages = {2}, } @inproceedings{seidl_markus_towards_2019, address = {Dublin, Irland}, title = {Towards {Distinction} of {Rock} {Art} {Pecking} {Styles} with a {Hybrid} {2D}/{3D} {Approach}}, booktitle = {Proceedings of the {International} {Conference} on {Content}-based {Multimedia} {Indexing} ({CBMI})}, author = {Seidl, Markus and Zeppelzauer, Matthias}, year = {2019}, note = {Projekt: PITOTI 3D}, keywords = {Center for Artificial Intelligence, Computer Vision, Digital Heritage, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Visual Computing, Vortrag, Wiss. 
Beitrag, best, peer-reviewed, ⛔ No DOI found}, pages = {4}, } @inproceedings{zielinski_persistence_2019, address = {Macao, China}, title = {Persistence {Bag}-of-{Words} for {Topological} {Data} {Analysis}}, url = {http://arxiv.org/abs/1802.04852}, doi = {10/ghpp7z}, urldate = {2018-10-10}, booktitle = {Proceedings of the {International} {Joint} {Conference} on {Artificial} {Intelligence} 2019}, author = {Zielinski, Bartosz and Lipinski, Michal and Juda, Mateusz and Zeppelzauer, Matthias and Dlotko, Pawel}, year = {2019}, note = {arXiv: 1802.04852}, keywords = {Artificial Intelligence, Center for Artificial Intelligence, Computer Vision, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Mathematics - Algebraic Topology, Media Computing Group, Statistics, Vortrag, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed}, pages = {6}, } @inproceedings{sakeena_muntaha_combining_2019, address = {Steyr, Austria}, title = {Combining {Deep} {Learning} and {Variational} {Level} {Sets} for {Segmentation} of {Buildings}}, url = {https://workshops.aapr.at/wp-content/uploads/2019/05/ARW-OAGM19_42.pdf}, doi = {10/ghz24z}, booktitle = {Proceedings of {ARW} \& {OAGM} {Workshop} 2019}, author = {Sakeena, Muntaha and Zeppelzauer, Matthias}, year = {2019}, keywords = {Artificial Intelligence, Best Poster Award, Center for Artificial Intelligence, Computer Vision, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Visual Computing, Vortrag, Wiss. 
Beitrag, peer-reviewed}, pages = {2}, } @inproceedings{bernard_visual_2019, address = {Porto, Portugal}, title = {Visual {Analysis} of {Degree}-of-{Interest} {Functions} to {Support} {Selection} {Strategies} for {Instance} {Labeling}}, isbn = {978-3-03868-087-1}, url = {https://diglib.eg.org/handle/10.2312/eurova20191116}, doi = {10/ghpp2p}, booktitle = {{EuroVis} {Workshop} on {Visual} {Analytics} ({EuroVA})}, publisher = {The Eurographics Association}, author = {Bernard, Jürgen and Hutter, Marco and Ritter, Christian and Lehmann, Markus and Sedlmair, Michael and Zeppelzauer, Matthias}, year = {2019}, keywords = {Center for Artificial Intelligence, Data Science, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Visual Computing, Visual analytics, Wiss. Beitrag, peer-reviewed}, } @inproceedings{bernard_towards_2018, address = {Brno, Czech Republic}, title = {Towards {User}-{Centered} {Active} {Learning} {Algorithms}}, volume = {37}, url = {http://doi.wiley.com/10.1111/cgf.13406}, doi = {10/gdw79h}, language = {en}, urldate = {2018-10-10}, booktitle = {Computer {Graphics} {Forum}}, author = {Bernard, Jürgen and Zeppelzauer, Matthias and Lehmann, Markus and Müller, Martin and Sedlmair, Michael}, year = {2018}, keywords = {Center for Artificial Intelligence, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Visual analytics, Wiss. 
Beitrag, best, best-mzeppelzauer, peer-reviewed}, pages = {121--132}, } @article{despotovic_predicting_2017, series = {1}, title = {Predicting {Heating} {Energy} {Demand} by {Computer} {Vision}}, volume = {33}, issn = {1865-2034}, url = {http://www.springer.com/-/1/AV5EQLmWpRR8A1ooSeIw}, doi = {10/gh3772}, abstract = {In many countries such as Austria the heating energy demand (HED) is an essential parameter of the energy certification of houses. In this paper, we present an approach in which the HED category for a single family house is---for the first time---determined from a standard photograph directly by means of computer vision and machine learning.}, journal = {Computer Science - Research and Development}, author = {Despotovic, Miroslav and Sakeena, Muntaha and Koch, David and Döller, Mario and Zeppelzauer, Matthias}, year = {2017}, note = {Projekt: ImmBild Projekt: ImmoAge}, keywords = {2017, Center for Artificial Intelligence, Computer Vision, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Präsentation, Publikationstyp Schriftpublikation, Wiss. Beitrag, peer-reviewed}, pages = {231--232}, } @inproceedings{bernard_visual-interactive_2017, address = {Porto, Portugal}, title = {Visual-{Interactive} {Similarity} {Search} for {Complex} {Objects} by {Example} of {Soccer} {Player} {Analysis}}, isbn = {978-989-758-228-8}, doi = {10/ghz24s}, abstract = {The definition of similarity is a key prerequisite when analyzing complex data types in data mining, information retrieval, or machine learning. However, the meaningful definition is often hampered by the complexity of data objects and particularly by different notions of subjective similarity latent in targeted user groups. 
Taking the example of soccer players, we present a visual-interactive system that learns users’ mental models of similarity. In a visual-interactive interface, users are able to label pairs of soccer players with respect to their subjective notion of similarity. Our proposed similarity model automatically learns the respective concept of similarity using an active learning strategy. A visual-interactive retrieval technique is provided to validate the model and to execute downstream retrieval tasks for soccer player analysis. The applicability of the approach is demonstrated in different evaluation strategies, including usage scenarios and cross-validation tests.}, booktitle = {Proceedings of the 12th {International} {Joint} {Conference} on {Computer} {Vision}, {Imaging} and {Computer} {Graphics} {Theory} and {Applications} - {Volume} 3: {IVAPP}}, author = {Bernard, Jürgen and Ritter, Christian and Sessler, David and Zeppelzauer, Matthias and Kohlhammer, Jörn and Fellner, Dieter}, year = {2017}, keywords = {2017, Center for Artificial Intelligence, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Schriftpublikation, Visual analytics, Wiss. Beitrag, peer-reviewed}, pages = {75--87}, } @article{zeppelzauer_study_2018, title = {A {Study} on {Topological} {Descriptors} for the {Analysis} of {3D} {Surface} {Texture}}, volume = {167}, issn = {1077-3142}, url = {https://arxiv.org/pdf/1710.10662}, doi = {10/ghpp2h}, abstract = {Methods from computational topology are becoming more and more popular in computer vision and have shown to improve the state-of-the-art in several tasks. In this paper, we investigate the applicability of topological descriptors in the context of 3D surface analysis for the classification of different surface textures. 
We present a comprehensive study on topological descriptors, investigate their robustness and expressiveness and compare them with state-of-the-art methods. Results show that class-specific information is reflected well in topological descriptors. The investigated descriptors can directly compete with non-topological descriptors and capture orthogonal information. Moreover they improve the state-of-the-art in combination with non-topological descriptors.}, journal = {Journal on Computer Vision and Image Understanding (CVIU)}, author = {Zeppelzauer, Matthias and Zielinski, Bartosz and Juda, Mateusz and Seidl, Markus}, year = {2018}, note = {Projekt: PITOTI 3D}, keywords = {3D surface classification, Center for Artificial Intelligence, Computer Vision, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Surface texture analysis, Visual Computing, Wiss. Beitrag, best, best-lbseidl, best-mzeppelzauer, peer-reviewed, persistence diagram, persistence image, persistent homology, surface representation, surface topology analysis}, pages = {74--88}, } @article{bernard_jurgen_comparing_2017, title = {Comparing {Visual}-{Interactive} {Labeling} with {Active} {Learning}: {An} {Experimental} {Study}}, volume = {24}, issn = {1077-2626}, url = {http://eprints.cs.univie.ac.at/5257/1/bernard2017labeling.pdf}, doi = {10/gcqb3r}, number = {1}, journal = {IEEE Transactions on Visualization and Computer Graphics (TVCG)}, author = {Bernard, Jürgen and Hutter, Marco and Zeppelzauer, Matthias and Fellner, Dieter and Sedlmair, Michael}, year = {2017}, keywords = {2017, Center for Artificial Intelligence, Creative Industries, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp 
Schriftpublikation, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed}, } @inproceedings{ritter_personalized_2018, address = {Brno, Czech Republic}, title = {Personalized {Visual}-{Interactive} {Music} {Classification}}, isbn = {978-3-03868-064-2}, url = {https://diglib.eg.org/handle/10.2312/eurova20181109}, doi = {10.2312/eurova.20181109}, abstract = {We present an interactive visual music classification tool that will allow users to automatically structure music collections in a personalized way. With our approach, users play an active role in an iterative process of building classification models, using different interactive interfaces for labeling songs. The interactive tool conflates interfaces for the detailed analysis at different granularities, i.e., audio features, music songs, as well as classification results at a glance. Interactive labeling is provided with three complementary interfaces, combining model-centered and human-centered labeling-support principles. A clean visual design of the individual interfaces depicts complex model characteristics for experts, and indicates our work-in-progress towards the abilities of non-experts. The result of a preliminary usage scenario shows that, with our system, hardly any knowledge about machine learning is needed to create classification models of high accuracy with less than 50 labels.}, urldate = {2018-10-10}, booktitle = {{EuroVis} {Workshop} on {Visual} {Analytics} ({EuroVA})}, publisher = {The Eurographics Association}, author = {Ritter, Christian and Altenhofen, Christian and Zeppelzauer, Matthias and Kuijper, Arjan and Schreck, Tobias and Bernard, Jürgen}, year = {2018}, keywords = {Center for Artificial Intelligence, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Wiss. 
Beitrag, peer-reviewed}, } @article{musik_christoph_computer_2018, title = {Computer {Vision} and the {Digital} {Humanities}: {Adapting} {Image} {Processing} {Algorithms} and {Ground} {Truth} through {Active} {Learning}}, volume = {7}, issn = {2213-0969}, url = {https://www.viewjournal.eu/articles/abstract/198/}, doi = {10/ggbmx7}, number = {14}, journal = {VIEW Journal of European Television History and Culture}, author = {Musik, Christoph and Zeppelzauer, Matthias}, year = {2018}, keywords = {Center for Artificial Intelligence, Computer Vision, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Business, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Institut für Medienwirtschaft, Machine Learning, Media Computing Group, Wiss. Beitrag, peer-reviewed}, pages = {59--72}, } @article{zeppelzauer_interactive_2016, title = {Interactive {3D} {Segmentation} of {Rock}-{Art} by {Enhanced} {Depth} {Maps} and {Gradient} {Preserving} {Regularization}}, volume = {9}, issn = {1556-4673}, url = {https://publik.tuwien.ac.at/files/publik_258520.pdf}, doi = {10/ghpp2n}, number = {4}, journal = {ACM Journal on Computing and Cultural Heritage}, author = {Zeppelzauer, Matthias and Poier, Georg and Seidl, Markus and Reinbacher, Christian and Schulter, Samuel and Breiteneder, Christian and Bischof, Horst}, month = jul, year = {2016}, note = {Article 19 Projekt: PITOTI 3D}, keywords = {Center for Artificial Intelligence, Computer Vision, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Schriftpublikation, Wiss. 
Beitrag, best, peer-reviewed}, pages = {19:1--19:30}, } @article{zeppelzauer_study_2016, title = {A {Study} on {Topological} {Descriptors} for the {Analysis} of {3D} {Surface} {Texture}}, abstract = {Methods from computational topology are becoming more and more popular in computer vision and have shown to improve the state-of-the-art in several tasks. In this paper, we investigate the applicability of topological descriptors in the context of 3D surface analysis for the classification of different surface textures. We present a comprehensive study on topological descriptors, investigate their robustness and expressiveness and compare them with state-of-the-art methods. Results show that class-specific information is reflected well in topological descriptors. The investigated descriptors can directly compete with non-topological descriptors and capture orthogonal information. Moreover they improve the state-of-the-art in combination with non-topological descriptors.}, journal = {Journal on Computer and System Sciences}, author = {Zeppelzauer, Matthias and Zielinski, Bartosz and Juda, Mateusz and Seidl, Markus}, year = {2016}, note = {Projekt: PITOTI 3D}, keywords = {2016, 3D surface classification, Center for Artificial Intelligence, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Media Computing Group, Publikationstyp Schriftpublikation, SP, Surface texture analysis, Wiss. 
Beitrag, best, best-lbseidl, peer-reviewed, persistence diagram, persistence image, persistent homology, surface representation, surface topology analysis, ⛔ No DOI found}, pages = {60}, } @article{zeppelzauer_towards_2014, title = {Towards an automated acoustic detection system for free-ranging elephants}, volume = {24}, url = {http://www.tandfonline.com/eprint/59RjKXiVvfCAAENvVZRc/full}, doi = {10/f3st62}, journal = {Bioacoustics}, author = {Zeppelzauer, Matthias and Hensman, S. and Stöger, A.}, year = {2014}, keywords = {Center for Artificial Intelligence, Extern, FH SP Data Analytics \& Visual Computing, Institut für Creative Media Technologies, Loxodonta africana, Machine Learning, Media Computing Group, Wiss. Beitrag, automated elephant detection, human–elephant conflict, spectral signal enhancement, wildlife monitoring}, pages = {13--19}, } @phdthesis{zeppelzauer_discrimination_2006, address = {Vienna}, type = {Diploma {Thesis}}, title = {Discrimination and {Retrieval} of {Animal} {Sounds}}, school = {Vienna University of Technology}, author = {Zeppelzauer, Matthias}, year = {2006}, keywords = {Amplitude Descriptors, Animal Sounds, Audio classification, Center for Artificial Intelligence, Content-based Audio Retrieval, Extern, FH SP Data Analytics \& Visual Computing, Institut für Creative Media Technologies, LoHAS, MFCC, Media Computing Group, SVM, Sound Classification, Sound Recognition, Temporal Audio Features, Wiss. 
Beitrag, pattern recognition}, } @phdthesis{zeppelzauer_syntactic_2011, type = {{PhD} {Thesis}}, title = {Syntactic and {Semantic} {Concepts} in {Audio}-{Visual} {Media}}, url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.228.6825&rep=rep1&type=pdf}, school = {Vienna University of Technology}, author = {Zeppelzauer, Matthias and Mitrović, Dalibor}, year = {2011}, keywords = {Automated Film Analysis, Center for Artificial Intelligence, Computer Vision, Content-based Audio Retrieval, Content-based Video Retrieval, Cross-Modal Correlation, Cross-Modal Retrieval, Dziga Vertov, Extern, FH SP Data Analytics \& Visual Computing, Historical Documentaries, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Motion Retrieval, Multimodal Retrieval, Scene segmentation, Visual Composition Retrieval, Wiss. Beitrag, black-frame detection, intertitle detection, pattern recognition, shot cut detection}, } @misc{zeppelzauer_carneval_2016, address = {Vienna, Austria}, type = {Invited talk}, title = {Carneval in {Rio} or {St}. {Patricks} {Day}? {Detecting} {Events} in {Social} {Media}}, abstract = {The huge amount of data hosted on social media platforms like Flickr, Facebook, Twitter \& Co pose novel challenges for automatic data analysis, indexing, search, and retrieval. A significant portion of publicly shared content relates to social events like concerts, sports events, and festivals. Recently, the detection and retrieval of media content related to social events has gained increasing attention in the research community as well as in industry (social media providers). In this talk I will introduce and review automatic techniques for social event analysis. 
Starting with techniques to separate event-related from non-event-related content (event relevance filtering), I will present techniques for the extraction of data that belongs to the same event (social event clustering) and for the classification of events according to their type (e.g. sports, music festival etc.). What complicates these analyses is that media content related to social events is usually distributed across different media platforms (e.g. Flickr and Youtube). To account for this, recent work in the context of social events focuses on the linking of data across platforms (cross-platform linking). Once social events are extracted and classified, higher-level tasks become possible, such as event retrieval, which deals with the identification of a specific events matching a user-defined search query, such as “Find all music events that took place in Vienna in 2016”. Another important task is to recognize event-related media content (event recognition) to answer questions like “does this image show the carnival in Rio?”. The talk will summarize recent methods and research results in the area of social media analysis and discuss related future research directions.}, language = {English}, author = {Zeppelzauer, Matthias}, month = may, year = {2016}, keywords = {2016, Center for Artificial Intelligence, Computer Vision, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Invited Talk, Machine Learning, Media Computing Group, Präsentation, Wiss. Beitrag}, } @misc{zeppelzauer_acoustic_2015, address = {St. 
Pölten, Austria}, title = {Acoustic {Wildlife} {Monitoring}}, abstract = {This talk gives an overview of novel algorithms developed for the automatic detection of animal sounds in the wild.}, author = {Zeppelzauer, Matthias}, month = nov, year = {2015}, keywords = {2015, Center for Artificial Intelligence, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Poster, Publikationstyp Schriftpublikation, Schriftpublikation, Wiss. Beitrag}, } @misc{zeppelzauer_novel_2015, address = {Boston, Massachusetts, United States}, title = {A {Novel} {Annotation} {Tool} for {Complex} {Petroglyph} {Shapes}}, url = {http://mc.fhstp.ac.at/content/novel_annotation_tool_complex_petroglyph_shapes}, abstract = {We present a novel semi-automatic annotation tool for the construction of large real-world shape datasets. The tool enables the collaborative semi-automatic segmentation and annotation of shapes. Shapes are stored together with their annotations in a database and can be retrieved efficiently to construct custom shape datasets. The resulting datasets should stimulate further research in the domain of shape recognition and matching.}, author = {Zeppelzauer, Matthias and Wieser, Ewald and Seidl, Markus}, month = jun, year = {2015}, note = {Projekt: PITOTI 3D}, keywords = {2015, Center for Artificial Intelligence, Computer Vision, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Poster, Schriftpublikation, Wiss. 
Beitrag}, } @misc{zeppelzauer_multimedia_2014, address = {Jagiellonian University Krakau}, title = {Multimedia {Processing} – {From} {Ancient} {Rock} {Art} to {Social} {Web} {Media}}, url = {http://www.uj.edu.pl/en/wydzialy/wmii}, abstract = {The processing of multimedia content is a challenging topic that provides a wide range of possible applications. A major challenge lies in the highly heterogeneous characteristics of the underlying data that require specialized treatment in content-based analysis. This talk presents research performed in content-based retrieval of unusual and thus rarely investigated multimedia data. Presented topics include 3D surface and shape analysis of ancient rock art from high resolution 3D scans, object detection and tracking in unconstrained wildlife video, robust sound detection in bioacoustic monitoring, and detection of social events from web data.}, language = {Englisch}, author = {Zeppelzauer, Matthias}, month = apr, year = {2014}, keywords = {2014, Center for Artificial Intelligence, Computer Vision, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Präsentation, Wiss. Beitrag, peer-reviewed}, } @article{zeppelzauer_automated_2013, title = {Automated detection of elephants in wildlife video}, volume = {2013}, issn = {1687-5281}, url = {https://doi.org/10.1186/1687-5281-2013-46}, doi = {10/f3snb6}, abstract = {Biologists often have to investigate large amounts of video in behavioral studies of animals. These videos are usually not sufficiently indexed which makes the finding of objects of interest a time-consuming task. We propose a fully automated method for the detection and tracking of elephants in wildlife video which has been collected by biologists in the field. The method dynamically learns a color model of elephants from a few training images. 
Based on the color model, we localize elephants in video sequences with different backgrounds and lighting conditions. We exploit temporal clues from the video to improve the robustness of the approach and to obtain spatial and temporal consistent detections. The proposed method detects elephants (and groups of elephants) of different sizes and poses performing different activities. The method is robust to occlusions (e.g., by vegetation) and correctly handles camera motion and different lighting conditions. Experiments show that both near- and far-distant elephants can be detected and tracked reliably. The proposed method enables biologists efficient and direct access to their video collections which facilitates further behavioral and ecological studies. The method does not make hard constraints on the species of elephants themselves and is thus easily adaptable to other animal species.}, number = {1}, journal = {EURASIP Journal on Image and Video Processing}, author = {Zeppelzauer, Matthias}, month = aug, year = {2013}, keywords = {Center for Artificial Intelligence, Computer Vision, Extern, FH SP Data Analytics \& Visual Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Wiss. Beitrag}, pages = {46}, } @article{stoger_age_2014, title = {Age group estimation in free-ranging {African} elephants based on acoustic cues of low-frequency rumbles}, volume = {23}, journal = {Bioacoustics}, author = {Stöger, A. and Zeppelzauer, Matthias and Baotic, A.}, year = {2014}, keywords = {2014, Center for Artificial Intelligence, Creative Industries, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Pattern recognition, Publikationstyp Schriftpublikation, Wiss. 
Beitrag, peer-reviewed, ⛔ No DOI found}, } @inproceedings{zeppelzauer_novel_2015-1, address = {Boston, MA, USA}, title = {A {Novel} {Annotation} {Tool} for {Complex} {Petroglyph} {Shapes}}, abstract = {We present a novel semi-automatic annotation tool for the construction of large real-world shape datasets. The tool enables the collaborative semi-automatic segmentation and annotation of shapes. Shapes are stored together with their annotations in a database and can be retrieved efficiently to construct custom shape datasets. The resulting datasets should stimulte further reasearch in the domain of shape recognition and matching.}, booktitle = {The {Future} of {Datasets} in {Vision} {Workshop} (in conjunction with {CVPR} 2015)}, author = {Zeppelzauer, Matthias and Wieser, Ewald and Seidl, Markus}, year = {2015}, note = {Projekt: PITOTI 3D}, keywords = {2015, Center for Artificial Intelligence, Creative Industries, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Präsentation, Publikationstyp Schriftpublikation, Vortrag, Wiss. 
Beitrag, best, peer-reviewed, poster, ⛔ No DOI found}, } @inproceedings{zeppelzauer_novel_2010, address = {Chongqing, China}, title = {A {Novel} {Trajectory} {Clustering} {Approach} for {Motion} {Segmentation}}, url = {http://link.springer.com/chapter/10.1007/978-3-642-11301-7_44⋕page-1}, booktitle = {Proceedings of {Multimedia} {Modeling} {Conference}}, author = {Zeppelzauer, Matthias and Zaharieva, Maia and Mitrović, Dalibor and Breiteneder, C.}, year = {2010}, keywords = {Archive Film Analysis, Automated Film Analysis, Automatic Film Analysis, Center for Artificial Intelligence, Computer Vision, Content-based Video Retrieval, Dziga Vertov, Extern, FH SP Data Analytics \& Visual Computing, Historical Documentaries, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Motion Analysis, Motion Retrieval, Motion Segmentation, Motion Trajectory, Trajectory Clustering, Wiss. Beitrag, pattern recognition, ⛔ No DOI found}, pages = {433--443}, } @inproceedings{zaharieva_camera_2010, address = {Chongqing, China}, title = {Camera {Take} {Reconstruction}}, url = {http://link.springer.com/chapter/10.1007/978-3-642-11301-7_39⋕page-1}, booktitle = {Proceedings of {Multimedia} {Modeling} {Conference}}, author = {Zaharieva, Maia and Zeppelzauer, Matthias and Breiteneder, C. and Mitrović, Dalibor}, year = {2010}, keywords = {Archive Film Analysis, Automated Film Analysis, Camera Take Detection, Camera Takes, Center for Artificial Intelligence, Computer Vision, Content-based Video Retrieval, Continuity Analysis, Dziga Vertov, Edge Histogram, Extern, FH SP Data Analytics \& Visual Computing, Historical Documentaries, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Montage Reconstruction, Motion Smoothness, SIFT, Smoothness Analysis, Wiss. 
Beitrag, film montage, pattern recognition, ⛔ No DOI found}, pages = {379--388}, } @article{zeppelzauer_archive_2012, title = {Archive {Film} {Material} – {A} novel {Challenge} for {Automated} {Film} {Analysis}}, url = {http://framescinemajournal.com/article/archive-film-material-a-novel-challenge/}, number = {1}, journal = {Frames Cinema Journal}, author = {Zeppelzauer, Matthias and Mitrović, Dalibor and Breiteneder, C.}, editor = {Grant, Catherine}, year = {2012}, keywords = {Archive Film Analysis, Artifacts, Automated Film Analysis, Center for Artificial Intelligence, Computer Vision, Content-based Video Retrieval, Dziga Vertov, Extern, FH SP Data Analytics \& Visual Computing, Film artifacts, Film distortion, Flickr, Historical Documentaries, Institut für Creative Media Technologies, Long-time degradation, Machine Learning, Media Computing Group, Noise, Shaking, Wiss. Beitrag, challenges for automated analysis, film composition, film montage, pattern recognition, scratches, ⛔ No DOI found}, } @article{stoger_visualizing_2012, title = {Visualizing {Sound} {Emission} of {Elephant} {Vocalizations}: {Evidence} for {Two} {Rumble} {Production} {Types}}, volume = {7}, url = {http://dx.plos.org/10.1371/journal.pone.0048907}, number = {11:e48907}, journal = {Plos One}, author = {Stöger, A. and Heimann, G. and Zeppelzauer, Matthias and Ganswindt, A. and Hensman, S. and Charlton, B.}, year = {2012}, keywords = {Acoustic Camera, Aggregated LPC Spectral Features, Audio classification, Center for Artificial Intelligence, Content-based Audio Retrieval, Extern, FH SP Data Analytics \& Visual Computing, Formant Analysis, Infrasonic Sound Analysis, Institut für Creative Media Technologies, LPC, LPC Spectrogram, Linear Predictive Coding, Machine Learning, Media Computing Group, Sound Classification, Sound Recognition, Wiss. 
Beitrag, best-mzeppelzauer, pattern recognition, ⛔ No DOI found}, } @article{mitrovic_analysis_2007, title = {Analysis of the {Data} {Quality} of {Audio} {Descriptions} of {Environmental} {Sounds}}, volume = {5}, url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.68.1611&rep=rep1&type=pdf}, journal = {Journal of Digital Information Management}, author = {Mitrović, Dalibor and Zeppelzauer, Matthias and Eidenberger, H.}, year = {2007}, keywords = {Audio Features, Center for Artificial Intelligence, Content-based Audio Retrieval, Correlation Analysis, Extern, FH SP Data Analytics \& Visual Computing, Factor Loadings, Feature Combination, Institut für Creative Media Technologies, MFCC, MPEG-7 Descriptors, Machine Learning, Media Computing Group, PCA, Principal Components Analysis, Redundancy, Redundancy Analysis, Statistical Analysis, WALDI, Wiss. Beitrag, data quality, pattern recognition, ⛔ No DOI found}, pages = {48--55}, } @article{zaharieva_archive_2010, title = {Archive film comparison}, volume = {1}, url = {http://books.google.de/books?hl=de&lr=&id=m6yeBQAAQBAJ&oi=fnd&pg=PA188&dq=%22Archive+film+comparison%22&ots=y15ZvMfT2D&sig=U5nGBjFWvqnw5iEjvK8AmbwL6V0}, doi = {10/fwjqsc}, number = {3}, journal = {International Journal of Multimedia Data Engineering and Management}, author = {Zaharieva, Maia and Zeppelzauer, Matthias and Mitrović, Dalibor and Breiteneder, C.}, year = {2010}, keywords = {Archive Film Analysis, Automated Film Analysis, Automated Film Comparison, Center for Artificial Intelligence, Computer Vision, Content-based Video Retrieval, Dziga Vertov, Edge Histogram, Extern, FH SP Data Analytics \& Visual Computing, Film Versions, Historical Documentaries, Institut für Creative Media Technologies, MPEG-7 Descriptors, Machine Learning, Media Computing Group, SIFT, Wiss. 
Beitrag, pattern recognition}, pages = {41--56}, } @article{zeppelzauer_automatic_2009, title = {Automatic {Analysis} - {First} {Results} in {Shot} {Boundary} {Detection}}, volume = {55}, url = {http://www.degruyter.com/view/j/muk.2009.55.issue-3/muk.2009.55.3.45/muk.2009.55.3.45.xml}, doi = {10/gh3778}, number = {3}, journal = {Maske und Kothurn}, author = {Zeppelzauer, Matthias and Mitrović, Dalibor and Breiteneder, C.}, year = {2009}, keywords = {Archive Film, Archive Film Analysis, Artifacts, Automated Film Analysis, Center for Artificial Intelligence, Computer Vision, Content-based Video Retrieval, DCT, Dziga Vertov, Edge Histogram, Extern, FH SP Data Analytics \& Visual Computing, Feature Fusion, Flickr, Hard Cut Detection, Historical Documentaries, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Noise, Self-Similarity Matrix, Shaking, Shotcut Detection, Wiss. Beitrag, pattern recognition}, pages = {45--59}, } @article{zaharieva_film_2010, title = {Film {Analysis} of {Archive} {Documentaries}}, volume = {18}, url = {http://www.researchgate.net/profile/Matthias_Zeppelzauer/publication/224169972_Film_Analysis_of_Archived_Documentaries/links/00b49527a6cbb710da000000.pdf}, doi = {10/ds9kd8}, number = {2}, journal = {IEEE Multimedia}, author = {Zaharieva, Maia and Mitrović, Dalibor and Zeppelzauer, Matthias and Breiteneder, C.}, year = {2010}, keywords = {Archive Film Analysis, Automated Film Analysis, Automated Film Comparison, Center for Artificial Intelligence, Computer Vision, Content-based Video Retrieval, Dziga Vertov, Extern, FH SP Data Analytics \& Visual Computing, Historical Documentaries, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Motion Retrieval, Motion Segmentation, Noise Reduction, Signal Enhancement, Wiss. 
Beitrag, black-frame detection, camera take reconstruction, intertitle detection, motion composition, motion query, pattern recognition, retrieval of visual composition, shot-boundary detection, stabilization, visual composition analysis}, pages = {38--47}, } @inproceedings{zaharieva_finding_2009, address = {San Diego, USA}, title = {Finding the {Missing} {Piece}: {Content}-{Based} {Video} {Comparison}}, url = {http://publik.tuwien.ac.at/files/PubDat_180470.pdf}, doi = {10/ff4dpf}, booktitle = {Proceedings of the 11th {IEEE} {International} {Symposium} on {Multimedia} ({ISM} 2009)}, author = {Zaharieva, Maia and Zeppelzauer, Matthias and Mitrović, Dalibor and Breiteneder, C.}, year = {2009}, keywords = {Archive Film Analysis, Automated Film Analysis, Automated Film Comparison, Center for Artificial Intelligence, Computer Vision, Content-based Video Retrieval, Dziga Vertov, Edge Histogram, Extern, FH SP Data Analytics \& Visual Computing, Film Versions, Historical Documentaries, Institut für Creative Media Technologies, MPEG-7 Descriptors, Machine Learning, Media Computing Group, SIFT, Wiss. 
Beitrag, pattern recognition}, pages = {330--335}, } @inproceedings{zaharieva_automated_2013, address = {Dallas, Texas}, title = {Automated social event detection in large photo collections}, isbn = {978-1-4503-2033-7}, url = {http://dl.acm.org/citation.cfm?id=2461495}, doi = {10/gh3774}, booktitle = {Proceedings of the {International} {ACM} {Conference} on {Multimedia} {Retrieval}}, publisher = {ACM New York, NY, USA}, author = {Zaharieva, Maia and Zeppelzauer, Matthias and Breiteneder, C.}, year = {2013}, keywords = {Big Data Mining, Center for Artificial Intelligence, Clustering, Computer Vision, Datamining, Extern, FH SP Data Analytics \& Visual Computing, Flickr, Image Clustering, Image Retrieval, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Multimodal Clustering, Social Event Detection, Social Event Retrieval, Social Media Mining, Text Retrieval, Web Media Mining, Wiss. Beitrag, pattern recognition, social media retrieval}, pages = {167--174}, } @inproceedings{zeppelzauer_acoustic_2013, address = {Barcelona, Spain}, title = {Acoustic {Detection} of {Elephant} {Presence} in {Noisy} {Environments}}, url = {http://dl.acm.org/citation.cfm?id=2509900}, doi = {10/gh3775}, booktitle = {Proceedings of the 2nd {ACM} {International} {Workshop} on {Multimedia} {Analysis} for {Ecological} {Data} (in conjunction with {ACM} {Multimedia} {Conference} 2013)}, author = {Zeppelzauer, Matthias and Stöger, A. 
and Breiteneder, C.}, year = {2013}, keywords = {Audio Segmentation, Audio classification, Center for Artificial Intelligence, Content-based Audio Retrieval, Elephant Sound Recognition, Extern, FH SP Data Analytics \& Visual Computing, Greenwood Function Cepstral Coefficients, Human-Elephant Conflict, Institut für Creative Media Technologies, Loxodonta africana, Machine Learning, Media Computing Group, Noise Reduction, Signal Enhancement, Sound Detection, Sound Enhancement, Sound Recognition, Spectral Enhancement, Tensor-based filtering, Wiss. Beitrag, pattern recognition}, pages = {3--8}, } @inproceedings{sageder_unsupervised_2014, address = {Philadelphia, Pennsylvania, USA}, title = {Unsupervised {Selection} of {Robust} {Audio} {Feature} {Subsets}}, url = {http://epubs.siam.org/doi/abs/10.1137/1.9781611973440.79}, doi = {10/gh3776}, booktitle = {In {Proceedings} of the {International} {Conference} on {Data} {Mining} ({SIAM})}, author = {Sageder, G. and Zaharieva, Maia and Zeppelzauer, Matthias}, year = {2014}, keywords = {Center for Artificial Intelligence, Content-based Audio Retrieval, Correlation Analysis, Cross-correlation analysis, Extern, FH SP Data Analytics \& Visual Computing, Feature Selection, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Unsupervised feature selection, Wiss. 
Beitrag, audio feature selection, canonical correlation, pattern recognition}, } @inproceedings{zeppelzauer_cross-modal_2011, address = {Maui, Hawaii, USA}, title = {Cross-{Modal} {Analysis} of {Audio}-{Visual} {Film} {Montage}}, url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.228.8357&rep=rep1&type=pdf}, doi = {10/b8tnsx}, booktitle = {Proceedings of 20th {International} {Conference} on {Computer} {Communications} and {Networks}}, author = {Zeppelzauer, Matthias and Mitrović, Dalibor and Breiteneder, C.}, year = {2011}, keywords = {Archive Film Analysis, Audio-Visual Correlation, Automated Film Analysis, BFCC, Center for Artificial Intelligence, Computer Vision, Correlation Analysis, Cross Modal Retrieval, Dziga Vertov, Extern, FH SP Data Analytics \& Visual Computing, Historical Documentaries, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Multimodal Retrieval, Onset Detection, Segmentation, Shotcut Detection, Synchronicity Analysis, Wiss. Beitrag, auditory onset detection, pattern recognition, salience, self-similarity analysis, sequence extraction, synchronous montage, temporal segmentation, visual onset detection}, } @article{zaharieva_cross-platform_2015, title = {Cross-{Platform} {Social} {Event} {Detection}}, volume = {22}, issn = {1070-986X}, url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7045414&tag=1}, doi = {10/gh3773}, abstract = {It is becoming more and more popular to share personal experiences on platforms such as Flickr and YouTube. Uploaded media is usually described by both technical and user-generated metadata that is commonly used for their access and retrieval. Thus, a crucial aspect in this context is the quality and reliability of provided metadata. The mining of media across sharing platforms bears the additional challenge about potential di⬚erences in the maintained metadata. 
In order to provide a baseline for further research, we perform a thorough evaluation of the usefulness of available metadata in the context of social event detection in both single media repository scenario and across di⬚erent platforms.}, number = {3}, journal = {IEEE Multimedia}, author = {Zaharieva, Maia and Del Fabro, Manfred and Zeppelzauer, Matthias}, month = jan, year = {2015}, keywords = {2015, Center for Artificial Intelligence, Computer Vision, Creative Industries, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Pattern recognition, Publikationstyp Schriftpublikation, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed}, pages = {14}, } @inproceedings{zeppelzauer_efficient_2015, address = {Quebec, Canada}, title = {Efficient {Image}-{Space} {Extraction} and {Representation} of {3D} {Surface} {Topography}}, url = {http://arxiv.org/pdf/1504.08308v3.pdf}, doi = {10/ghp4kc}, booktitle = {Conference {Proceedings} of {ICIP} - {IEEE} {International} {Conference} on {Image} {Processing} 2015}, publisher = {IEEE}, author = {Zeppelzauer, Matthias and Seidl, Markus}, year = {2015}, note = {Projekt: PITOTI 3D}, keywords = {2015, 3D descriptors, 3D surface analysis, Center for Artificial Intelligence, Computer Vision, Creative Industries, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Image processing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Präsentation, Publikationstyp Schriftpublikation, Surface microstructure, Topography classification, Vortrag, Wiss. 
Beitrag, best-lbseidl, peer-reviewed}, pages = {2845--2849}, } @article{wieser_study_2016, title = {A {Study} on {Skeletonization} of {Complex} {Petroglyph} {Shapes}}, issn = {1573-7721}, url = {http://link.springer.com/article/10.1007/s11042-016-3395-1}, doi = {10/ghpp2r}, abstract = {In this paper, we present a study on skeletonization of real-world shape data. The data stem from the cultural heritage domain and represent contact tracings of prehistoric petroglyphs. Automated analysis can support the work of archeologists on the investigation and categorization of petroglyphs. One strategy to describe petroglyph shapes is skeleton-based. The skeletonization of petroglyphs is challenging since their shapes are complex, contain numerous holes and are often incomplete or disconnected. Thus they pose an interesting testbed for skeletonization. We present a large real-world dataset consisting of more than 1100 petroglyph shapes. We investigate their properties and requirements for the purpose of skeletonization, and evaluate the applicability of state-of-the-art skeletonization and skeleton pruning algorithms on this type of data. Experiments show that pre-processing of the shapes is crucial to obtain robust skeletons. We propose an adaptive pre-processing method for petroglyph shapes and improve several state-of-the-art skeletonization algorithms to make them suitable for the complex material. Evaluations on our dataset show that 79.8 \% of all shapes can be improved by the proposed pre-processing techniques and are thus better suited for subsequent skeletonization. 
Furthermore we observe that a thinning of the shapes produces robust skeletons for 83.5 \% of our shapes and outperforms more sophisticated skeletonization techniques.}, journal = {Multimedia Tools and Applications (Springer)}, author = {Wieser, Ewald and Seidl, Markus and Zeppelzauer, Matthias}, year = {2016}, note = {Projekt: PITOTI 3D}, keywords = {2016, Center for Artificial Intelligence, Computer Vision, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Pattern recognition, Publikationstyp Schriftpublikation, Real-world shape data, Shape pre-processing, Skeletionization, Skeletonization, Wiss. Beitrag, best, peer-reviewed, petroglyphs}, pages = {1--19}, } @article{zeppelzauer_multimodal_2016, title = {Multimodal classification of events in social media}, issn = {0262-8856}, url = {https://arxiv.org/pdf/1601.00599}, doi = {10/ghpp2q}, abstract = {Abstract A large amount of social media hosted on platforms like Flickr and Instagram is related to social events. The task of social event classification refers to the distinction of event and non-event-related contents as well as the classification of event types (e.g. sports events and concerts). In this paper, we provide an extensive study of textual, visual, as well as multimodal representations for social event classification. We investigate the strengths and weaknesses of the modalities and study the synergy effects between the modalities. 
Experimental results obtained with our multimodal representation outperform state-of-the-art methods and provide a new baseline for future research.}, journal = {Image and Vision Computing}, author = {Zeppelzauer, Matthias and Schopfhauser, Daniel}, year = {2016}, keywords = {2016, Center for Artificial Intelligence, Computer Vision, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Multimodal retrieval, Publikationstyp Schriftpublikation, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed}, } @inproceedings{seidl_study_2010, address = {Firenze, Italy}, title = {A study of gradual transition detection in historic film material}, url = {http://portal.acm.org/citation.cfm?id=1877929}, doi = {10/bk2dng}, urldate = {2010-11-12}, booktitle = {Proceedings of the second workshop on {eHeritage} and digital art preservation - {eHeritage} '10}, author = {Seidl, Markus and Zeppelzauer, Matthias and Breiteneder, Christian}, year = {2010}, keywords = {Center for Artificial Intelligence, Computer Vision, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Schriftpublikation, Wiss. Beitrag, peer-reviewed}, pages = {13}, } @article{zeppelzauer_establishing_2015, title = {Establishing the fundamentals for an elephant early warning and monitoring system}, volume = {8:409}, url = {http://www.biomedcentral.com/content/pdf/s13104-015-1370-y.pdf}, doi = {10/gb3pth}, abstract = {The decline of habitat for elephants due to expanding human activity is a serious conservation problem. This has continuously escalated the human–elephant conflict in Africa and Asia. 
Elephants make extensive use of powerful infrasonic calls (rumbles) that travel distances of up to several kilometers. This makes elephants well-suited for acoustic monitoring because it enables detecting elephants even if they are out of sight. In sight, their distinct visual appearance makes them a good candidate for visual monitoring. We provide an integrated overview of our interdisciplinary project that established the scientific fundamentals for a future early warning and monitoring system for humans who regularly experience serious conflict with elephants. We first draw the big picture of an early warning and monitoring system, then review the developed solutions for automatic acoustic and visual detection, discuss specific challenges and present open future work necessary to build a robust and reliable early warning and monitoring system that is able to operate in situ. We present a method for the automated detection of elephant rumbles that is robust to the diverse noise sources present in situ. We evaluated the method on an extensive set of audio data recorded under natural field conditions. Results show that the proposed method outperforms existing approaches and accurately detects elephant rumbles. Our visual detection method shows that tracking elephants in wildlife videos (of different sizes and postures) is feasible and particularly robust at near distances. From our project results we draw a number of conclusions that are discussed and summarized. We clearly identified the most critical challenges and necessary improvements of the proposed detection methods and conclude that our findings have the potential to form the basis for a future automated early warning system for elephants. We discuss challenges that need to be solved and summarize open topics in the context of a future early warning and monitoring system. 
We conclude that a long-term evaluation of the presented methods in situ using real-time prototypes is the most important next step to transfer the developed methods into practical implementation.}, journal = {BMC Research Notes}, author = {Zeppelzauer, Matthias and Stöger, A.}, month = sep, year = {2015}, keywords = {2015, Acoustic monitoring, Audio Analysis, Automatic call detection, Call classification, Center for Artificial Intelligence, Classification, Creative Industries, Department Medien und Digitale Technologien, Department Technologie, Elephants, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Loxodonta africana, Machine Learning, Media Computing Group, Noise Reduction, Object Detection, Pattern recognition, Publikationstyp Schriftpublikation, Signal Enhancement, Video Analysis, Visual monitoring, Visual tracking, Vocalizations, Wiss. Beitrag, best, human–elephant conflict, peer-reviewed}, pages = {15}, } @article{zeppelzauer_retrieval_2011, title = {Retrieval of motion composition in film}, volume = {22}, issn = {1462-6268}, url = {http://dx.doi.org/10.1080/14626268.2011.622282}, doi = {10/fxp886}, abstract = {This article presents methods for the automatic retrieval of motion and motion compositions in movies. We introduce a user-friendly sketch-based query interface that enables the user to describe desired motion compositions. Based on this abstract description, a tolerant matching scheme extracts shots from a movie with a similar composition. We investigate and evaluate two application scenarios: the retrieval of motion compositions and the retrieval of matching motions (a technique in continuity editing). Experiments show that the developed methods accurately and promptly retrieve relevant shots. 
The presented methods enable new ways of searching and investigating movies.}, number = {4}, urldate = {2014-09-11}, journal = {Digital Creativity}, author = {Zeppelzauer, Matthias and Zaharieva, Maia and Mitrović, Dalibor and Breiteneder, Christian}, year = {2011}, keywords = {Center for Artificial Intelligence, Computer Vision, Extern, FH SP Data Analytics \& Visual Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Wiss. Beitrag}, pages = {219--234}, } @article{seidl_gradual_2011, title = {Gradual transition detection in historic film material —a systematic study}, volume = {4}, issn = {1556-4673}, url = {http://doi.acm.org/10.1145/2069276.2069279}, doi = {10/fzsqr8}, number = {3}, journal = {J. Comput. Cult. Herit.}, author = {Seidl, Markus and Zeppelzauer, Matthias and Mitrović, Dalibor and Breiteneder, Christian}, year = {2011}, keywords = {Center for Artificial Intelligence, Computer Vision, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Schriftpublikation, Wiss. 
Beitrag, best, best-lbseidl, peer-reviewed}, pages = {10:1--10:18}, } @inproceedings{zeppelzauer_topological_2016, address = {Marseilles, France}, title = {Topological descriptors for {3D} surface analysis}, volume = {9667}, doi = {10/gh377g}, booktitle = {In {Proceedings} of 6th {International} {Workshop} on {Computational} {Topology} in {Image} {Context}}, publisher = {Springer}, author = {Zeppelzauer, Matthias and Zielinski, Bartosz and Juda, Mateusz and Seidl, Markus}, year = {2016}, note = {Projekt: PITOTI 3D}, keywords = {2016, Center for Artificial Intelligence, Computer Vision, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Präsentation, Publikationstyp Schriftpublikation, Wiss. Beitrag, peer-reviewed}, pages = {77--87}, } @inproceedings{blumenstein_interactive_2015, address = {Rostock, Germany}, title = {Interactive {Data} {Visualization} for {Second} {Screen} {Applications}: {State} of the {Art} and {Technical} {Challenges}}, isbn = {978-3-8396-0960-6}, url = {https://research.fhstp.ac.at/content/download/128715/file/Blumenstein_et_al_2015_Interactive_Data_Visualization_for_Second_Screen.pdf?inLanguage=ger-DE}, abstract = {While second screen scenarios - that is, simultaneously using a phone, tablet or laptop while watching TV or a recorded broadcast - are finding their ways into the homes of millions of people, our understanding of how to properly design them is still very limited. We envision this design space and investigate how interactive data visualization can be leveraged in a second screen context. 
We concentrate on the state of the art in the affected areas of this topic and define technical challenges and opportunities which have to be solved for developing second screen applications including data visualization in the future.}, booktitle = {Proceedings of the {International} {Summer} {School} on {Visual} {Computing}}, publisher = {Frauenhoferverlag}, author = {Blumenstein, Kerstin and Wagner, Markus and Aigner, Wolfgang and von Suess, Rosa and Prochaska, Harald and Püringer, Julia and Zeppelzauer, Matthias and Sedlmair, Michael}, editor = {Schulz, Hans-Jörg and Urban, Bodo and Freiherr von Lukas, Uwe}, month = aug, year = {2015}, note = {Projekt: KAVA-Time Projekt: VALID}, keywords = {2015, Center for Artificial Intelligence, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Digital Technologies, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Media Computing Group, Publikationstyp Präsentation, Publikationstyp Schriftpublikation, SP MW Global Media Markets \& Local Practices, Visual analytics, Wiss. Beitrag, peer-reviewed, visualization}, pages = {35--48}, } @inproceedings{salvador_cultural_2015, address = {Boston, Massachusetts, United States}, title = {Cultural {Event} {Recognition} with {Visual} {ConvNets} and {Temporal} {Models}}, url = {http://arxiv.org/abs/1504.06567}, abstract = {This paper presents our contribution to the ChaLearn Challenge 2015 on Cultural Event Classification. The challenge in this task is to automatically classify images from 50 different cultural events. Our solution is based on the combination of visual features extracted from convolutional neural networks with temporal information using a hierarchical classifier scheme. We extract visual features from the last three fully connected layers of both CaffeNet (pretrained with ImageNet) and our fine tuned version for the ChaLearn challenge. 
We propose a late fusion strategy that trains a separate low-level SVM on each of the extracted neural codes. The class predictions of the low-level SVMs form the input to a higher level SVM, which gives the final event scores. We achieve our best result by adding a temporal refinement step into our classification scheme, which is applied directly to the output of each low-level SVM. Our approach penalizes high classification scores based on visual features when their time stamp does not match well an event-specific temporal distribution learned from the training and validation data. Our system achieved the second best result in the ChaLearn Challenge 2015 on Cultural Event Classification with a mean average precision of 0.767 on the test set.}, booktitle = {Proceedings of the {CVPR} {Workshop} {ChaLearn} {Looking} at {People} 2015}, publisher = {IEEE}, author = {Salvador, Amaia and Zeppelzauer, Matthias and Manchón-Vizuente, Daniel and Calafell, Andrea and Giró-i-Nieto, Xavier}, month = apr, year = {2015}, keywords = {2015, Center for Artificial Intelligence, Computer Vision, Creative Industries, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Präsentation, Publikationstyp Schriftpublikation, Wiss. 
Beitrag, best, best-mzeppelzauer, peer-reviewed, visual computing}, }

@inproceedings{zeppelzauer_interactive_2015,
  address   = {Granada, Spain},
  title     = {Interactive {Segmentation} of {Rock}-{Art} in {High}-{Resolution} {3D} {Reconstructions}},
  booktitle = {Conference {Proceedings} of {Digital} {Heritage} 2015 {Full} {Papers}},
  author    = {Zeppelzauer, Matthias and Poier, Georg and Seidl, Markus and Reinbacher, Christian and Breiteneder, Christian and Bischof, Horst},
  month     = oct,
  year      = {2015},
  note      = {Projekt: PITOTI 3D},
  keywords  = {2015, Center for Artificial Intelligence, Computer Vision, Creative Industries, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Schriftpublikation, Vortrag, Wiss. Beitrag, best, peer-reviewed},
}

@inproceedings{zaharieva_social_2015,
  address   = {Shanghai, China},
  title     = {Social {Event} {Mining} in {Large} {Photo} {Collections}},
  abstract  = {A significant part of publicly available photos on the Internet depicts a variety of different social events. In order to organize this steadily growing media content and to make it easily accessible, novel indexing methods are required. Essential research questions in this context concern the efficient detection (clustering), classification, and retrieval of social events in large media collections. In this paper we explore two aspects of social events mining. First, the initial clustering of a given photo collection into single events and, second, the retrieval of relevant social events based on user queries. For both aspects we employ commonly available metadata information, such as user, time, GPS data, and user-generated textual descriptions. Performed evaluations in the context of social event detection demonstrate the strong generalization ability of our approach and the potential of contextual data such as time, user, and location. Experiments with social event retrieval clearly indicate the open challenge of mapping between previously detected event clusters and heterogeneous user queries.},
  booktitle = {Proceedings of the {International} {Conference} on {Multimedia} {Retrieval}},
  publisher = {ACM Press},
  author    = {Zaharieva, Maia and Zeppelzauer, Matthias and Del Fabro, Manfred and Schopfhauser, Daniel},
  month     = mar,
  year      = {2015},
  keywords  = {2015, Center for Artificial Intelligence, Computer Vision, Creative Industries, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Pattern recognition, Publikationstyp Schriftpublikation, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed, visual computing},
}

@inproceedings{agapito_graph-based_2015,
  series    = {Lecture {Notes} in {Computer} {Science}},
  title     = {Graph-{Based} {Shape} {Similarity} of {Petroglyphs}},
  volume    = {8925},
  isbn      = {978-3-319-16177-8},
  doi       = {10.1007/978-3-319-16178-5_9},
  language  = {English},
  booktitle = {Computer {Vision} - {ECCV} 2014 {Workshops}},
  publisher = {Springer International Publishing},
  author    = {Seidl, Markus and Wieser, Ewald and Zeppelzauer, Matthias and Pinz, Axel and Breiteneder, Christian},
  editor    = {Agapito, Lourdes and Bronstein, Michael M. and Rother, Carsten},
  year      = {2015},
  note      = {Projekt: PITOTI 3D},
  keywords  = {2015, Center for Artificial Intelligence, Computer Vision, Creative Industries, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Graph edit distance, Graph embedding, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Petroglyph similarity, Publikationstyp Schriftpublikation, Shape similarity, Vortrag, Wiss. Beitrag, best, best-lbseidl, graph matching, peer-reviewed, visual computing},
  pages     = {133--148},
}

@inproceedings{zaharieva_clustering_2014,
  address   = {Barcelona, Spain},
  title     = {Clustering and {Retrieval} of {Social} {Events} in {Flickr}},
  volume    = {1043},
  url       = {http://wwwu.edu.uni-klu.ac.at/miriegle/mediaeval/mediaEval2014.html},
  abstract  = {This paper describes our contributions to the Social Event Detection (SED) task as part of the MediaEval Benchmark 2014. We first present an unsupervised approach for the clustering of social events that builds solely on provided metadata. Results show that already the use of available time and location information achieves high clustering precision. In the next step, we focus on the retrieval of previously clustered social events from queries by using temporal, spatial, and textual cues.},
  language  = {English},
  booktitle = {{CEUR} {Workshop} {Proceedings}},
  author    = {Zaharieva, Maia and Schopfhauser, Daniel and Del Fabro, Manfred and Zeppelzauer, Matthias},
  month     = oct,
  year      = {2014},
  note      = {Workshop held in Barcelona, Catalunya, Spain, October 16--17, 2014},
  keywords  = {2014, Center for Artificial Intelligence, Computer Vision, Creative Industries, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Pattern recognition, Publikationstyp Schriftpublikation, Wiss.
Beitrag, visual computing},
  pages     = {2},
}

@inproceedings{zeppelzauer_unsupervised_2013,
  title     = {Unsupervised {Clustering} of {Social} {Events}},
  volume    = {1043},
  issn      = {1613-0073},
  url       = {http://ceur-ws.org/Vol-1043},
  booktitle = {{CEUR} {Workshop} {Proceedings} of the {MediaEval} 2013 {Multimedia} {Benchmark} {Workshop}},
  author    = {Zeppelzauer, Matthias and Zaharieva, Maia and Del Fabro, Manfred},
  year      = {2013},
  keywords  = {Center for Artificial Intelligence, Computer Vision, Creative Industries, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Pattern recognition, Publikationstyp Schriftpublikation, Wiss. Beitrag, visual computing},
}

@inproceedings{mitrovic_retrieval_2011,
  address   = {Delft, NL},
  title     = {Retrieval of visual composition in film},
  url       = {https://www.ims.tuwien.ac.at/publications/tuw-196528.pdf},
  booktitle = {{WIAMIS} 2011: 12th {International} {Workshop} on {Image} {Analysis} for {Multimedia} {Interactive} {Services}},
  publisher = {TU Delft},
  author    = {Mitrović, Dalibor and Zeppelzauer, Matthias and Zaharieva, Maia and Breiteneder, Christian},
  month     = apr,
  year      = {2011},
  keywords  = {Center for Artificial Intelligence, Computer Vision, Extern, FH SP Data Analytics \& Visual Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Wiss. Beitrag},
  pages     = {4},
}

@inproceedings{mitrovic_scene_2010,
  title     = {Scene {Segmentation} in {Artistic} {Archive} {Documentaries}},
  volume    = {6389},
  isbn      = {978-3-642-16606-8},
  url       = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.228.2216&rep=rep1&type=pdf},
  booktitle = {{HCI} in {Work} and {Learning}, {Life} and {Leisure}, {Lecture} {Notes} in {Computer} {Science}},
  publisher = {Springer Berlin/Heidelberg},
  author    = {Mitrović, Dalibor and Hartlieb, S. and Zeppelzauer, Matthias and Zaharieva, Maia},
  year      = {2010},
  keywords  = {Archive Film Analysis, Automated Film Analysis, Center for Artificial Intelligence, Computer Vision, Content-based Video Retrieval, Dziga Vertov, Extern, FH SP Data Analytics \& Visual Computing, Film Scene, Film Segmentation, Historical Documentaries, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, SIFT, Scene segmentation, Segmentation, Shot boundary detection, Visual Features, Wiss. Beitrag, block-based histrogram, edge change ratio, pattern recognition, shot cut detection, temporal segmentation},
  pages     = {400--410},
}

@inproceedings{mitrovic_feature_2009,
  address   = {Zadar, Croatia},
  title     = {On {Feature} {Selection} in {Environmental} {Sound} {Recognition}},
  isbn      = {978-953-7044-10-7},
  url       = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=5342826},
  booktitle = {Proceedings of the 51st {International} {Symposium} {ELMAR}-2009},
  author    = {Mitrović, Dalibor and Zeppelzauer, Matthias and Eidenberger, H.},
  year      = {2009},
  keywords  = {Audio classification, BFCC, Center for Artificial Intelligence, Content-based Audio Retrieval, Correlation Analysis, Extern, FH SP Data Analytics \& Visual Computing, Factor Loadings, Feature Combination, Feature Selection, Institut für Creative Media Technologies, MFCC, Machine Learning, Media Computing Group, PCA, Principal Components Analysis, Redundancy, Redundancy Analysis, SVM, Sound Recognition, Statistical Analysis, Wiss.
Beitrag, cepstral features, data quality, pattern recognition, spectral features, supervised feature selection, temporal features},
  pages     = {201--204},
}

@inproceedings{kropf_first_2007,
  address   = {Siegen, Germany},
  title     = {First {Steps} {Towards} {Digital} {Formalism}: {The} {Vienna} {Vertov} {Collection}},
  isbn      = {978-3-8376-1023-9},
  url       = {http://publik.tuwien.ac.at/showentry.php?ID=179578&lang=1},
  booktitle = {Proceedings of the {International} {Workshop} on {Digital} {Tools} in {Film} {Studies}},
  author    = {Kropf, V. and Zeppelzauer, Matthias and Hahn, S. and Mitrović, Dalibor},
  year      = {2007},
  keywords  = {Archive Film Analysis, Artifacts, Automated Film Analysis, Center for Artificial Intelligence, Computer Vision, Content-based Video Retrieval, Dziga Vertov, Extern, FH SP Data Analytics \& Visual Computing, Film artifacts, Film distortion, Historical Documentaries, Institut für Creative Media Technologies, Long-time degradation, Machine Learning, Media Computing Group, Wiss. Beitrag, challenges for automated analysis, pattern recognition, scratches},
  pages     = {117--131},
}

@inproceedings{zeppelzauer_analysis_2008,
  address   = {Klagenfurt, Austria},
  title     = {Analysis of {Historical} {Artistic} {Documentaries}},
  isbn      = {978-0-7695-3130-4},
  url       = {http://www.researchgate.net/profile/Matthias_Zeppelzauer/publication/4350166_Analysis_of_Historical_Artistic_Documentaries/links/09e41507e7aeea59fd000000.pdf},
  booktitle = {Proceedings of the 9th {International} {Workshop} on {Image} {Analysis} for {Multimedia} {Interactive} {Services}},
  author    = {Zeppelzauer, Matthias and Mitrović, Dalibor and Breiteneder, C.},
  year      = {2008},
  keywords  = {Archive Film, Archive Film Analysis, Artifacts, Automated Film Analysis, Center for Artificial Intelligence, Computer Vision, Content-based Video Retrieval, DCT, Dziga Vertov, Edge Histogram, Extern, FH SP Data Analytics \& Visual Computing, Feature Fusion, Flickr, Hard Cut Detection, Historical Documentaries, Institut für Creative Media
Technologies, Machine Learning, Media Computing Group, Noise, Self-Similarity Matrix, Shaking, Shotcut Detection, Wiss. Beitrag, pattern recognition},
  pages     = {201--206},
}

@inproceedings{zeppelzauer_generic_2012,
  title     = {A {Generic} {Approach} for {Social} {Event} {Detection} in {Large} {Photo} {Collections}},
  issn      = {1613-0073},
  url       = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.416.9676&rep=rep1&type=pdf},
  booktitle = {{CEUR} {Workshop} {Proceedings} of the {MediaEval} 2012 {Multimedia} {Benchmark} {Workshop}},
  author    = {Zeppelzauer, Matthias and Zaharieva, Maia and Breiteneder, Christian},
  year      = {2012},
  keywords  = {Big Data Mining, Center for Artificial Intelligence, Clustering, Computer Vision, Datamining, Extern, FH SP Data Analytics \& Visual Computing, Flickr, Image Clustering, Image Retrieval, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Multimodal Clustering, Social Event Detection, Social Event Retrieval, Social Media Mining, Text Retrieval, Web Media Mining, Wiss. Beitrag, pattern recognition, social media retrieval},
}

@incollection{mitrovic_features_2010,
  title     = {Features for {Content}-{Based} {Audio} {Retrieval}},
  volume    = {78},
  isbn      = {978-0-12-381019-9},
  url       = {http://www.sciencedirect.com/science/article/pii/S0065245810780037},
  booktitle = {Advances in {Computers}},
  publisher = {Academic Press},
  address   = {Burlington},
  author    = {Mitrović, Dalibor and Zeppelzauer, Matthias and Breiteneder, Christian},
  year      = {2010},
  keywords  = {Audio Descriptors, Audio Features, Audio classification, Center for Artificial Intelligence, Content-based Audio Features, Content-based Audio Retrieval, Extern, FH SP Data Analytics \& Visual Computing, Feature Design, Feature Taxonomy, Institut für Creative Media Technologies, Media Computing Group, Sound Recognition, Wiss. Beitrag, best-mzeppelzauer, cepstral features, pattern recognition, spectral features, survey, temporal features},
  pages     = {71--150},
}

@inproceedings{mitrovic_discrimination_2006,
  address   = {Beijing, China},
  title     = {Discrimination and {Retrieval} of {Animal} {Sounds}},
  isbn      = {1-4244-0029-5},
  url       = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=1651344},
  booktitle = {Proceedings of the {IEEE} {Conference} on {Multimedia} {Modeling}},
  author    = {Mitrović, Dalibor and Zeppelzauer, Matthias and Breiteneder, Christian},
  year      = {2006},
  keywords  = {Amplitude Descriptors, Animal Sounds, Audio classification, Center for Artificial Intelligence, Content-based Audio Retrieval, Extern, FH SP Data Analytics \& Visual Computing, Institut für Creative Media Technologies, LoHAS, MFCC, Machine Learning, Media Computing Group, SVM, Sound Classification, Sound Recognition, Temporal Audio Features, Wiss. Beitrag, pattern recognition},
  pages     = {339--343},
}

@inproceedings{lidon_upc-ub-stp_2015,
  address   = {Wurzen, Germany},
  title     = {{UPC}-{UB}-{STP} @ {MediaEval} 2015 {Diversity} {Task}: {Iterative} {Reranking} of {Relevant} {Images}},
  url       = {https://imatge.upc.edu/web/sites/default/files/pub/cLidon.pdf},
  abstract  = {This paper presents the results of the UPC-UB-STP team in the 2015 MediaEval Retrieving Diverse Images Task. The goal of the challenge is to provide a ranked list of Flickr photos for a predefined set of queries. Our approach firstly generates a ranking of images based on a query-independent estimation of its relevance.
Only top results are kept and iteratively re-ranked based on their intra-similarity to introduce diversity.},
  booktitle = {{MediaEval} {Workshop}},
  publisher = {CEUR Workshop Proceedings},
  author    = {Lidon, Aniol and Bolanos, Marc and Seidl, Markus and Giro-i-Nieto, Xavi and Radeva, Petia and Zeppelzauer, Matthias},
  month     = sep,
  year      = {2015},
  keywords  = {2015, Center for Artificial Intelligence, Computer Vision, Conv Nets, Creative Industries, Deep Learning, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Flickr, Forschungsgruppe Media Computing, Image Analysis, Image retrieval, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, MediaEval, Neural Networks, Pattern recognition, Publikationstyp Schriftpublikation, Re-Ranking, Social Image Retrieval, Social Media Analysis, Wiss. Beitrag, peer-reviewed},
  pages     = {2},
}