@inproceedings{strebl_one-pixel_2022,
	title = {One-{Pixel} {Instance} {Segmentation} of {Leaves}},
	isbn = {978-3-85125-954-4},
	url = {https://openlib.tugraz.at/download.php?id=647869b0e83bf&location=browse},
	doi = {10.3217/978-3-85125-954-4-06},
	abstract = {The segmentation of plant leaves is an essential prerequisite for vision-based automated plant phenotyping applications like stress detection, measuring plant growth and detecting pests. Segmenting plant leaves is challenging due to occlusions, self-shadows, varying leaf shapes, poses and sizes, and the presence of particularly fine structures. We present a novel leaf segmentation approach that takes single pixels as input to initialize the segmentation of leaves. Additionally, we introduce a new strategy for transfer learning that we call “tandem learning”, which enables the integration of previously learned network representations into a structurally different network. We evaluate different configurations of our approach on publicly available data sets and show that it yields competitive segmentation results compared to more complex segmentation approaches.},
	booktitle = {Proceedings of the {Workshop} of the {Austrian} {Association} for {Pattern} {Recognition}},
	publisher = {TU Graz},
	author = {Strebl, Julia and Stumpe, Eric and Baumhauer, Thomas and Kernstock, Lena and Seidl, Markus and Zeppelzauer, Matthias},
	year = {2022},
	note = {Projekt: PlantAI},
	keywords = {Center for Artificial Intelligence, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Vortrag, Wiss. Beitrag, peer-reviewed},
	pages = {6},
}

@inproceedings{stumpe_real_2021,
	title = {Real {Estate} {Attribute} {Prediction} from {Multiple} {Visual} {Modalities} with {Missing} {Data}},
	isbn = {978-3-85125-869-1},
	url = {https://openlib.tugraz.at/download.php?id=621f3007967e9&location=browse},
	doi = {10.3217/978-3-85125-869-1-06},
	abstract = {The assessment and valuation of real estate requires large datasets with real estate information. Unfortunately, real estate databases are usually sparse in practice, i.e., not every important attribute is available for each property. In this paper, we study the potential of predicting high-level real estate attributes from visual data, specifically from two visual modalities, namely indoor (interior) and outdoor (facade) photos. We design three models using different multimodal fusion strategies and evaluate them for three different use cases. Thereby, a particular challenge is to handle missing modalities. We evaluate different fusion strategies, present baselines for the different prediction tasks, and find that enriching the training data with additional incomplete samples can lead to an improvement in prediction accuracy. Furthermore, the fusion of information from indoor and outdoor photos results in a performance boost of up to 5\% in Macro F1-score.},
	booktitle = {Proceedings of the {Workshop} of the {Austrian} {Association} for {Pattern} {Recognition}},
	publisher = {TU Graz},
	author = {Stumpe, Eric and Despotovic, Miroslav and Zhang, Zedong and Zeppelzauer, Matthias},
	month = may,
	year = {2021},
	keywords = {Center for Artificial Intelligence, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Wiss. Beitrag, best-estumpe, peer-reviewed},
	pages = {6},
}

@misc{rind_wie_2022,
	address = {St. Pölten},
	type = {Demo \& {Poster}},
	title = {Wie können wir {Daten} erlebbar machen?},
	abstract = {With increasing digitalization, the amount of available data is growing rapidly. But what can it tell us about our world? Clear, intuitive representations are needed to make large volumes of data easier to grasp. Here you will learn which visual, auditory, and physical methods can be used to represent data and how artificial intelligence contributes to this. In which media does the Federal Chancellery place advertisements? Which countries export the most weapons systems? How can one trace what the neural network of a driver assistance system has learned? Visual, auditory, and physical representations help to make large volumes of data easier to understand and to draw conclusions from them. Artificial intelligence also comes into play here. The program "netflower", which presents and compares money flows between public institutions and media outlets in a clear way, is one example of the interactive visualizations at our station. These are graphical representations of data in which viewers can decide for themselves which parts they want to look at or explore in more detail. Visitors can also experience data through other senses at this station, hearing it as sonification and grasping it as physicalization. In addition, the station shows how data comics ease the entry into data analysis, how sensor data is visualized in extended reality, how AI supports onboarding at the workplace, and how neural networks are made understandable.},
	author = {Rind, Alexander and Böck, Jaqueline and Boucher, Magdalena and Grassinger, Florian and Kirchknopf, Armin and Stoiber, Christina and Stumpe, Eric and Zauchinger, Michael},
	year = {2022},
	note = {Projekt: SoniVis, Projekt: SEVA, Projekt: VALID},
	keywords = {Demo, Department Medien und Digitale Technologien, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Poster, Visualization},
}

@inproceedings{despotovic_ensemble_2021,
	title = {Ensemble {Decision}-{Based} {Annotation} of {Unconstrained} {Real} {Estate} {Images}},
	abstract = {We describe a proof-of-concept for annotating real estate images using simple iterative rule-based semi-supervised learning. In this study, we gained important insights into the discriminant power of individual image classes as well as requirements for a practical implementation.},
	booktitle = {Proceedings of the {Workshop} of the {Austrian} {Association} for {Pattern} {Recognition}},
	publisher = {TU Graz},
	author = {Despotovic, Miroslav and Zhang, Zedong and Stumpe, Eric and Zeppelzauer, Matthias},
	month = may,
	year = {2021},
	keywords = {Center for Artificial Intelligence, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Wiss. Beitrag, peer-reviewed},
	pages = {2},
}