@inproceedings{stoiber_abstract_2022,
  address = {Chur, Switzerland},
  title = {Abstract and {Concrete} {Materials}: {What} to use for {Visualization} {Onboarding}},
  doi = {10.1145/3554944.3554949},
  booktitle = {The 15th {International} {Symposium} on {Visual} {Information} {Communication} and {Interaction} ({VINCI} 2022)},
  publisher = {Association for Computing Machinery},
  author = {Stoiber, Christina and Grassinger, Florian and Aigner, Wolfgang},
  year = {2022},
  note = {Projekt: SEVA Projekt: Vis4Schools},
  keywords = {FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Human-Computer Interaction, Institut für Creative Media Technologies, Visual Computing, Vortrag, Wiss. Beitrag, best-stoiber, peer-reviewed},
}

@inproceedings{stoiber_design_2021,
  address = {Potsdam, Germany},
  title = {Design and {Comparative} {Evaluation} of {Visualization} {Onboarding} {Methods}},
  isbn = {978-1-4503-8647-0},
  doi = {10.1145/3481549.3481558},
  booktitle = {{VINCI}'21 - {Short} {Papers}},
  publisher = {Association for Computing Machinery},
  author = {Stoiber, Christina and Walchshofer, Conny and Grassinger, Florian and Stitz, Holger and Streit, Marc and Aigner, Wolfgang},
  year = {2021},
  note = {Projekt: SEVA},
  keywords = {FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Human-Computer Interaction, Institut für Creative Media Technologies, Visual Computing, best-stoiber, peer-reviewed},
  pages = {1--5},
}

@article{stoiber_comparative_2022,
  title = {Comparative {Evaluations} of {Visualization} {Onboarding} {Methods}},
  url = {http://arxiv.org/abs/2203.15418},
  doi = {10.1016/j.visinf.2022.07.001},
  journal = {Visual Informatics},
  author = {Stoiber, Christina and Walchshofer, Conny and Pohl, Margit and Potzmann, Benjamin and Grassinger, Florian and Stitz, Holger and Aigner, Wolfgang},
  year = {2022},
  note = {Projekt: SEVA},
  keywords = {FH SP Data Analytics \& Visual Computing, Forschungsgruppe Digital Technologies, Forschungsgruppe Media Computing, Human-Computer Interaction, Institut für Creative Media Technologies, Visual Computing, Wiss. Beitrag, best-stoiber, best-wagner, peer-reviewed},
}

@inproceedings{stoiber_visualization_2019,
  address = {Vancouver, BC, Canada},
  title = {Visualization {Onboarding}: {Learning} {How} to {Read} and {Use} {Visualizations}},
  url = {https://osf.io/c38ab/},
  doi = {10/gh38zd},
  booktitle = {{IEEE} {Workshop} on {Visualization} for {Communication}},
  publisher = {IEEE},
  author = {Stoiber, Christina and Grassinger, Florian and Pohl, Margit and Stitz, Holger and Streit, Marc and Aigner, Wolfgang},
  year = {2019},
  note = {Projekt: VisOnFire},
  keywords = {FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Visual Computing, Vortrag, Wiss. Beitrag, best-cniederer, best-cstoiber, peer-reviewed},
}

@misc{rind_daten_2022,
  address = {St. Pölten},
  type = {Demo \& {Poster}},
  title = {Daten erlebbar machen: {Wie} {Daten} sichtbar, hörbar und greifbar werden},
  abstract = {In which media outlets does the Federal Chancellery place advertisements? Which countries export the most weapons systems? What is the state of mental health in EU countries? With increasing digitalization, the amount of available data is growing rapidly. But what can it tell us about our world? Clear, intuitive representations are needed to make large amounts of data easier to grasp. Here you will learn which visual, auditory, and physical methods can be used to represent data.
The tool "netflower", with which money flows between public institutions and media outlets can be displayed and compared at a glance, is one example of the interactive visualizations at our station. These are pictorial representations of data in which viewers themselves decide which parts they want to look at or explore in more detail. At this station, visitors can also experience data through other senses, hearing it as sonification and grasping it as physicalization. In addition, we show how data comics ease the entry into data analysis and how sensor data can be visualized in extended reality.},
  author = {Rind, Alexander and Aigner, Wolfgang and Böck, Julia and Grassinger, Florian and Oliveira, Victor A. de J. and Wu, Hsiang-Yun and Zauchinger, Michael},
  month = sep,
  year = {2022},
  note = {Projekt: SoniVis Projekt: SEVA Projekt: VALID Projekt: TransSoDia Projekt: Dataskop},
  keywords = {Demo, Department Medien und Digitale Technologien, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Poster, Visualization},
}

@misc{rind_wie_2022,
  address = {St. Pölten},
  type = {Demo \& {Poster}},
  title = {Wie können wir {Daten} erlebbar machen?},
  abstract = {With increasing digitalization, the amount of available data is growing rapidly. But what can it tell us about our world? Clear, intuitive representations are needed to make large amounts of data easier to grasp. Here you will learn which visual, auditory, and physical methods can be used to represent data and how artificial intelligence contributes to this. In which media outlets does the Federal Chancellery place advertisements? Which countries export the most weapons systems? How can we understand what the neural network of a driver assistance system has learned? Visual, auditory, and physical representations help make large amounts of data easier to grasp and to draw conclusions from it. This is where artificial intelligence also comes into play. The tool "netflower", with which money flows between public institutions and media outlets can be displayed and compared at a glance, is one example of the interactive visualizations at our station. These are pictorial representations of data in which viewers themselves decide which parts they want to look at or explore in more detail. At this station, visitors can also experience data through other senses, hearing it as sonification and grasping it as physicalization.
In addition, we show how data comics ease the entry into data analysis, how sensor data can be visualized in extended reality, how AI supports onboarding at the workplace, and how neural networks can be made understandable.},
  author = {Rind, Alexander and Böck, Jaqueline and Boucher, Magdalena and Grassinger, Florian and Kirchknopf, Armin and Stoiber, Christina and Stumpe, Eric and Zauchinger, Michael},
  year = {2022},
  note = {Projekt: SoniVis Projekt: SEVA Projekt: VALID},
  keywords = {Demo, Department Medien und Digitale Technologien, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Poster, Visualization},
}

@article{stoiber_netflower_2019,
  title = {netflower: {Dynamic} {Network} {Visualization} for {Data} {Journalists}},
  volume = {38},
  url = {https://phaidra.fhstp.ac.at/download/o:4838},
  doi = {10/ghm4jz},
  abstract = {Journalists need visual interfaces that cater to the exploratory nature of their investigative activities. In this paper, we report on a four-year design study with data journalists. The main result is netflower, a visual exploration tool that supports journalists in investigating quantitative flows in dynamic network data for story-finding. The visual metaphor is based on Sankey diagrams and has been extended to make it capable of processing large amounts of input data as well as network change over time. We followed a structured, iterative design process including requirement analysis and multiple design and prototyping iterations in close cooperation with journalists. To validate our concept and prototype, a workshop series and two diary studies were conducted with journalists. Our findings indicate that the prototype can be picked up quickly by journalists and valuable insights can be achieved in a few hours. The prototype can be accessed at: http://netflower.fhstp.ac.at/},
  journal = {Computer Graphics Forum (EuroVis '19)},
  author = {Stoiber, Christina and Rind, Alexander and Grassinger, Florian and Gutounig, Robert and Goldgruber, Eva and Sedlmair, Michael and Emrich, Stefan and Aigner, Wolfgang},
  month = jun,
  year = {2019},
  note = {Projekt: VALID Projekt: VisOnFire},
  keywords = {FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Human-Computer Interaction, Institut für Creative Media Technologies, Visual Computing, Vortrag, Wiss.
Beitrag, best, best-cniederer, best-cstoiber, best-fgrassinger, best-lbaigner, peer-reviewed},
}

@inproceedings{reinsperger_location-based_2016,
  title = {Location-{Based} {Learning} {Games} {Made} {Easy}},
  url = {http://mc.fhstp.ac.at/sites/default/files/publications/fmt-proceedings-2016-paper3.pdf},
  booktitle = {Proceedings of the 9th {Forum} {Media} {Technology}},
  publisher = {CEUR WS},
  author = {Reinsperger, Simon and Grassinger, Florian and Miclaus, Iosif and Schmiedl, Birgit and Schmiedl, Grischa and Blumenstein, Kerstin},
  year = {2016},
  note = {Projekt: SEEKOI Projekt: Couragierte Gemeinde},
  keywords = {2016, Department Medien und Digitale Technologien, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Publikationstyp Präsentation, Publikationstyp Schriftpublikation, peer-reviewed, ⛔ No DOI found},
  pages = {23--31},
}

@inproceedings{rind_contractvis_2018,
  title = {{ContractVis} {HighLighter}: the {Visual} {Assistant} for the {Fine} {Print}},
  url = {http://mc.fhstp.ac.at/sites/default/files/publications/contractvis-highlighter-visual.pdf},
  abstract = {Navigating and comprehending the legal text of web shops’ general terms and conditions is a burden for consumers. This poster abstract describes work-in-progress to design a visualization environment specifically addressing the needs of online shoppers. This environment highlights keywords of relevance (e.g., returning items), provides visual overview, and supports comparison of two texts.},
  booktitle = {Proceedings of the {Posters} and {Demos} {Track} of the 14th {International} {Conference} on {Semantic} {Systems} - {SEMANTiCS2018}},
  publisher = {CEUR-WS},
  author = {Rind, Alexander and Grassinger, Florian and Kirchknopf, Armin and Stoiber, Christina and Özüyilmaz, Aslihan},
  editor = {Khalili, Ali and Koutraki, Maria},
  year = {2018},
  note = {Projekt: ContractVis},
  keywords = {FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Wiss. Beitrag, information visualization, peer-reviewed, personal context, text analytics, ⛔ No DOI found},
}

@article{niederer_taco_2018,
  title = {{TACO}: {Visualizing} {Changes} in {Tables} {Over} {Time}},
  volume = {24},
  doi = {10/ghppzq},
  abstract = {Multivariate, tabular data is one of the most common data structures used in many different domains. Over time, tables can undergo changes in both structure and content, which results in multiple versions of the same table. A challenging task when working with such derived tables is to understand what exactly has changed between versions in terms of additions/deletions, reorder, merge/split, and content changes. For textual data, a variety of commonplace "diff" tools exist that support the task of investigating changes between revisions of a text. Although there are some comparison tools which assist users in inspecting differences between multiple table instances, the resulting visualizations are often difficult to interpret or do not scale to large tables with thousands of rows and columns. To address these challenges, we developed TACO, an interactive comparison tool that visualizes effectively the differences between multiple tables at various levels of detail. With TACO we show (1) the aggregated differences between multiple table versions over time, (2) the aggregated changes between two selected table versions, and (3) detailed changes between the selection.
To demonstrate the effectiveness of our approach, we show its application by means of two usage scenarios.},
  number = {1},
  journal = {IEEE Transactions on Visualization and Computer Graphics (InfoVis ’17)},
  author = {Niederer, Christina and Stitz, Holger and Hourieh, Reem and Grassinger, Florian and Aigner, Wolfgang and Streit, Marc},
  year = {2018},
  note = {Projekt: VisOnFire},
  keywords = {Center for Digital Health Innovation, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Wiss. Beitrag, best, best-cniederer, best-cstoiber, best-lbaigner, peer-reviewed},
  pages = {677--686},
}

@inproceedings{grassinger_lifestream:_2017,
  address = {St. Pölten},
  title = {{LifeStream}: {Design} and prototypical implementation of a monitoring system for dispatch life support},
  url = {http://mc.fhstp.ac.at/sites/default/files/publications/Grassinger_Lifestream_2017.pdf},
  abstract = {Most laypersons who reanimate for the first time do it inappropriately. Until now the only way to review the ongoing reanimation was verbal feedback by the dispatcher on the phone, who has only limited resources in order to review the reanimation process. To overcome this issue, we designed and implemented LifeStream, a system using current smartphone technologies in order to measure reanimation parameters: chest compression rate (CCR) and chest compression depth (CCD). The system is based on a server, web client and mobile application, which gathers, processes and transfers the data. The development of algorithms for CCR and CCD detection as well as the evaluation of the system functionality is part of this paper. We conducted a 2-day user test, where we compared the guided standard reanimation process to the application supported process. The results of the tests showed that it is possible to develop an application, which runs for at least ten minutes (crucial time till ambulance arrives) and enhances the whole reanimation cycle for laypersons and dispatchers (Ljunggren et al., 2016).},
  booktitle = {Proceedings of the 10th {Forum} {Media} {Technology} 2017},
  publisher = {CEUR-WS},
  author = {Grassinger, Florian and Doppler, Jakob and Wagner, Markus and Aigner, Wolfgang},
  month = nov,
  year = {2017},
  keywords = {2017, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, Forschungsgruppe Digital Technologies, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Publikationstyp Präsentation, Publikationstyp Schriftpublikation, health-care, mobile, peer-reviewed, visualization},
  pages = {41--45},
}