@alex-rind
Created February 27, 2017 09:50
PubViz
license: mit
@article{p3,
journal = {IEEE TVCG},
year = 2015,
title = {A comparative study between RadViz and Star Coordinates},
doi = {10.1109/TVCG.2015.2467324},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467324},
author = {Rubio-Sanchez, M. and Raya, L. and Diaz, F. and Sanchez, A.},
pages = {619--628},
keywords = {RadViz, Star coordinates, Exploratory data analysis, Cluster analysis, Classification, Outlier detection},
abstract = {RadViz and star coordinates are two of the most popular projection-based multivariate visualization techniques that arrange variables in radial layouts. Formally, the main difference between them consists of a nonlinear normalization step inherent in RadViz. In this paper we show that, although RadViz can be useful when analyzing sparse data, in general this design choice limits its applicability and introduces several drawbacks for exploratory data analysis. In particular, we observe that the normalization step introduces nonlinear distortions, can encumber outlier detection, prevents associating the plots with useful linear mappings, and impedes estimating original data attributes accurately. In addition, users have greater flexibility when choosing different layouts and views of the data in star coordinates. Therefore, we suggest that analysts and researchers should carefully consider whether RadViz's normalization step is beneficial regarding the data sets' characteristics and analysis tasks.},
}
@article{p4,
journal = {IEEE TVCG},
year = 2015,
title = {A Linguistic Approach to Categorical Color Assignment for Data Visualization},
doi = {10.1109/TVCG.2015.2467471},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467471},
author = {Setlur, V. and Stone, M.C.},
pages = {698--707},
keywords = {linguistics, natural language processing, semantics, color names, categorical color, Google n-grams, WordNet, XKCD},
abstract = {When data categories have strong color associations, it is useful to use these semantically meaningful concept-color associations in data visualizations. In this paper, we explore how linguistic information about the terms defining the data can be used to generate semantically meaningful colors. To do this effectively, we need first to establish that a term has a strong semantic color association, then discover which color or colors express it. Using co-occurrence measures of color name frequencies from Google n-grams, we define a measure for colorability that describes how strongly associated a given term is to any of a set of basic color terms. We then show how this colorability score can be used with additional semantic analysis to rank and retrieve a representative color from Google Images. Alternatively, we use symbolic relationships defined by WordNet to select identity colors for categories such as countries or brands. To create visually distinct color palettes, we use k-means clustering to create visually distinct sets, iteratively reassigning terms with multiple basic color associations as needed. This can be additionally constrained to use colors only in a predefined palette.},
}
@article{p5,
journal = {IEEE TVCG},
year = 2015,
title = {A Psychophysical Investigation of Size as a Physical Variable},
doi = {10.1109/TVCG.2015.2467951},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467951},
author = {Jansen, Y. and Hornbaek, K.},
pages = {479--488},
keywords = {Data physicalization, physical visualization, psychophysics, experiment, physical variable},
abstract = {Physical visualizations, or data physicalizations, encode data in attributes of physical shapes. Despite a considerable body of work on visual variables, “physical variables” remain poorly understood. One of them is physical size. A difficulty for solid elements is that “size” is ambiguous - it can refer to either length/diameter, surface, or volume. Thus, it is unclear for designers of physicalizations how to effectively encode quantities in physical size. To investigate, we ran an experiment where participants estimated ratios between quantities represented by solid bars and spheres. Our results suggest that solid bars are compared based on their length, consistent with previous findings for 2D and 3D bars on flat media. But for spheres, participants' estimates are rather proportional to their surface. Depending on the estimation method used, judgments are rather consistent across participants, thus the use of perceptually-optimized size scales seems possible. We conclude by discussing implications for the design of data physicalizations and the need for more empirical studies on physical variables.},
}
@article{p6,
journal = {IEEE TVCG},
year = 2015,
title = {A Simple Approach for Boundary Improvement of Euler Diagrams},
doi = {10.1109/TVCG.2015.2467992},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467992},
author = {Simonetto, P. and Archambault, D. and Scheidegger, C.E.},
pages = {678--687},
keywords = {Euler diagrams, Boundary Improvement, Force-Directed Approaches},
abstract = {General methods for drawing Euler diagrams tend to generate irregular polygons. Yet, empirical evidence indicates that smoother contours make these diagrams easier to read. In this paper, we present a simple method to smooth the boundaries of any Euler diagram drawing. When refining the diagram, the method must ensure that set elements remain inside their appropriate boundaries and that no region is removed or created in the diagram. Our approach uses a force system that improves the diagram while at the same time ensuring its topological structure does not change. We demonstrate the effectiveness of the approach through case studies and quantitative evaluations.},
}
@article{p7,
journal = {IEEE TVCG},
year = 2015,
title = {Acquired Codes of Meaning in Data Visualization and Infographics: Beyond Perceptual Primitives},
doi = {10.1109/TVCG.2015.2467321},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467321},
author = {Byrne, L. and Angus, D. and Wiles, J.},
pages = {509--518},
keywords = {Visual Design, Taxonomies, Illustrative Visualization, Design Methodologies},
abstract = {While information visualization frameworks and heuristics have traditionally been reluctant to include acquired codes of meaning, designers are making use of them in a wide variety of ways. Acquired codes leverage a user's experience to understand the meaning of a visualization. They range from figurative visualizations which rely on the reader's recognition of shapes, to conventional arrangements of graphic elements which represent particular subjects. In this study, we used content analysis to codify acquired meaning in visualization. We applied the content analysis to a set of infographics and data visualizations which are exemplars of innovative and effective design. 88% of the infographics and 71% of data visualizations in the sample contain at least one use of figurative visualization. Conventions on the arrangement of graphics are also widespread in the sample. In particular, a comparison of representations of time and other quantitative data showed that conventions can be specific to a subject. These results suggest that there is a need for information visualization research to expand its scope beyond perceptual channels, to include social and culturally constructed meaning. Our paper demonstrates a viable method for identifying figurative techniques and graphic conventions and integrating them into heuristics for visualization design.},
}
@article{p8,
journal = {IEEE TVCG},
year = 2015,
title = {AggreSet: Rich and Scalable Set Exploration using Visualizations of Element Aggregations},
doi = {10.1109/TVCG.2015.2467051},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467051},
author = {Yalcin, M.A. and Elmqvist, N. and Bederson, B.B.},
pages = {688--697},
keywords = {Multi-valued attributes, sets, visualization, set visualization, data exploration, interaction, design, scalability},
abstract = {Datasets commonly include multi-value (set-typed) attributes that describe set memberships over elements, such as genres per movie or courses taken per student. Set-typed attributes describe rich relations across elements, sets, and the set intersections. Increasing the number of sets results in a combinatorial growth of relations and creates scalability challenges. Exploratory tasks (e.g. selection, comparison) have commonly been designed in separation for set-typed attributes, which reduces interface consistency. To improve on scalability and to support rich, contextual exploration of set-typed data, we present AggreSet. AggreSet creates aggregations for each data dimension: sets, set-degrees, set-pair intersections, and other attributes. It visualizes the element count per aggregate using a matrix plot for set-pair intersections, and histograms for set lists, set-degrees and other attributes. Its non-overlapping visual design is scalable to numerous and large sets. AggreSet supports selection, filtering, and comparison as core exploratory tasks. It allows analysis of set relations including subsets, disjoint sets and set intersection strength, and also features perceptual set ordering for detecting patterns in set matrices. Its interaction is designed for rich and rapid data exploration. We demonstrate results on a wide range of datasets from different domains with varying characteristics, and report on expert reviews and a case study using student enrollment and degree data with assistant deans at a major public university.},
}
@article{p9,
journal = {IEEE TVCG},
year = 2015,
title = {AmbiguityVis: Visualization of Ambiguity in Graph Layouts},
doi = {10.1109/TVCG.2015.2467691},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467691},
author = {Yong Wang and Qiaomu Shen and Archambault, D. and Zhiguang Zhou and Min Zhu and Sixiao Yang and Huamin Qu},
pages = {359--368},
keywords = {Visual Ambiguity, Visualization, Node-link diagram, Graph layout, Graph visualization},
abstract = {Node-link diagrams provide an intuitive way to explore networks and have inspired a large number of automated graph layout strategies that optimize aesthetic criteria. However, any particular drawing approach cannot fully satisfy all these criteria simultaneously, producing drawings with visual ambiguities that can impede the understanding of network structure. To bring attention to these potentially problematic areas present in the drawing, this paper presents a technique that highlights common types of visual ambiguities: ambiguous spatial relationships between nodes and edges, visual overlap between community structures, and ambiguity in edge bundling and metanodes. Metrics, including newly proposed metrics for abnormal edge lengths, visual overlap in community structures and node/edge aggregation, are proposed to quantify areas of ambiguity in the drawing. These metrics and others are then displayed using a heatmap-based visualization that provides visual feedback to developers of graph drawing and visualization approaches, allowing them to quickly identify misleading areas. The novel metrics and the heatmap-based visualization allow a user to explore ambiguities in graph layouts from multiple perspectives in order to make reasonable graph layout choices. The effectiveness of the technique is demonstrated through case studies and expert reviews.},
}
@article{p10,
journal = {IEEE TVCG},
year = 2015,
title = {Automatic Selection of Partitioning Variables for Small Multiple Displays},
doi = {10.1109/TVCG.2015.2467323},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467323},
author = {Anand, A. and Talbot, J.},
pages = {669--677},
keywords = {Small multiple displays, Visualization selection, Multidimensional data},
abstract = {Effective small multiple displays are created by partitioning a visualization on variables that reveal interesting conditional structure in the data. We propose a method that automatically ranks partitioning variables, allowing analysts to focus on the most promising small multiple displays. Our approach is based on a randomized, non-parametric permutation test, which allows us to handle a wide range of quality measures for visual patterns defined on many different visualization types, while discounting spurious patterns. We demonstrate the effectiveness of our approach on scatterplots of real-world, multidimensional datasets.},
}
@article{p11,
journal = {IEEE TVCG},
year = 2015,
title = {Beyond Memorability: Visualization Recognition and Recall},
doi = {10.1109/TVCG.2015.2467732},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467732},
author = {Borkin, M. and Bylinskii, Z. and Nam Wook Kim and Bainbridge, C.M. and Yeh, C.S. and Borkin, D. and Pfister, H. and Oliva, A.},
pages = {519--528},
keywords = {Information visualization, memorability, recognition, recall, eye-tracking study},
abstract = {In this paper we move beyond memorability and investigate how visualizations are recognized and recalled. For this study we labeled a dataset of 393 visualizations and analyzed the eye movements of 33 participants as well as thousands of participant-generated text descriptions of the visualizations. This allowed us to determine what components of a visualization attract people's attention, and what information is encoded into memory. Our findings quantitatively support many conventional qualitative design guidelines, including that (1) titles and supporting text should convey the message of a visualization, (2) if used appropriately, pictograms do not interfere with understanding and can improve recognition, and (3) redundancy helps effectively communicate the message. Importantly, we show that visualizations memorable “at-a-glance” are also capable of effectively conveying the message of the visualization. Thus, a memorable visualization is often also an effective one.},
}
@article{p12,
journal = {IEEE TVCG},
year = 2015,
title = {Beyond Weber's Law: A Second Look at Ranking Visualizations of Correlation},
doi = {10.1109/TVCG.2015.2467671},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467671},
author = {Kay, M. and Heer, J.},
pages = {469--478},
keywords = {Weber's law, perception of correlation, log transformation, censored regression, Bayesian methods},
abstract = {Models of human perception - including perceptual “laws” - can be valuable tools for deriving visualization design recommendations. However, it is important to assess the explanatory power of such models when using them to inform design. We present a secondary analysis of data previously used to rank the effectiveness of bivariate visualizations for assessing correlation (measured with Pearson's r) according to the well-known Weber-Fechner Law. Beginning with the model of Harrison et al. [1], we present a sequence of refinements including incorporation of individual differences, log transformation, censored regression, and adoption of Bayesian statistics. Our model incorporates all observations dropped from the original analysis, including data near ceilings caused by the data collection process and entire visualizations dropped due to large numbers of observations worse than chance. This model deviates from Weber's Law, but provides improved predictive accuracy and generalization. Using Bayesian credibility intervals, we derive a partial ranking that groups visualizations with similar performance, and we give precise estimates of the difference in performance between these groups. We find that compared to other visualizations, scatterplots are unique in combining low variance between individuals and high precision on both positively- and negatively correlated data. We conclude with a discussion of the value of data sharing and replication, and share implications for modeling similar experimental data.},
}
@article{p13,
journal = {IEEE TVCG},
year = 2015,
title = {Evaluation of Parallel Coordinates: Overview, Categorization and Guidelines for Future Research},
doi = {10.1109/TVCG.2015.2466992},
url = {http://dx.doi.org/10.1109/TVCG.2015.2466992},
author = {Johansson, J. and Forsell, C.},
pages = {579--588},
keywords = {Survey, evaluation, guidelines, parallel coordinates},
abstract = {The parallel coordinates technique is widely used for the analysis of multivariate data. During recent decades significant research efforts have been devoted to exploring the applicability of the technique and to expand upon it, resulting in a variety of extensions. Of these many research activities, a surprisingly small number concerns user-centred evaluations investigating actual use and usability issues for different tasks, data and domains. The result is a clear lack of convincing evidence to support and guide uptake by users as well as future research directions. To address these issues this paper contributes a thorough literature survey of what has been done in the area of user-centred evaluation of parallel coordinates. These evaluations are divided into four categories based on characterization of use, derived from the survey. Based on the data from the survey and the categorization combined with the authors' experience of working with parallel coordinates, a set of guidelines for future research directions is proposed.},
}
@article{p14,
journal = {IEEE TVCG},
year = 2015,
title = {Guidelines for Effective Usage of Text Highlighting Techniques},
doi = {10.1109/TVCG.2015.2467759},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467759},
author = {Strobelt, H. and Oelke, D. and Bum Chul Kwon and Schreck, T. and Pfister, H.},
pages = {489--498},
keywords = {Text highlighting techniques, visual document analytics, text annotation, crowdsourced study},
abstract = {Semi-automatic text analysis involves manual inspection of text. Often, different text annotations (like part-of-speech or named entities) are indicated by using distinctive text highlighting techniques. In typesetting there exist well-known formatting conventions, such as bold typeface, italics, or background coloring, that are useful for highlighting certain parts of a given text. Also, many advanced techniques for visualization and highlighting of text exist; yet, standard typesetting is common, and the effects of standard typesetting on the perception of text are not fully understood. As such, we surveyed and tested the effectiveness of common text highlighting techniques, both individually and in combination, to discover how to maximize pop-out effects while minimizing visual interference between techniques. To validate our findings, we conducted a series of crowd-sourced experiments to determine: i) a ranking of nine commonly-used text highlighting techniques; ii) the degree of visual interference between pairs of text highlighting techniques; iii) the effectiveness of techniques for visual conjunctive search. Our results show that increasing font size works best as a single highlighting technique, and that there are significant visual interferences between some pairs of highlighting techniques. We discuss the pros and cons of different combinations as a design guideline to choose text highlighting techniques for text viewers.},
}
@article{p15,
journal = {IEEE TVCG},
year = 2015,
title = {High-Quality Ultra-Compact Grid Layout of Grouped Networks},
doi = {10.1109/TVCG.2015.2467251},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467251},
author = {Yoghourdjian, V. and Dwyer, T. and Gange, G. and Kieffer, S. and Klein, K. and Marriott, K.},
pages = {339--348},
keywords = {Network visualization, graph drawing, power graph, optimization, large-neighborhood search},
abstract = {Prior research into network layout has focused on fast heuristic techniques for layout of large networks, or complex multi-stage pipelines for higher quality layout of small graphs. Improvements to these pipeline techniques, especially for orthogonal-style layout, are difficult and practical results have been slight in recent years. Yet, as discussed in this paper, there remain significant issues in the quality of the layouts produced by these techniques, even for quite small networks. This is especially true when layout with additional grouping constraints is required. The first contribution of this paper is to investigate an ultra-compact, grid-like network layout aesthetic that is motivated by the grid arrangements that are used almost universally by designers in typographical layout. Since the time when these heuristic and pipeline-based graph-layout methods were conceived, generic technologies (MIP, CP and SAT) for solving combinatorial and mixed-integer optimization problems have improved massively. The second contribution of this paper is to reassess whether these techniques can be used for high-quality layout of small graphs. While they are fast enough for graphs of up to 50 nodes we found these methods do not scale up. Our third contribution is a large-neighborhood search meta-heuristic approach that is scalable to larger networks.},
}
@article{p16,
journal = {IEEE TVCG},
year = 2015,
title = {HOLA: Human-like Orthogonal Network Layout},
doi = {10.1109/TVCG.2015.2467451},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467451},
author = {Kieffer, S. and Dwyer, T. and Marriott, K. and Wybrow, M.},
pages = {349--358},
keywords = {Graph layout, orthogonal layout, automatic layout algorithms, user-generated layout, graph-drawing aesthetics},
abstract = {Over the last 50 years a wide variety of automatic network layout algorithms have been developed. Some are fast heuristic techniques suitable for networks with hundreds of thousands of nodes while others are multi-stage frameworks for higher-quality layout of smaller networks. However, despite decades of research currently no algorithm produces layout of comparable quality to that of a human. We give a new “human-centred” methodology for automatic network layout algorithm design that is intended to overcome this deficiency. User studies are first used to identify the aesthetic criteria algorithms should encode, then an algorithm is developed that is informed by these criteria and finally, a follow-up study evaluates the algorithm output. We have used this new methodology to develop an automatic orthogonal network layout method, HOLA, that achieves measurably better (by user study) layout than the best available orthogonal layout algorithm and which produces layouts of comparable quality to those produced by hand.},
}
@article{p17,
journal = {IEEE TVCG},
year = 2015,
title = {How do People Make Sense of Unfamiliar Visualizations?: A Grounded Model of Novice's Information Visualization Sensemaking},
doi = {10.1109/TVCG.2015.2467195},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467195},
author = {Sukwon Lee and Sung-Hee Kim and Ya-Hsin Hung and Lam, H. and Youn-ah Kang and Ji Soo Yi},
pages = {499--508},
keywords = {Sensemaking model, information visualization, novice users, grounded theory, qualitative study},
abstract = {In this paper, we would like to investigate how people make sense of unfamiliar information visualizations. In order to achieve the research goal, we conducted a qualitative study by observing 13 participants when they endeavored to make sense of three unfamiliar visualizations (i.e., a parallel-coordinates plot, a chord diagram, and a treemap) that they encountered for the first time. We collected data including audio/video recordings of think-aloud sessions and semi-structured interviews, and analyzed the data using the grounded theory method. The primary result of this study is a grounded model of NOvice's information VIsualization Sensemaking (NOVIS model), which consists of the five major cognitive activities: (1) encountering visualization, (2) constructing a frame, (3) exploring visualization, (4) questioning the frame, and (5) floundering on visualization. We introduce the NOVIS model by explaining the five activities with representative quotes from our participants. We also explore the dynamics in the model. Lastly, we compare with other existing models and share further research directions that arose from our observations.},
}
@article{p18,
journal = {IEEE TVCG},
year = 2015,
title = {Improving Bayesian Reasoning: The Effects of Phrasing, Visualization, and Spatial Ability},
doi = {10.1109/TVCG.2015.2467758},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467758},
author = {Ottley, A. and Peck, E.M. and Harrison, L. and Afergan, D. and Ziemkiewicz, C. and Taylor, H.A. and Han, P.K.J. and Chang, R.},
pages = {529--538},
keywords = {Bayesian Reasoning, Visualization, Spatial Ability, Individual Differences},
abstract = {Decades of research have repeatedly shown that people perform poorly at estimating and understanding conditional probabilities that are inherent in Bayesian reasoning problems. Yet in the medical domain, both physicians and patients make daily, life-critical judgments based on conditional probability. Although there have been a number of attempts to develop more effective ways to facilitate Bayesian reasoning, reports of these findings tend to be inconsistent and sometimes even contradictory. For instance, the reported accuracies for individuals being able to correctly estimate conditional probability range from 6% to 62%. In this work, we show that problem representation can significantly affect accuracies. By controlling the amount of information presented to the user, we demonstrate how text and visualization designs can increase overall accuracies to as high as 77%. Additionally, we found that for users with high spatial ability, our designs can further improve their accuracies to as high as 100%. By and large, our findings provide explanations for the inconsistent reports on accuracy in Bayesian reasoning tasks and show a significant improvement over existing methods. We believe that these findings can have immediate impact on risk communication in health-related fields.},
}
@article{p19,
journal = {IEEE TVCG},
year = 2015,
title = {Matches, Mismatches, and Methods: Multiple-View Workflows for Energy Portfolio Analysis},
doi = {10.1109/TVCG.2015.2466971},
url = {http://dx.doi.org/10.1109/TVCG.2015.2466971},
author = {Brehmer, M. and Ng, J. and Tate, K. and Munzner, T.},
pages = {449--458},
keywords = {Design study, design methodologies, time series data, task and requirements analysis, coordinated and multiple views},
abstract = {The energy performance of large building portfolios is challenging to analyze and monitor, as current analysis tools are not scalable or they present derived and aggregated data at too coarse of a level. We conducted a visualization design study, beginning with a thorough work domain analysis and a characterization of data and task abstractions. We describe generalizable visual encoding design choices for time-oriented data framed in terms of matches and mismatches, as well as considerations for workflow design. Our designs address several research questions pertaining to scalability, view coordination, and the inappropriateness of line charts for derived and aggregated data due to a combination of data semantics and domain convention. We also present guidelines relating to familiarity and trust, as well as methodological considerations for visualization design studies. Our designs were adopted by our collaborators and incorporated into the design of an energy analysis software application that will be deployed to tens of thousands of energy workers in their client base.},
}
@article{p20,
journal = {IEEE TVCG},
year = 2015,
title = {Off the Radar: Comparative Evaluation of Radial Visualization Solutions for Composite Indicators},
doi = {10.1109/TVCG.2015.2467322},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467322},
author = {Albo, Y. and Lanir, J. and Bak, P. and Rafaeli, S.},
pages = {569--578},
keywords = {Visualization evaluation, radial layout design, composite indicator visualization, experiment},
abstract = {A composite indicator (CI) is a measuring and benchmark tool used to capture multi-dimensional concepts, such as Information and Communication Technology (ICT) usage. Individual indicators are selected and combined to reflect the phenomenon being measured. Visualization of a composite indicator is recommended as a tool to enable interested stakeholders, as well as the public audience, to better understand the indicator components and evolution over time. However, existing CI visualizations introduce a variety of solutions and there is a lack of CI visualization guidelines. Radial visualizations are popular among these solutions because of CI's inherent multi-dimensionality. Although in dispute, Radar-charts are often used for CI presentation. However, no empirical evidence on Radar's effectiveness and efficiency for common CI tasks is available. In this paper, we aim to fill this gap by reporting on a controlled experiment that compares the Radar chart technique with two other radial visualization methods: Flowercharts as used in the well-known OECD Better Life Index, and Circle-charts which could be adopted for this purpose. Examples of these charts in the current context are shown in Figure 1. We evaluated these charts, showing the same data with each of the mentioned techniques applying small multiple views for different dimensions of the data. We compared users' performance and preference empirically under a formal task-taxonomy. Results indicate that the Radar chart was the least effective and least liked, while performance of the two other options was mixed and dependent on the task. Results also showed strong preference of participants toward the Flower chart. Summarizing our results, we provide specific design guidelines for composite indicator visualization.},
}
@article{p21,
journal = {IEEE TVCG},
year = 2015,
title = {Optimal Sets of Projections of High-Dimensional Data},
doi = {10.1109/TVCG.2015.2467132},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467132},
author = {Lehmann, D.J. and Theisel, H.},
pages = {609--618},
keywords = {Multivariate Projections, Star Coordinates, Radial Visualization, High-dimensional Data},
abstract = {Finding good projections of n-dimensional datasets into a 2D visualization domain is one of the most important problems in Information Visualization. Users are interested in getting maximal insight into the data by exploring a minimal number of projections. However, if the number is too small or improper projections are used, then important data patterns might be overlooked. We propose a data-driven approach to find minimal sets of projections that uniquely show certain data patterns. For this we introduce a dissimilarity measure of data projections that discards affine transformations of projections and prevents repetitions of the same data patterns. Based on this, we provide complete data tours of at most n/2 projections. Furthermore, we propose optimal paths of projection matrices for an interactive data exploration. We illustrate our technique with a set of state-of-the-art real high-dimensional benchmark datasets.},
}
@article{p22,
journal = {IEEE TVCG},
year = 2015,
title = {Orientation-Enhanced Parallel Coordinate Plots},
doi = {10.1109/TVCG.2015.2467872},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467872},
author = {Raidou, R.G. and Eisemann, M. and Breeuwer, M. and Eisemann, E. and Vilanova, A.},
pages = {589--598},
keywords = {Parallel Coordinates, Orientation-enhanced Parallel Coordinates, Brushing, Orientation-enhanced Brushing, Data Readability, Data Selection},
abstract = {Parallel Coordinate Plots (PCPs) is one of the most powerful techniques for the visualization of multivariate data. However, for large datasets, the representation suffers from clutter due to overplotting. In this case, discerning the underlying data information and selecting specific interesting patterns can become difficult. We propose a new and simple technique to improve the display of PCPs by emphasizing the underlying data structure. Our Orientation-enhanced Parallel Coordinate Plots (OPCPs) improve pattern and outlier discernibility by visually enhancing parts of each PCP polyline with respect to its slope. This enhancement also allows us to introduce a novel and efficient selection method, the Orientation-enhanced Brushing (O-Brushing). Our solution is particularly useful when multiple patterns are present or when the view on certain patterns is obstructed by noise. We present the results of our approach with several synthetic and real-world datasets. Finally, we conducted a user evaluation, which verifies the advantages of the OPCPs in terms of discernibility of information in complex data. It also confirms that O-Brushing eases the selection of data patterns in PCPs and reduces the amount of necessary user interactions compared to state-of-the-art brushing techniques.},
}
@article{p23,
journal = {IEEE TVCG},
year = 2015,
title = {Poemage: Visualizing the Sonic Topology of a Poem},
doi = {10.1109/TVCG.2015.2467811},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467811},
author = {McCurdy, N. and Lein, J. and Coles, K. and Meyer, M.},
pages = {439--448},
keywords = {Visualization in the humanities, design studies, text and document data, graph/network data},
abstract = {The digital humanities have experienced tremendous growth within the last decade, mostly in the context of developing computational tools that support what is called distant reading - collecting and analyzing huge amounts of textual data for synoptic evaluation. On the other end of the spectrum is a practice at the heart of the traditional humanities, close reading - the careful, in-depth analysis of a single text in order to extract, engage, and even generate as much productive meaning as possible. The true value of computation to close reading is still very much an open question. During a two-year design study, we explored this question with several poetry scholars, focusing on an investigation of sound and linguistic devices in poetry. The contributions of our design study include a problem characterization and data abstraction of the use of sound in poetry as well as Poemage, a visualization tool for interactively exploring the sonic topology of a poem. The design of Poemage is grounded in the evaluation of a series of technology probes we deployed to our poetry collaborators, and we validate the final design with several case studies that illustrate the disruptive impact technology can have on poetry scholarship. Finally, we also contribute a reflection on the challenges we faced conducting visualization research in literary studies.},
}
@article{p24,
journal = {IEEE TVCG},
year = 2015,
title = {Probing Projections: Interaction Techniques for Interpreting Arrangements and Errors of Dimensionality Reductions},
doi = {10.1109/TVCG.2015.2467717},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467717},
author = {Stahnke, J. and Dörk, M. and Müller, B. and Thom, A.},
pages = {629--638},
keywords = {Information visualization, interactivity, dimensionality reduction, multidimensional scaling},
abstract = {We introduce a set of integrated interaction techniques to interpret and interrogate dimensionality-reduced data. Projection techniques generally aim to make a high-dimensional information space visible in form of a planar layout. However, the meaning of the resulting data projections can be hard to grasp. It is seldom clear why elements are placed far apart or close together and the inevitable approximation errors of any projection technique are not exposed to the viewer. Previous research on dimensionality reduction focuses on the efficient generation of data projections, interactive customisation of the model, and comparison of different projection techniques. There has been only little research on how the visualization resulting from data projection is interacted with. We contribute the concept of probing as an integrated approach to interpreting the meaning and quality of visualizations and propose a set of interactive methods to examine dimensionality-reduced data as well as the projection itself. The methods let viewers see approximation errors, question the positioning of elements, compare them to each other, and visualize the influence of data dimensions on the projection space. We created a web-based system implementing these methods, and report on findings from an evaluation with data analysts using the prototype to examine multidimensional datasets.},
}
@article{p25,
journal = {IEEE TVCG},
year = 2015,
title = {Reactive Vega: A Streaming Dataflow Architecture for Declarative Interactive Visualization},
doi = {10.1109/TVCG.2015.2467091},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467091},
author = {Satyanarayan, A. and Russell, R. and Hoffswell, J. and Heer, J.},
pages = {659--668},
keywords = {Information visualization, systems, toolkits, declarative specification, optimization, interaction, streaming data},
abstract = {We present Reactive Vega, a system architecture that provides the first robust and comprehensive treatment of declarative visual and interaction design for data visualization. Starting from a single declarative specification, Reactive Vega constructs a dataflow graph in which input data, scene graph elements, and interaction events are all treated as first-class streaming data sources. To support expressive interactive visualizations that may involve time-varying scalar, relational, or hierarchical data, Reactive Vega's dataflow graph can dynamically re-write itself at runtime by extending or pruning branches in a data-driven fashion. We discuss both compile- and run-time optimizations applied within Reactive Vega, and share the results of benchmark studies that indicate superior interactive performance to both D3 and the original, non-reactive Vega system.},
}
@article{p26,
journal = {IEEE TVCG},
year = 2015,
title = {SchemeLens: A Content-Aware Vector-Based Fisheye Technique for Navigating Large Systems Diagrams},
doi = {10.1109/TVCG.2015.2467035},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467035},
author = {Cohé, A. and Liutkus, B. and Bailly, G. and Eagan, J. and Lecolinet, E.},
pages = {330--338},
keywords = {Fisheye, vector-scaling, content-aware, network schematics, interactive zoom, navigation, information visualization},
abstract = {System schematics, such as those used for electrical or hydraulic systems, can be large and complex. Fisheye techniques can help navigate such large documents by maintaining the context around a focus region, but the distortion introduced by traditional fisheye techniques can impair the readability of the diagram. We present SchemeLens, a vector-based, topology-aware fisheye technique which aims to maintain the readability of the diagram. Vector-based scaling reduces distortion to components, but distorts layout. We present several strategies to reduce this distortion by using the structure of the topology, including orthogonality and alignment, and a model of user intention to foster smooth and predictable navigation. We evaluate this approach through two user studies: Results show that (1) SchemeLens is 16-27% faster than both round and rectangular flat-top fisheye lenses at finding and identifying a target along one or several paths in a network diagram; (2) augmenting SchemeLens with a model of user intentions aids in learning the network topology.},
}
@article{p27,
journal = {IEEE TVCG},
year = 2015,
title = {Sketching Designs Using the Five Design-Sheet Methodology},
doi = {10.1109/TVCG.2015.2467271},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467271},
author = {Roberts, J.C. and Headleand, C. and Ritsos, P.D.},
pages = {419--428},
keywords = {Lo-fidelity prototyping, User-centred design, Sketching for visualization, Ideation},
abstract = {Sketching designs has been shown to be a useful way of planning and considering alternative solutions. The use of lo-fidelity prototyping, especially paper-based sketching, can save time, money and converge to better solutions more quickly. However, this design process is often viewed to be too informal. Consequently users do not know how to manage their thoughts and ideas (to first think divergently, to then finally converge on a suitable solution). We present the Five Design Sheet (FdS) methodology. The methodology enables users to create information visualization interfaces through lo-fidelity methods. Users sketch and plan their ideas, helping them express different possibilities, think through these ideas to consider their potential effectiveness as solutions to the task (sheet 1); they create three principle designs (sheets 2,3 and 4); before converging on a final realization design that can then be implemented (sheet 5). In this article, we present (i) a review of the use of sketching as a planning method for visualization and the benefits of sketching, (ii) a detailed description of the Five Design Sheet (FdS) methodology, and (iii) an evaluation of the FdS using the System Usability Scale, along with a case-study of its use in industry and experience of its use in teaching.},
}
@article{p28,
journal = {IEEE TVCG},
year = 2015,
title = {Spatial Reasoning and Data Displays},
doi = {10.1109/TVCG.2015.2469125},
url = {http://dx.doi.org/10.1109/TVCG.2015.2469125},
author = {VanderPlas, S. and Hofmann, H.},
pages = {459--468},
keywords = {Data visualization, Perception, Statistical graphics, Statistical computing},
abstract = {Graphics convey numerical information very efficiently, but rely on a different set of mental processes than tabular displays. Here, we present a study relating demographic characteristics and visual skills to perception of graphical lineups. We conclude that lineups are essentially a classification test in a visual domain, and that performance on the lineup protocol is associated with general aptitude, rather than specific tasks such as card rotation and spatial manipulation. We also examine the possibility that specific graphical tasks may be associated with certain visual skills and conclude that more research is necessary to understand which visual skills are required in order to understand certain plot types.},
}
@article{p29,
journal = {IEEE TVCG},
year = 2015,
title = {Speculative Practices: Utilizing InfoVis to Explore Untapped Literary Collections},
doi = {10.1109/TVCG.2015.2467452},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467452},
author = {Hinrichs, U. and Forlini, S. and Moynihan, B.},
pages = {429--438},
keywords = {Digital Humanities, Interlinked Visualization, Literary Studies, Cultural Collections, Science Fiction},
abstract = {In this paper we exemplify how information visualization supports speculative thinking, hypotheses testing, and preliminary interpretation processes as part of literary research. While InfoVis has become a buzz topic in the digital humanities, skepticism remains about how effectively it integrates into and expands on traditional humanities research approaches. From an InfoVis perspective, we lack case studies that show the specific design challenges that make literary studies and humanities research at large a unique application area for information visualization. We examine these questions through our case study of the Speculative W@nderverse, a visualization tool that was designed to enable the analysis and exploration of an untapped literary collection consisting of thousands of science fiction short stories. We present the results of two empirical studies that involved general-interest readers and literary scholars who used the evolving visualization prototype as part of their research for over a year. Our findings suggest a design space for visualizing literary collections that is defined by (1) their academic and public relevance, (2) the tension between qualitative vs. quantitative methods of interpretation, (3) result- vs. process-driven approaches to InfoVis, and (4) the unique material and visual qualities of cultural collections. Through the Speculative W@nderverse we demonstrate how visualization can bridge these sometimes contradictory perspectives by cultivating curiosity and providing entry points into literary collections while, at the same time, supporting multiple aspects of humanities research processes.},
}
@article{p30,
journal = {IEEE TVCG},
year = 2015,
title = {Suggested Interactivity: Seeking Perceived Affordances for Information Visualization},
doi = {10.1109/TVCG.2015.2467201},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467201},
author = {Boy, J. and Eveillard, L. and Detienne, F. and Fekete, J.},
pages = {639--648},
keywords = {Suggested interactivity, perceived affordances, information visualization for the people, online visualization},
abstract = {In this article, we investigate methods for suggesting the interactivity of online visualizations embedded with text. We first assess the need for such methods by conducting three initial experiments on Amazon's Mechanical Turk. We then present a design space for Suggested Interactivity (i.e., visual cues used as perceived affordances, or SI), based on a survey of 382 HTML5 and visualization websites. Finally, we assess the effectiveness of three SI cues we designed for suggesting the interactivity of bar charts embedded with text. Our results show that only one cue (SI3) was successful in inciting participants to interact with the visualizations, and we hypothesize this is because this particular cue provided feedforward.},
}
@article{p31,
journal = {IEEE TVCG},
year = 2015,
title = {Time Curves: Folding Time to Visualize Patterns of Temporal Evolution in Data},
doi = {10.1109/TVCG.2015.2467851},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467851},
author = {Bach, B. and Conglei Shi and Heulot, N. and Madhyastha, T. and Grabowski, T. and Dragicevic, P.},
pages = {559--568},
keywords = {Temporal data visualization, information visualization, multidimensional scaling},
abstract = {We introduce time curves as a general approach for visualizing patterns of evolution in temporal data. Examples of such patterns include slow and regular progressions, large sudden changes, and reversals to previous states. These patterns can be of interest in a range of domains, such as collaborative document editing, dynamic network analysis, and video analysis. Time curves employ the metaphor of folding a timeline visualization into itself so as to bring similar time points close to each other. This metaphor can be applied to any dataset where a similarity metric between temporal snapshots can be defined, thus it is largely datatype-agnostic. We illustrate how time curves can visually reveal informative patterns in a range of different datasets.},
}
@article{p32,
journal = {IEEE TVCG},
year = 2015,
title = {TimeNotes: A Study on Effective Chart Visualization and Interaction Techniques for Time-Series Data},
doi = {10.1109/TVCG.2015.2467751},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467751},
author = {Walker, J. and Borgo, R. and Jones, M.W.},
pages = {549--558},
keywords = {Time-series Exploration, Focus+Context, Lens, Interaction Techniques},
abstract = {Collecting sensor data results in large temporal data sets which need to be visualized, analyzed, and presented. One-dimensional time-series charts are used, but these present problems when screen resolution is small in comparison to the data. This can result in severe over-plotting, giving rise for the requirement to provide effective rendering and methods to allow interaction with the detailed data. Common solutions can be categorized as multi-scale representations, frequency based, and lens based interaction techniques. In this paper, we comparatively evaluate existing methods, such as Stack Zoom [15] and ChronoLenses [38], giving a graphical overview of each and classifying their ability to explore and interact with data. We propose new visualizations and other extensions to the existing approaches. We undertake and report an empirical study and a field study using these techniques.},
}
@article{p33,
journal = {IEEE TVCG},
year = 2015,
title = {TimeSpan: Using Visualization to Explore Temporal Multi-dimensional Data of Stroke Patients},
doi = {10.1109/TVCG.2015.2467325},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467325},
author = {Loorak, M.H. and Perin, C. and Kamal, N. and Hill, M. and Carpendale, S.},
pages = {409--418},
keywords = {Multi-dimensional data, Temporal event sequences, Electronic health records},
abstract = {We present TimeSpan, an exploratory visualization tool designed to gain a better understanding of the temporal aspects of the stroke treatment process. Working with stroke experts, we seek to provide a tool to help improve outcomes for stroke victims. Time is of critical importance in the treatment of acute ischemic stroke patients. Every minute that the artery stays blocked, an estimated 1.9 million neurons and 12 km of myelinated axons are destroyed. Consequently, there is a critical need for efficiency of stroke treatment processes. Optimizing time to treatment requires a deep understanding of interval times. Stroke health care professionals must analyze the impact of procedures, events, and patient attributes on time, ultimately to save lives and improve quality of life after stroke. First, we interviewed eight domain experts, and closely collaborated with two of them to inform the design of TimeSpan. We classify the analytical tasks which a visualization tool should support and extract design goals from the interviews and field observations. Based on these tasks and the understanding gained from the collaboration, we designed TimeSpan, a web-based tool for exploring multi-dimensional and temporal stroke data. We describe how TimeSpan incorporates factors from stacked bar graphs, line charts, histograms, and a matrix visualization to create an interactive hybrid view of temporal data. From feedback collected from domain experts in a focus group session, we reflect on the lessons we learned from abstracting the tasks and iteratively designing TimeSpan.},
}
@article{p34,
journal = {IEEE TVCG},
year = 2015,
title = {Vials: Visualizing Alternative Splicing of Genes},
doi = {10.1109/TVCG.2015.2467911},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467911},
author = {Strobelt, H. and Alsallakh, B. and Botros, J. and Peterson, B. and Borowsky, M. and Pfister, H. and Lex, A.},
pages = {399--408},
keywords = {Biology visualization, protein isoforms, mRNA-seq, directed acyclic graphs, multivariate networks},
abstract = {Alternative splicing is a process by which the same DNA sequence is used to assemble different proteins, called protein isoforms. Alternative splicing works by selectively omitting some of the coding regions (exons) typically associated with a gene. Detection of alternative splicing is difficult and uses a combination of advanced data acquisition methods and statistical inference. Knowledge about the abundance of isoforms is important for understanding both normal processes and diseases and to eventually improve treatment through targeted therapies. The data, however, is complex and current visualizations for isoforms are neither perceptually efficient nor scalable. To remedy this, we developed Vials, a novel visual analysis tool that enables analysts to explore the various datasets that scientists use to make judgments about isoforms: the abundance of reads associated with the coding regions of the gene, evidence for junctions, i.e., edges connecting the coding regions, and predictions of isoform frequencies. Vials is scalable as it allows for the simultaneous analysis of many samples in multiple groups. Our tool thus enables experts to (a) identify patterns of isoform abundance in groups of samples and (b) evaluate the quality of the data. We demonstrate the value of our tool in case studies using publicly available datasets.},
}
@article{p35,
journal = {IEEE TVCG},
year = 2015,
title = {Visual Encodings of Temporal Uncertainty: A Comparative User Study},
doi = {10.1109/TVCG.2015.2467752},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467752},
author = {Gschwandtner, T. and Bögl, M. and Federico, P. and Miksch, S.},
pages = {539--548},
keywords = {Uncertainty, temporal intervals, visualization},
abstract = {A number of studies have investigated different ways of visualizing uncertainty. However, in the temporal dimension, it is still an open question how to best represent uncertainty, since the special characteristics of time require special visual encodings and may provoke different interpretations. Thus, we have conducted a comprehensive study comparing alternative visual encodings of intervals with uncertain start and end times: gradient plots, violin plots, accumulated probability plots, error bars, centered error bars, and ambiguation. Our results reveal significant differences in error rates and completion time for these different visualization types and different tasks. We recommend using ambiguation - using a lighter color value to represent uncertain regions - or error bars for judging durations and temporal bounds, and gradient plots - using fading color or transparency - for judging probability values.},
}
@article{p36,
journal = {IEEE TVCG},
year = 2015,
title = {Visual Mementos: Reflecting Memories with Personal Data},
doi = {10.1109/TVCG.2015.2467831},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467831},
author = {Thudt, A. and Baur, D. and Huron, S. and Carpendale, S.},
pages = {369--378},
keywords = {Visual Memento, Memories, Personal Visualization, Movement Data, World Wide Web},
abstract = {In this paper we discuss the creation of visual mementos as a new application area for visualization. We define visual mementos as visualizations of personally relevant data for the purpose of reminiscing, and sharing of life experiences. Today more people collect digital information about their life than ever before. The shift from physical to digital archives poses new challenges and opportunities for self-reflection and self-representation. Drawing on research on autobiographical memory and on the role of artifacts in reminiscing, we identified design challenges for visual mementos: mapping data to evoke familiarity, expressing subjectivity, and obscuring sensitive details for sharing. Visual mementos can make use of the known strengths of visualization in revealing patterns to show the familiar instead of the unexpected, and extend representational mappings beyond the objective to include the more subjective. To understand whether people's subjective views on their past can be reflected in a visual representation, we developed, deployed and studied a technology probe that exemplifies our concept of visual mementos. Our results show how reminiscing has been supported and reveal promising new directions for self-reflection and sharing through visual mementos of personal experiences.},
}
@article{p37,
journal = {IEEE TVCG},
year = 2015,
title = {Visualization, Selection, and Analysis of Traffic Flows},
doi = {10.1109/TVCG.2015.2467112},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467112},
author = {Scheepens, R. and Hurter, C. and van de Wetering, H. and van Wijk, J.J.},
pages = {379--388},
keywords = {Moving Object Visualization, traffic flows, interaction},
abstract = {Visualization of the trajectories of moving objects leads to dense and cluttered images, which hinders exploration and understanding. It also hinders adding additional visual information, such as direction, and makes it difficult to interactively extract traffic flows, i.e., subsets of trajectories. In this paper we present our approach to visualize traffic flows and provide interaction tools to support their exploration. We show an overview of the traffic using a density map. The directions of traffic flows are visualized using a particle system on top of the density map. The user can extract traffic flows using a novel selection widget that allows for the intuitive selection of an area, and filtering on a range of directions and any additional attributes. Using simple, visual set expressions, the user can construct more complicated selections. The dynamic behaviors of selected flows may then be shown in annotation windows in which they can be interactively explored and compared. We validate our approach through use cases where we explore and analyze the temporal behavior of aircraft and vessel trajectories, e.g., landing and takeoff sequences, or the evolution of flight route density. The aircraft use cases have been developed and validated in collaboration with domain experts.},
}
@article{p38,
journal = {IEEE TVCG},
year = 2015,
title = {Visualizing Multiple Variables Across Scale and Geography},
doi = {10.1109/TVCG.2015.2467199},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467199},
author = {Goodwin, S. and Dykes, J. and Slingsby, A. and Turkay, C.},
pages = {599--608},
keywords = {Scale, Geography, Multivariate, Sensitivity Analysis, Variable Selection, Local Statistics, Geodemographics, Energy},
abstract = {Comparing multiple variables to select those that effectively characterize complex entities is important in a wide variety of domains - geodemographics for example. Identifying variables that correlate is a common practice to remove redundancy, but correlation varies across space, with scale and over time, and the frequently used global statistics hide potentially important differentiating local variation. For more comprehensive and robust insights into multivariate relations, these local correlations need to be assessed through various means of defining locality. We explore the geography of this issue, and use novel interactive visualization to identify interdependencies in multivariate data sets to support geographically informed multivariate analysis. We offer terminology for considering scale and locality, visual techniques for establishing the effects of scale on correlation and a theoretical framework through which variation in geographic correlation with scale and locality are addressed explicitly. Prototype software demonstrates how these contributions act together. These techniques enable multiple variables and their geographic characteristics to be considered concurrently as we extend visual parameter space analysis (vPSA) to the spatial domain. We find variable correlations to be sensitive to scale and geography to varying degrees in the context of energy-based geodemographics. This sensitivity depends upon the calculation of locality as well as the geographical and statistical structure of the variable.},
}
@article{p39,
journal = {IEEE TVCG},
year = 2015,
title = {Visually Comparing Weather Features in Forecasts},
doi = {10.1109/TVCG.2015.2467754},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467754},
author = {Quinan, P.S. and Meyer, M.},
pages = {389--398},
keywords = {Design study, weather, geographic/geospatial visualization, ensemble data},
abstract = {Meteorologists process and analyze weather forecasts using visualization in order to examine the behaviors of and relationships among weather features. In this design study conducted with meteorologists in decision support roles, we identified and attempted to address two significant common challenges in weather visualization: the employment of inconsistent and often ineffective visual encoding practices across a wide range of visualizations, and a lack of support for directly visualizing how different weather features relate across an ensemble of possible forecast outcomes. In this work, we present a characterization of the problems and data associated with meteorological forecasting, we propose a set of informed default encoding choices that integrate existing meteorological conventions with effective visualization practice, and we extend a set of techniques as an initial step toward directly visualizing the interactions of multiple features over an ensemble forecast. We discuss the integration of these contributions into a functional prototype tool, and also reflect on the many practical challenges that arise when working with weather data.},
}
@article{p40,
journal = {IEEE TVCG},
year = 2015,
title = {Voyager: Exploratory Analysis via Faceted Browsing of Visualization Recommendations},
doi = {10.1109/TVCG.2015.2467191},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467191},
author = {Wongsuphasawat, K. and Moritz, D. and Anand, A. and Mackinlay, J. and Howe, B. and Heer, J.},
pages = {649--658},
keywords = {User interfaces, information visualization, exploratory analysis, visualization recommendation, mixed-initiative systems},
abstract = {General visualization tools typically require manual specification of views: analysts must select data variables and then choose which transformations and visual encodings to apply. These decisions often involve both domain and visualization design expertise, and may impose a tedious specification process that impedes exploration. In this paper, we seek to complement manual chart construction with interactive navigation of a gallery of automatically-generated visualizations. We contribute Voyager, a mixed-initiative system that supports faceted browsing of recommended charts chosen according to statistical and perceptual measures. We describe Voyager's architecture, motivating design principles, and methods for generating and interacting with visualization recommendations. In a study comparing Voyager to a manual visualization specification tool, we find that Voyager facilitates exploration of previously unseen data and leads to increased data variable coverage. We then distill design implications for visualization tools, in particular the need to balance rapid exploration and targeted question-answering.},
}
@article{p163,
journal = {IEEE TVCG},
year = 2014,
title = {A Principled Way of Assessing Visualization Literacy},
doi = {10.1109/TVCG.2014.2346984},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346984},
author = {Boy, J. and Rensink, R.A. and Bertini, E. and Fekete, J.},
pages = {1963--1972},
keywords = {Literacy, Visualization literacy, Rasch Model, Item Response Theory},
abstract = {We describe a method for assessing the visualization literacy (VL) of a user. Assessing how well people understand visualizations has great value for research (e.g., to avoid confounds), for design (e.g., to best determine the capabilities of an audience), for teaching (e.g., to assess the level of new students), and for recruiting (e.g., to assess the level of interviewees). This paper proposes a method for assessing VL based on Item Response Theory. It describes the design and evaluation of two VL tests for line graphs, and presents the extension of the method to bar charts and scatterplots. Finally, it discusses the reimplementation of these tests for fast, effective, and scalable web-based use.},
}
@article{p164,
journal = {IEEE TVCG},
year = 2014,
title = {Activity Sculptures: Exploring the Impact of Physical Visualizations on Running Activity},
doi = {10.1109/TVCG.2014.2352953},
url = {http://dx.doi.org/10.1109/TVCG.2014.2352953},
author = {Stusak, S. and Tabard, A. and Sauka, F. and Khot, R.A. and Butz, A.},
pages = {2201--2210},
keywords = {Physical Visualizations, Activity Sculptures, Physical Activity, Data Sculptures, Behavioral Change},
abstract = {Data sculptures are a promising type of visualization in which data is given a physical form. In the past, they have mostly been used for artistic, communicative or educational purposes, and designers of data sculptures argue that in such situations, physical visualizations can be more enriching than pixel-based visualizations. We present the design of Activity Sculptures: data sculptures of running activity. In a three-week field study we investigated the impact of the sculptures on 14 participants' running activity, the personal and social behaviors generated by the sculptures, as well as participants' experiences when receiving these individual physical tokens generated from the specific data of their runs. The physical rewards generated curiosity and personal experimentation but also social dynamics such as discussion on runs or envy/competition. We argue that such passive (or calm) visualizations can complement nudging and other mechanisms of persuasion with a more playful and reflective look at one's activity.},
}
@article{p165,
journal = {IEEE TVCG},
year = 2014,
title = {An Algebraic Process for Visualization Design},
doi = {10.1109/TVCG.2014.2346325},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346325},
author = {Kindlmann, G. and Scheidegger, C.E.},
pages = {2181--2190},
keywords = {Visualization Design, Symmetries, Visualization Theory},
abstract = {We present a model of visualization design based on algebraic considerations of the visualization process. The model helps characterize visual encodings, guide their design, evaluate their effectiveness, and highlight their shortcomings. The model has three components: the underlying mathematical structure of the data or object being visualized, the concrete representation of the data in a computer, and (to the extent possible) a mathematical description of how humans perceive the visualization. Because we believe the value of our model lies in its practical application, we propose three general principles for good visualization design. We work through a collection of examples where our model helps explain the known properties of existing visualization methods, both good and not-so-good, as well as suggesting some novel methods. We describe how to use the model alongside experimental user studies, since it can help frame experiment outcomes in an actionable manner. Exploring the implications and applications of our model and its design principles should provide many directions for future visualization research.},
}
@article{p166,
journal = {IEEE TVCG},
year = 2014,
title = {Attribute Signatures: Dynamic Visual Summaries for Analyzing Multivariate Geographical Data},
doi = {10.1109/TVCG.2014.2346265},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346265},
author = {Turkay, C. and Slingsby, A. and Hauser, H. and Wood, J. and Dykes, J.},
pages = {2033--2042},
keywords = {Visual analytics, multi-variate data, geographic information, geovisualization, interactive data analysis},
abstract = {The visual analysis of geographically referenced datasets with a large number of attributes is challenging due to the fact that the characteristics of the attributes are highly dependent upon the locations at which they are focussed, and the scale and time at which they are measured. Specialized interactive visual methods are required to help analysts in understanding the characteristics of the attributes when these multiple aspects are considered concurrently. Here, we develop attribute signatures-interactively crafted graphics that show the geographic variability of statistics of attributes through which the extent of dependency between the attributes and geography can be visually explored. We compute a number of statistical measures, which can also account for variations in time and scale, and use them as a basis for our visualizations. We then employ different graphical configurations to show and compare both continuous and discrete variation of location and scale. Our methods allow variation in multiple statistical summaries of multiple attributes to be considered concurrently and geographically, as evidenced by examples in which the census geography of London and the wider UK are explored.},
}
@article{p167,
journal = {IEEE TVCG},
year = 2014,
title = {Axis Calibration for Improving Data Attribute Estimation in Star Coordinates Plots},
doi = {10.1109/TVCG.2014.2346258},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346258},
author = {Rubio-Sanchez, M. and Sanchez, A.},
pages = {2013--2022},
keywords = {Star Coordinates, RadViz, Biplots, Axis calibration, Attribute value estimation, Data centering, Orthographic projection},
abstract = {Star coordinates is a well-known multivariate visualization method that produces linear dimensionality reduction mappings through a set of radial axes defined by vectors in an observable space. One of its main drawbacks concerns the difficulty to recover attributes of data samples accurately, which typically lie in the [0, 1] interval, given the locations of the low-dimensional embeddings and the vectors. In this paper we show that centering the data can considerably increase attribute estimation accuracy, where data values can be read off approximately by projecting embedded points onto calibrated (i.e., labeled) axes, similarly to classical statistical biplots. In addition, this idea can be coupled with a recently developed orthonormalization process on the axis vectors that prevents unnecessary distortions. We demonstrate that the combination of both approaches not only enhances the estimates, but also provides more faithful representations of the data.},
}
@article{p168,
journal = {IEEE TVCG},
year = 2014,
title = {Combing the Communication Hairball: Visualizing Parallel Execution Traces using Logical Time},
doi = {10.1109/TVCG.2014.2346456},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346456},
author = {Isaacs, K.E. and Bremer, P.-T. and Jusufi, I. and Gamblin, T. and Bhatele, A. and Schulz, M. and Hamann, B.},
pages = {2349--2358},
keywords = {Information visualization, software visualization, timelines, traces, performance analysis},
abstract = {With the continuous rise in complexity of modern supercomputers, optimizing the performance of large-scale parallel programs is becoming increasingly challenging. Simultaneously, the growth in scale magnifies the impact of even minor inefficiencies - potentially millions of compute hours and megawatts in power consumption can be wasted on avoidable mistakes or sub-optimal algorithms. This makes performance analysis and optimization critical elements in the software development process. One of the most common forms of performance analysis is to study execution traces, which record a history of per-process events and interprocess messages in a parallel application. Trace visualizations allow users to browse this event history and search for insights into the observed performance behavior. However, current visualizations are difficult to understand even for small process counts and do not scale gracefully beyond a few hundred processes. Organizing events in time leads to a virtually unintelligible conglomerate of interleaved events and moderately high process counts overtax even the largest display. As an alternative, we present a new trace visualization approach based on transforming the event history into logical time inferred directly from happened-before relationships. This emphasizes the code's structural behavior, which is much more familiar to the application developer. The original timing data, or other information, is then encoded through color, leading to a more intuitive visualization. Furthermore, we use the discrete nature of logical timelines to cluster processes according to their local behavior leading to a scalable visualization of even long traces on large process counts. We demonstrate our system using two case studies on large-scale parallel codes.},
}
@article{p169,
journal = {IEEE TVCG},
year = 2014,
title = {Comparative Eye Tracking Study on Node-Link Visualizations of Trajectories},
doi = {10.1109/TVCG.2014.2346420},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346420},
author = {Netzel, R. and Burch, M. and Weiskopf, D.},
pages = {2221--2230},
keywords = {User study, eye tracking, evaluation, trajectory visualization, node-link visualization, direction encoding, node splatting, halo rendering},
abstract = {We present the results of an eye tracking study that compares different visualization methods for long, dense, complex, and piecewise linear spatial trajectories. Typical sources of such data are from temporally discrete measurements of the positions of moving objects, for example, recorded GPS tracks of animals in movement ecology. In the repeated-measures within-subjects user study, four variants of node-link visualization techniques are compared, with the following representations of directed links: standard arrow, tapered, equidistant arrows, and equidistant comets. In addition, we investigate the effect of rendering order for the halo visualization of those links as well as the usefulness of node splatting. All combinations of link visualization techniques are tested for different trajectory density levels. We used three types of tasks: tracing of paths, identification of longest links, and estimation of the density of trajectory clusters. Results are presented in the form of the statistical evaluation of task completion time, task solution accuracy, and two eye tracking metrics. These objective results are complemented by a summary of subjective feedback from the participants. The main result of our study is that tapered links perform very well. However, we discuss that equidistant comets and equidistant arrows are a good option to perceive direction information independent of zoom-level of the display.},
}
@article{p170,
journal = {IEEE TVCG},
year = 2014,
title = {Constructing Visual Representations: Investigating the Use of Tangible Tokens},
doi = {10.1109/TVCG.2014.2346292},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346292},
author = {Huron, S. and Jansen, Y. and Carpendale, S.},
pages = {2102--2111},
keywords = {Constructive visualization, Physical visualization, Dynamic visualization, Empirical study, Token, Visualization authoring, Information visualization, Visual mapping, Novices, Visualization construction, Visual analytics},
abstract = {The accessibility of infovis authoring tools to a wide audience has been identified as a major research challenge. A key task in the authoring process is the development of visual mappings. While the infovis community has long been deeply interested in finding effective visual mappings, comparatively little attention has been placed on how people construct visual mappings. In this paper, we present the results of a study designed to shed light on how people transform data into visual representations. We asked people to create, update and explain their own information visualizations using only tangible building blocks. We learned that all participants, most of whom had little experience in visualization authoring, were readily able to create and talk about their own visualizations. Based on our observations, we discuss participants' actions during the development of their visual representations and during their analytic activities. We conclude by suggesting implications for tool design to enable broader support for infovis authoring.},
}
@article{p171,
journal = {IEEE TVCG},
year = 2014,
title = {Design Activity Framework for Visualization Design},
doi = {10.1109/TVCG.2014.2346331},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346331},
author = {McKenna, S. and Mazur, D. and Agutter, J. and Meyer, M.},
pages = {2191--2200},
keywords = {Design, frameworks, process, cybersecurity, nested model, decisions, models, evaluation, visualization},
abstract = {An important aspect in visualization design is the connection between what a designer does and the decisions the designer makes. Existing design process models, however, do not explicitly link back to models for visualization design decisions. We bridge this gap by introducing the design activity framework, a process model that explicitly connects to the nested model, a well-known visualization design decision model. The framework includes four overlapping activities that characterize the design process, with each activity explicating outcomes related to the nested model. Additionally, we describe and characterize a list of exemplar methods and how they overlap among these activities. The design activity framework is the result of reflective discussions from a collaboration on a visualization redesign project, the details of which we describe to ground the framework in a real-world design process. Lastly, from this redesign project we provide several research outcomes in the domain of cybersecurity, including an extended data abstraction and rich opportunities for future visualization research.},
}
@article{p172,
journal = {IEEE TVCG},
year = 2014,
title = {DimpVis: Exploring Time-varying Information Visualizations by Direct Manipulation},
doi = {10.1109/TVCG.2014.2346250},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346250},
author = {Kondo, B. and Collins, C.},
pages = {2003--2012},
keywords = {Time navigation, direct manipulation, information visualization},
abstract = {We introduce a new direct manipulation technique, DimpVis, for interacting with visual items in information visualizations to enable exploration of the time dimension. DimpVis is guided by visual hint paths which indicate how a selected data item changes through the time dimension in a visualization. Temporal navigation is controlled by manipulating any data item along its hint path. All other items are updated to reflect the new time. We demonstrate how the DimpVis technique can be designed to directly manipulate position, colour, and size in familiar visualizations such as bar charts and scatter plots, as a means for temporal navigation. We present results from a comparative evaluation, showing that the DimpVis technique was subjectively preferred and quantitatively competitive with the traditional time slider, and significantly faster than small multiples for a variety of tasks.},
}
@article{p173,
journal = {IEEE TVCG},
year = 2014,
title = {Domino: Extracting, Comparing, and Manipulating Subsets Across Multiple Tabular Datasets},
doi = {10.1109/TVCG.2014.2346260},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346260},
author = {Gratzl, S. and Gehlenborg, N. and Lex, A. and Pfister, H. and Streit, M.},
pages = {2023--2032},
keywords = {Multiple coordinated views, visual linking, relationships, heterogeneous data, categorical data},
abstract = {Answering questions about complex issues often requires analysts to take into account information contained in multiple interconnected datasets. A common strategy in analyzing and visualizing large and heterogeneous data is dividing it into meaningful subsets. Interesting subsets can then be selected and the associated data and the relationships between the subsets visualized. However, neither the extraction and manipulation nor the comparison of subsets is well supported by state-of-the-art techniques. In this paper we present Domino, a novel multiform visualization technique for effectively representing subsets and the relationships between them. By providing comprehensive tools to arrange, combine, and extract subsets, Domino allows users to create both common visualization techniques and advanced visualizations tailored to specific use cases. In addition to the novel technique, we present an implementation that enables analysts to manage the wide range of options that our approach offers. Innovative interactive features such as placeholders and live previews support rapid creation of complex analysis setups. We introduce the technique and the implementation using a simple example and demonstrate scalability and effectiveness in a use case from the field of cancer genomics.},
}
@article{p174,
journal = {IEEE TVCG},
year = 2014,
title = {Effects of Presentation Mode and Pace Control on Performance in Image Classification},
doi = {10.1109/TVCG.2014.2346437},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346437},
author = {van der Corput, P. and van Wijk, J.J.},
pages = {2301--2309},
keywords = {RSVP, image classification, image browsing, multimedia visualization},
abstract = {A common task in visualization is to quickly find interesting items in large sets. When appropriate metadata is missing, automatic queries are impossible and users have to inspect all elements visually. We compared two fundamentally different, but obvious display modes for this task and investigated the difference with respect to effectiveness, efficiency, and satisfaction. The static mode is based on the page metaphor and presents successive pages with a static grid of items. The moving mode is based on the conveyor belt metaphor and lets a grid of items slide through the screen in a continuous flow. In our evaluation, we applied both modes to the common task of browsing images. We performed two experiments where 18 participants had to search for certain target images in a large image collection. The number of shown images per second (pace) was predefined in the first experiment, and under user control in the second one. We conclude that at a fixed pace, the mode has no significant impact on the recall. The perceived pace is generally slower for moving mode, which causes users to systematically choose a faster real pace than in static mode at the cost of recall, keeping the average number of target images found per second equal for both modes.},
}
@article{p175,
journal = {IEEE TVCG},
year = 2014,
title = {Error Bars Considered Harmful: Exploring Alternate Encodings for Mean and Error},
doi = {10.1109/TVCG.2014.2346298},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346298},
author = {Correll, M. and Gleicher, M.},
pages = {2142--2151},
keywords = {Visual statistics, information visualization, crowd-sourcing, empirical evaluation},
abstract = {When making an inference or comparison with uncertain, noisy, or incomplete data, measurement error and confidence intervals can be as important for judgment as the actual mean values of different groups. These often misunderstood statistical quantities are frequently represented by bar charts with error bars. This paper investigates drawbacks with this standard encoding, and considers a set of alternatives designed to more effectively communicate the implications of mean and error data to a general audience, drawing from lessons learned from the use of visual statistics in the information visualization community. We present a series of crowd-sourced experiments that confirm that the encoding of mean and error significantly changes how viewers make decisions about uncertain data. Careful consideration of design tradeoffs in the visual presentation of data results in human reasoning that is more consistently aligned with statistical inferences. We suggest the use of gradient plots (which use transparency to encode uncertainty) and violin plots (which use width) as better alternatives for inferential tasks than bar charts with error bars.},
}
@article{p176,
journal = {IEEE TVCG},
year = 2014,
title = {Exploring the Placement and Design of Word-Scale Visualizations},
doi = {10.1109/TVCG.2014.2346435},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346435},
author = {Goffin, P. and Willett, W. and Fekete, J. and Isenberg, P.},
pages = {2291--2300},
keywords = {Information visualization, text visualization, sparklines, glyphs, design space, word-scale visualizations},
abstract = {We present an exploration and a design space that characterize the usage and placement of word-scale visualizations within text documents. Word-scale visualizations are a more general version of sparklines-small, word-sized data graphics that allow meta-information to be visually presented in-line with document text. In accordance with Edward Tufte's definition, sparklines are traditionally placed directly before or after words in the text. We describe alternative placements that permit a wider range of word-scale graphics and more flexible integration with text layouts. These alternative placements include positioning visualizations between lines, within additional vertical and horizontal space in the document, and as interactive overlays on top of the text. Each strategy changes the dimensions of the space available to display the visualizations, as well as the degree to which the text must be adjusted or reflowed to accommodate them. We provide an illustrated design space of placement options for word-scale visualizations and identify six important variables that control the placement of the graphics and the level of disruption of the source text. We also contribute a quantitative analysis that highlights the effect of different placements on readability and text disruption. Finally, we use this analysis to propose guidelines to support the design and placement of word-scale visualizations.},
}
@article{p177,
journal = {IEEE TVCG},
year = 2014,
title = {Four Experiments on the Perception of Bar Charts},
doi = {10.1109/TVCG.2014.2346320},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346320},
author = {Talbot, J. and Setlur, V. and Anand, A.},
pages = {2152--2160},
keywords = {Graphical perception, bar charts},
abstract = {Bar charts are one of the most common visualization types. In a classic graphical perception paper, Cleveland & McGill studied how different bar chart designs impact the accuracy with which viewers can complete simple perceptual tasks. They found that people perform substantially worse on stacked bar charts than on aligned bar charts, and that comparisons between adjacent bars are more accurate than between widely separated bars. However, the study did not explore why these differences occur. In this paper, we describe a series of follow-up experiments to further explore and explain their results. While our results generally confirm Cleveland & McGill's ranking of various bar chart configurations, we provide additional insight into the bar chart reading task and the sources of participants' errors. We use our results to propose new hypotheses on the perception of bar charts.},
}
@article{p178,
journal = {IEEE TVCG},
year = 2014,
title = {GLO-STIX: Graph-Level Operations for Specifying Techniques and Interactive eXploration},
doi = {10.1109/TVCG.2014.2346444},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346444},
author = {Stolper, C.D. and Kahng, M. and Zhiyuan Lin and Foerster, F. and Goel, A. and Stasko, J. and Duen Horng Chau},
pages = {2320--2328},
keywords = {Graph-level operations, graph visualization, visualization technique specification, graph analysis, information visualization},
abstract = {The field of graph visualization has produced a wealth of visualization techniques for accomplishing a variety of analysis tasks. Therefore analysts often rely on a suite of different techniques, and visual graph analysis application builders strive to provide this breadth of techniques. To provide a holistic model for specifying network visualization techniques (as opposed to considering each technique in isolation) we present the Graph-Level Operations (GLO) model. We describe a method for identifying GLOs and apply it to identify five classes of GLOs, which can be flexibly combined to re-create six canonical graph visualization techniques. We discuss advantages of the GLO model, including potentially discovering new, effective network visualization techniques and easing the engineering challenges of building multi-technique graph visualization applications. Finally, we implement the GLOs that we identified into the GLO-STIX prototype system that enables an analyst to interactively explore a graph by applying GLOs.},
}
@article{p179,
journal = {IEEE TVCG},
year = 2014,
title = {How Hierarchical Topics Evolve in Large Text Corpora},
doi = {10.1109/TVCG.2014.2346433},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346433},
author = {Weiwei Cui and Shixia Liu and Zhuofeng Wu and Hao Wei},
pages = {2281--2290},
keywords = {Hierarchical topic visualization, evolutionary tree clustering, data transformation},
abstract = {Using a sequence of topic trees to organize documents is a popular way to represent hierarchical and evolving topics in text corpora. However, following evolving topics in the context of topic trees remains difficult for users. To address this issue, we present an interactive visual text analysis approach to allow users to progressively explore and analyze the complex evolutionary patterns of hierarchical topics. The key idea behind our approach is to exploit a tree cut to approximate each tree and allow users to interactively modify the tree cuts based on their interests. In particular, we propose an incremental evolutionary tree cut algorithm with the goal of balancing 1) the fitness of each tree cut and the smoothness between adjacent tree cuts; 2) the historical and new information related to user interests. A time-based visualization is designed to illustrate the evolving topics over time. To preserve the mental map, we develop a stable layout algorithm. As a result, our approach can quickly guide users to progressively gain profound insights into evolving hierarchical topics. We evaluate the effectiveness of the proposed method on Amazon's Mechanical Turk and real-world news data. The results show that users are able to successfully analyze evolving topics in text data.},
}
@article{p180,
journal = {IEEE TVCG},
year = 2014,
title = {iVisDesigner: Expressive Interactive Design of Information Visualizations},
doi = {10.1109/TVCG.2014.2346291},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346291},
author = {Donghao Ren and Hollerer, T. and Xiaoru Yuan},
pages = {2092--2101},
keywords = {Visualization design, Interactive Design, Interaction, Expressiveness, Web-based visualization},
abstract = {We present the design, implementation and evaluation of iVisDesigner, a web-based system that enables users to design information visualizations for complex datasets interactively, without the need for textual programming. Our system achieves high interactive expressiveness through conceptual modularity, covering a broad information visualization design space. iVisDesigner supports the interactive design of interactive visualizations, such as provisioning for responsive graph layouts and different types of brushing and linking interactions. We present the system design and implementation, exemplify it through a variety of illustrative visualization designs and discuss its limitations. A performance analysis and an informal user study are presented to evaluate the system.},
}
@article{p181,
journal = {IEEE TVCG},
year = 2014,
title = {Learning Perceptual Kernels for Visualization Design},
doi = {10.1109/TVCG.2014.2346978},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346978},
author = {Demiralp, C. and Bernstein, M.S. and Heer, J.},
pages = {1933--1942},
keywords = {Visualization, design, encoding, perception, model, crowdsourcing, automated visualization, visual embedding},
abstract = {Visualization design can benefit from careful consideration of perception, as different assignments of visual encoding variables such as color, shape and size affect how viewers interpret data. In this work, we introduce perceptual kernels: distance matrices derived from aggregate perceptual judgments. Perceptual kernels represent perceptual differences between and within visual variables in a reusable form that is directly applicable to visualization evaluation and automated design. We report results from crowd-sourced experiments to estimate kernels for color, shape, size and combinations thereof. We analyze kernels estimated using five different judgment types-including Likert ratings among pairs, ordinal triplet comparisons, and manual spatial arrangement-and compare them to existing perceptual models. We derive recommendations for collecting perceptual similarities, and then demonstrate how the resulting kernels can be applied to automate visualization design decisions.},
}
@article{p182,
journal = {IEEE TVCG},
year = 2014,
title = {LiveGantt: Interactively Visualizing a Large Manufacturing Schedule},
doi = {10.1109/TVCG.2014.2346454},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346454},
author = {Jaemin Jo and Jaeseok Huh and Jonghun Park and Bohyoung Kim and Jinwook Seo},
pages = {2329--2338},
keywords = {Schedule visualization, event sequence visualization, simplification, exploratory interactions, simulation},
abstract = {In this paper, we introduce LiveGantt as a novel interactive schedule visualization tool that helps users explore highly-concurrent large schedules from various perspectives. Although a Gantt chart is the most common approach to illustrate schedules, currently available Gantt chart visualization tools suffer from limited scalability and lack of interactions. LiveGantt is built with newly designed algorithms and interactions to improve conventional charts with better scalability, explorability, and reschedulability. It employs resource reordering and task aggregation to display the schedules in a scalable way. LiveGantt provides four coordinated views and filtering techniques to help users explore and interact with the schedules in more flexible ways. In addition, LiveGantt is equipped with an efficient rescheduler to allow users to instantaneously modify their schedules based on their scheduling experience in the fields. To assess the usefulness of the application of LiveGantt, we conducted a case study on manufacturing schedule data with four industrial engineering researchers. Participants not only grasped an overview of a schedule but also explored the schedule from multiple perspectives to make enhancements.},
}
@article{p183,
journal = {IEEE TVCG},
year = 2014,
title = {MovExp: A Versatile Visualization Tool for Human-Computer Interaction Studies with 3D Performance and Biomechanical Data},
doi = {10.1109/TVCG.2014.2346311},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346311},
author = {Palmas, G. and Bachynskyi, M. and Oulasvirta, A. and Seidel, H.-P. and Weinkauf, T.},
pages = {2359--2368},
keywords = {Information visualization, Design study, Human-Computer Interaction},
abstract = {In Human-Computer Interaction (HCI), experts seek to evaluate and compare the performance and ergonomics of user interfaces. Recently, a novel cost-efficient method for estimating physical ergonomics and performance has been introduced to HCI. It is based on optical motion capture and biomechanical simulation. It provides a rich source for analyzing human movements summarized in a multidimensional data set. Existing visualization tools do not sufficiently support the HCI experts in analyzing this data. We identified two shortcomings. First, appropriate visual encodings are missing particularly for the biomechanical aspects of the data. Second, the physical setup of the user interface cannot be incorporated explicitly into existing tools. We present MovExp, a versatile visualization tool that supports the evaluation of user interfaces. In particular, it can be easily adapted by the HCI experts to include the physical setup that is being evaluated, and visualize the data on top of it. Furthermore, it provides a variety of visual encodings to communicate muscular loads, movement directions, and other specifics of HCI studies that employ motion capture and biomechanical simulation. In this design study, we follow a problem-driven research approach. Based on a formalization of the visualization needs and the data structure, we formulate technical requirements for the visualization tool and present novel solutions to the analysis needs of the HCI experts. We show the utility of our tool with four case studies from the daily work of our HCI experts.},
}
@article{p184,
journal = {IEEE TVCG},
year = 2014,
title = {Moving beyond sequential design: Reflections on a rich multi-channel approach to data visualization},
doi = {10.1109/TVCG.2014.2346323},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346323},
author = {Wood, J. and Beecham, R. and Dykes, J.},
pages = {2171--2180},
keywords = {Movement visualization, visual analytics, bikeshare, impact, visualization models, design study},
abstract = {We reflect on a four-year engagement with transport authorities and others involving a large dataset describing the use of a public bicycle-sharing scheme. We describe the role visualization of these data played in fostering engagement with policy makers, transport operators, the transport research community, the museum and gallery sector and the general public. We identify each of these as `channels'-evolving relationships between producers and consumers of visualization-where traditional roles of the visualization expert and domain expert are blurred. In each case, we identify the different design decisions that were required to support each of these channels and the role played by the visualization process. Using chauffeured interaction with a flexible visual analytics system we demonstrate how insight was gained by policy makers into gendered spatio-temporal cycle behaviors, how this led to further insight into workplace commuting activity, group cycling behavior and explanations for street navigation choice. We demonstrate how this supported, and was supported by, the seemingly unrelated development of narrative-driven visualization via TEDx, of the creation and the setting of an art installation and the curating of digital and physical artefacts. We assert that existing models of visualization design, of tool/technique development and of insight generation do not adequately capture the richness of parallel engagement via these multiple channels of communication. We argue that developing multiple channels in parallel opens up opportunities for visualization design and analysis by building trust and authority and supporting creativity. This rich, non-sequential approach to visualization design is likely to foster serendipity, deepen insight and increase impact.},
}
@article{p185,
journal = {IEEE TVCG},
year = 2014,
title = {Multivariate Network Exploration and Presentation: From Detail to Overview via Selections and Aggregations},
doi = {10.1109/TVCG.2014.2346441},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346441},
author = {van den Elzen, S. and van Wijk, J.J.},
pages = {2310--2319},
keywords = {Multivariate Networks, Selections of Interest, Interaction, Direct Manipulation},
abstract = {Network data is ubiquitous; e-mail traffic between persons, telecommunication, transport and financial networks are some examples. Often these networks are large and multivariate, besides the topological structure of the network, multivariate data on the nodes and links is available. Currently, exploration and analysis methods are focused on a single aspect; the network topology or the multivariate data. In addition, tools and techniques are highly domain specific and require expert knowledge. We focus on the non-expert user and propose a novel solution for multivariate network exploration and analysis that tightly couples structural and multivariate analysis. In short, we go from Detail to Overview via Selections and Aggregations (DOSA): users are enabled to gain insights through the creation of selections of interest (manually or automatically), and producing high-level, infographic-style overviews simultaneously. Finally, we present example explorations on real-world datasets that demonstrate the effectiveness of our method for the exploration and understanding of multivariate networks where presentation of findings comes for free.},
}
@article{p186,
journal = {IEEE TVCG},
year = 2014,
title = {NeuroLines: A Subway Map Metaphor for Visualizing Nanoscale Neuronal Connectivity},
doi = {10.1109/TVCG.2014.2346312},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346312},
author = {Al-Awami, A. and Beyer, J. and Strobelt, H. and Kasthuri, N. and Lichtman, J. and Pfister, H. and Hadwiger, M.},
pages = {2369--2378},
keywords = {Connectomics, Neuroscience, Data Abstraction, Multi-Trees, Focus+Context},
abstract = {We present NeuroLines, a novel visualization technique designed for scalable detailed analysis of neuronal connectivity at the nanoscale level. The topology of 3D brain tissue data is abstracted into a multi-scale, relative distance-preserving subway map visualization that allows domain scientists to conduct an interactive analysis of neurons and their connectivity. Nanoscale connectomics aims at reverse-engineering the wiring of the brain. Reconstructing and analyzing the detailed connectivity of neurons and neurites (axons, dendrites) will be crucial for understanding the brain and its development and diseases. However, the enormous scale and complexity of nanoscale neuronal connectivity pose big challenges to existing visualization techniques in terms of scalability. NeuroLines offers a scalable visualization framework that can interactively render thousands of neurites, and that supports the detailed analysis of neuronal structures and their connectivity. We describe and analyze the design of NeuroLines based on two real-world use-cases of our collaborators in developmental neuroscience, and investigate its scalability to large-scale neuronal connectivity data.},
}
@article{p187,
journal = {IEEE TVCG},
year = 2014,
title = {Nmap: A Novel Neighborhood Preservation Space-filling Algorithm},
doi = {10.1109/TVCG.2014.2346276},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346276},
author = {Duarte, F.S.L.G. and Sikansi, F. and Fatore, F.M. and Fadel, S.G. and Paulovich, F.V.},
pages = {2063--2071},
keywords = {Space-filling techniques, treemaps, distance-similarity preservation},
abstract = {Space-filling techniques seek to use as much as possible the visual space to represent a dataset, splitting it into regions that represent the data elements. Amongst those techniques, Treemaps have received wide attention due to their simplicity, reduced visual complexity, and compact use of the available space. Several different Treemap algorithms have been proposed, however the core idea is the same, to divide the visual space into rectangles with areas proportional to some data attribute or weight. Although pleasant layouts can be effectively produced by the existing techniques, most of them do not take into account relationships that might exist between different data elements when partitioning the visual space. This violates the distance-similarity metaphor, that is, close rectangles do not necessarily represent similar data elements. In this paper, we propose a novel approach, called Neighborhood Treemap (Nmap), that seeks to solve this limitation by employing a slice and scale strategy where the visual space is successively bisected on the horizontal or vertical directions and the bisections are scaled until one rectangle is defined per data element. Compared to the current techniques with the same similarity preservation goal, our approach presents the best results while being two to three orders of magnitude faster. The usefulness of Nmap is shown by two applications involving the organization of document collections and the construction of cartograms illustrating its effectiveness on different scenarios.},
}
@article{p188,
journal = {IEEE TVCG},
year = 2014,
title = {Node, Node-Link, and Node-Link-Group Diagrams: An Evaluation},
doi = {10.1109/TVCG.2014.2346422},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346422},
author = {Saket, B. and Simonetto, P. and Kobourov, S. and Borner, K.},
pages = {2231--2240},
keywords = {graphs, networks, maps, scatter plots},
abstract = {Effectively showing the relationships between objects in a dataset is one of the main tasks in information visualization. Typically there is a well-defined notion of distance between pairs of objects, and traditional approaches such as principal component analysis or multi-dimensional scaling are used to place the objects as points in 2D space, so that similar objects are close to each other. In another typical setting, the dataset is visualized as a network graph, where related nodes are connected by links. More recently, datasets are also visualized as maps, where in addition to nodes and links, there is an explicit representation of groups and clusters. We consider these three techniques, characterized by a progressive increase of the amount of encoded information: node diagrams, node-link diagrams and node-link-group diagrams. We assess these three types of diagrams with a controlled experiment that covers nine different tasks falling broadly in three categories: node-based tasks, network-based tasks and group-based tasks. Our findings indicate that adding links, or links and group representations, does not negatively impact performance (time and accuracy) of node-based tasks. Similarly, adding group representations does not negatively impact the performance of network-based tasks. Node-link-group diagrams outperform the others on group-based tasks. These conclusions contradict results in other studies, in similar but subtly different settings. Taken together, however, such results can have significant implications for the design of standard and domain-specific visualization tools.},
}
@article{p189,
journal = {IEEE TVCG},
year = 2014,
title = {OnSet: A Visualization Technique for Large-scale Binary Set Data},
doi = {10.1109/TVCG.2014.2346249},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346249},
author = {Sadana, R. and Major, T. and Dove, A. and Stasko, J.},
pages = {1993--2002},
keywords = {Set visualization, information visualization, direct manipulation, Euler diagrams, interaction, logical operations},
abstract = {Visualizing sets to reveal relationships between constituent elements is a complex representational problem. Recent research presents several automated placement and grouping techniques to highlight connections between set elements. However, these techniques do not scale well for sets with cardinality greater than one hundred elements. We present OnSet, an interactive, scalable visualization technique for representing large-scale binary set data. The visualization technique defines a single, combined domain of elements for all sets, and models each set by the elements that it both contains and does not contain. OnSet employs direct manipulation interaction and visual highlighting to support easy identification of commonalities and differences as well as membership patterns across different sets of elements. We present case studies to illustrate how the technique can be successfully applied across different domains such as bio-chemical metabolomics and task and event scheduling.},
}
@article{p190,
journal = {IEEE TVCG},
year = 2014,
title = {Order of Magnitude Markers: An Empirical Study on Large Magnitude Number Detection},
doi = {10.1109/TVCG.2014.2346428},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346428},
author = {Borgo, R. and Dearden, J. and Jones, M.W.},
pages = {2261--2270},
keywords = {Orders of magnitude, bar charts, logarithmic scale},
abstract = {In this paper we introduce Order of Magnitude Markers (OOMMs) as a new technique for number representation. The motivation for this work is that many data sets require the depiction and comparison of numbers that have varying orders of magnitude. Existing techniques for representation use bar charts, plots and colour on linear or logarithmic scales. These all suffer from related problems. There is a limit to the dynamic range available for plotting numbers, and so the required dynamic range of the plot can exceed that of the depiction method. When that occurs, resolving, comparing and relating values across the display becomes problematical or even impossible for the user. With this in mind, we present an empirical study in which we compare logarithmic, linear, scale-stack bars and our new markers for 11 different stimuli grouped into 4 different tasks across all 8 marker types.},
}
@article{p191,
journal = {IEEE TVCG},
year = 2014,
title = {Origin-Destination Flow Data Smoothing and Mapping},
doi = {10.1109/TVCG.2014.2346271},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346271},
author = {Diansheng Guo and Xi Zhu},
pages = {2043--2052},
keywords = {flow mapping, kernel smoothing, generalization, multi-resolution mapping, graph drawing, spatial data mining},
abstract = {This paper presents a new approach to flow mapping that extracts inherent patterns from massive geographic mobility data and constructs effective visual representations of the data for the understanding of complex flow trends. This approach involves a new method for origin-destination flow density estimation and a new method for flow map generalization, which together can remove spurious data variance, normalize flows with control population, and detect high-level patterns that are not discernable with existing approaches. The approach achieves three main objectives in addressing the challenges for analyzing and mapping massive flow data. First, it removes the effect of size differences among spatial units via kernel-based density estimation, which produces a measurement of flow volume between each pair of origin and destination. Second, it extracts major flow patterns in massive flow data through a new flow sampling method, which filters out duplicate information in the smoothed flows. Third, it enables effective flow mapping and allows intuitive perception of flow patterns among origins and destinations without bundling or altering flow paths. The approach can work with both point-based flow data (such as taxi trips with GPS locations) and area-based flow data (such as county-to-county migration). Moreover, the approach can be used to detect and compare flow patterns at different scales or in relatively sparse flow datasets, such as migration for each age group. We evaluate and demonstrate the new approach with case studies of U.S. migration data and experiments with synthetic data.},
}
@article{p192,
journal = {IEEE TVCG},
year = 2014,
title = {Overview: The Design, Adoption, and Analysis of a Visual Document Mining Tool for Investigative Journalists},
doi = {10.1109/TVCG.2014.2346431},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346431},
author = {Brehmer, M. and Ingram, S. and Stray, J. and Munzner, T.},
pages = {2271--2280},
keywords = {Design study, investigative journalism, task and requirements analysis, text and document data, text analysis},
abstract = {For an investigative journalist, a large collection of documents obtained from a Freedom of Information Act request or a leak is both a blessing and a curse: such material may contain multiple newsworthy stories, but it can be difficult and time consuming to find relevant documents. Standard text search is useful, but even if the search target is known it may not be possible to formulate an effective query. In addition, summarization is an important non-search task. We present Overview, an application for the systematic analysis of large document collections based on document clustering, visualization, and tagging. This work contributes to the small set of design studies which evaluate a visualization system ``in the wild'', and we report on six case studies where Overview was voluntarily used by self-initiated journalists to produce published stories. We find that the frequently-used language of ``exploring'' a document collection is both too vague and too narrow to capture how journalists actually used our application. Our iterative process, including multiple rounds of deployment and observations of real world usage, led to a much more specific characterization of tasks. We analyze and justify the visual encoding and interaction techniques used in Overview's design with respect to our final task abstractions, and propose generalizable lessons for visualization design methodology.},
}
@article{p193,
journal = {IEEE TVCG},
year = 2014,
title = {PanoramicData: Data Analysis through Pen & Touch},
doi = {10.1109/TVCG.2014.2346293},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346293},
author = {Zgraggen, E. and Zeleznik, R. and Drucker, S.},
pages = {2112--2121},
keywords = {Visual analytics, pen and touch, user interfaces, interaction design, coordinated and multiple views},
abstract = {Interactively exploring multidimensional datasets requires frequent switching among a range of distinct but inter-related tasks (e.g., producing different visuals based on different column sets, calculating new variables, and observing the interactions between sets of data). Existing approaches either target specific different problem domains (e.g., data-transformation or data-presentation) or expose only limited aspects of the general exploratory process; in either case, users are forced to adopt coping strategies (e.g., arranging windows or using undo as a mechanism for comparison instead of using side-by-side displays) to compensate for the lack of an integrated suite of exploratory tools. PanoramicData (PD) addresses these problems by unifying a comprehensive set of tools for visual data exploration into a hybrid pen and touch system designed to exploit the visualization advantages of large interactive displays. PD goes beyond just familiar visualizations by including direct UI support for data transformation and aggregation, filtering and brushing. Leveraging an unbounded whiteboard metaphor, users can combine these tools like building blocks to create detailed interactive visual display networks in which each visualization can act as a filter for others. Further, by operating directly on relational-databases, PD provides an approachable visual language that exposes a broad set of the expressive power of SQL including functionally complete logic filtering, computation of aggregates and natural table joins. To understand the implications of this novel approach, we conducted a formative user study with both data and visualization experts. The results indicated that the system provided a fluid and natural user experience for probing multi-dimensional data and was able to cover the full range of queries that the users wanted to pose.},
}
@article{p194,
journal = {IEEE TVCG},
year = 2014,
title = {Ranking Visualizations of Correlation Using Weber's Law},
doi = {10.1109/TVCG.2014.2346979},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346979},
author = {Harrison, L. and Fumeng Yang and Franconeri, S. and Chang, R.},
pages = {1943--1952},
keywords = {Perception, Visualization, Evaluation},
abstract = {Despite years of research yielding systems and guidelines to aid visualization design, practitioners still face the challenge of identifying the best visualization for a given dataset and task. One promising approach to circumvent this problem is to leverage perceptual laws to quantitatively evaluate the effectiveness of a visualization design. Following previously established methodologies, we conduct a large scale (n = 1687) crowdsourced experiment to investigate whether the perception of correlation in nine commonly used visualizations can be modeled using Weber's law. The results of this experiment contribute to our understanding of information visualization by establishing that: (1) for all tested visualizations, the precision of correlation judgment could be modeled by Weber's law, (2) correlation judgment precision showed striking variation between negatively and positively correlated data, and (3) Weber models provide a concise means to quantify, compare, and rank the perceptual precision afforded by a visualization.},
}
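Illustrative sketch for the entry above (p194): Weber's law states that the just-noticeable difference grows proportionally with stimulus magnitude, jnd = k * magnitude. The short Python sketch below fits such a Weber fraction k by least squares. It is a generic illustration only, not the authors' experimental model; the function name fit_weber_fraction and the sample magnitude/JND values are made up for the example.

# Generic Weber's-law sketch: fit jnd = k * magnitude through the origin.
# Not the paper's model; the example data below are hypothetical.

def fit_weber_fraction(magnitudes, jnds):
    """Least-squares estimate of k in jnd = k * magnitude (regression through the origin)."""
    numerator = sum(m * j for m, j in zip(magnitudes, jnds))
    denominator = sum(m * m for m in magnitudes)
    return numerator / denominator

if __name__ == "__main__":
    magnitudes = [0.2, 0.4, 0.6, 0.8]        # hypothetical stimulus levels
    jnds = [0.021, 0.039, 0.062, 0.078]      # hypothetical measured just-noticeable differences
    print(f"estimated Weber fraction k = {fit_weber_fraction(magnitudes, jnds):.3f}")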
@article{p195,
journal = {IEEE TVCG},
year = 2014,
title = {Reinforcing Visual Grouping Cues to Communicate Complex Informational Structure},
doi = {10.1109/TVCG.2014.2346998},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346998},
author = {Bae, J. and Watson, B.},
pages = {1973--1982},
keywords = {Visual grouping, visual hierarchy, gestalt principles, perception, visual communication},
abstract = {In his book Multimedia Learning [7], Richard Mayer asserts that viewers learn best from imagery that provides them with cues to help them organize new information into the correct knowledge structures. Designers have long been exploiting the Gestalt laws of visual grouping to deliver viewers those cues using visual hierarchy, often communicating structures much more complex than the simple organizations studied in psychological research. Unfortunately, designers are largely practical in their work, and have not paused to build a complex theory of structural communication. If we are to build a tool to help novices create effective and well structured visuals, we need a better understanding of how to create them. Our work takes a first step toward addressing this lack, studying how five of the many grouping cues (proximity, color similarity, common region, connectivity, and alignment) can be effectively combined to communicate structured text and imagery from real world examples. To measure the effectiveness of this structural communication, we applied a digital version of card sorting, a method widely used in anthropology and cognitive science to extract cognitive structures. We then used tree edit distance to measure the difference between perceived and communicated structures. Our most significant findings are: 1) with careful design, complex structure can be communicated clearly; 2) communicating complex structure is best done with multiple reinforcing grouping cues; 3) common region (use of containers such as boxes) is particularly effective at communicating structure; and 4) alignment is a weak structural communicator.},
}
@article{p196,
journal = {IEEE TVCG},
year = 2014,
title = {Revisiting Bertin Matrices: New Interactions for Crafting Tabular Visualizations},
doi = {10.1109/TVCG.2014.2346279},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346279},
author = {Perin, C. and Dragicevic, P. and Fekete, J.},
pages = {2082--2091},
keywords = {Visualization, Interaction, Tabular Data, Bertin, Crossing, Crossets},
abstract = {We present Bertifier, a web app for rapidly creating tabular visualizations from spreadsheets. Bertifier draws from Jacques Bertin's matrix analysis method, whose goal was to “simplify without destroying” by encoding cell values visually and grouping similar rows and columns. Although there were several attempts to bring this method to computers, no implementation exists today that is both exhaustive and accessible to a large audience. Bertifier remains faithful to Bertin's method while leveraging the power of today's interactive computers. Tables are formatted and manipulated through crossets, a new interaction technique for rapidly applying operations on rows and columns. We also introduce visual reordering, a semi-interactive reordering approach that lets users apply and tune automatic reordering algorithms in a WYSIWYG manner. Sessions with eight users from different backgrounds suggest that Bertifier has the potential to bring Bertin's method to a wider audience of both technical and non-technical users, and empower them with data analysis and communication tools that were so far only accessible to a handful of specialists.},
}
@article{p197,
journal = {IEEE TVCG},
year = 2014,
title = {Stenomaps: Shorthand for shapes},
doi = {10.1109/TVCG.2014.2346274},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346274},
author = {van Goethem, A. and Reimer, A. and Speckmann, B. and Wood, J.},
pages = {2053--2062},
keywords = {Schematisation, Maps, Algorithm, Design},
abstract = {We address some of the challenges in representing spatial data with a novel form of geometric abstraction-the stenomap. The stenomap comprises a series of smoothly curving linear glyphs that each represent both the boundary and the area of a polygon. We present an efficient algorithm to automatically generate these open, C1-continuous splines from a set of input polygons. Feature points of the input polygons are detected using the medial axis to maintain important shape properties. We use dynamic programming to compute a planar non-intersecting spline representing each polygon's base shape. The results are stylised glyphs whose appearance may be parameterised and that offer new possibilities in the 'cartographic design space'. We compare our glyphs with existing forms of geometric schematisation and discuss their relative merits and shortcomings. We describe several use cases including the depiction of uncertain model data in the form of hurricane track forecasting; minimal ink thematic mapping; and the depiction of continuous statistical data.},
}
@article{p198,
journal = {IEEE TVCG},
year = 2014,
title = {TenniVis: Visualization for Tennis Match Analysis},
doi = {10.1109/TVCG.2014.2346445},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346445},
author = {Polk, T. and Jing Yang and Yueqi Hu and Ye Zhao},
pages = {2339--2348},
keywords = {Visual knowledge discovery, sports analytics, tennis visualization},
abstract = {Existing research efforts into tennis visualization have primarily focused on using ball and player tracking data to enhance professional tennis broadcasts and to aid coaches in helping their students. Gathering and analyzing this data typically requires the use of an array of synchronized cameras, which are expensive for non-professional tennis matches. In this paper, we propose TenniVis, a novel tennis match visualization system that relies entirely on data that can be easily collected, such as score, point outcomes, point lengths, service information, and match videos that can be captured by one consumer-level camera. It provides two new visualizations to allow tennis coaches and players to quickly gain insights into match performance. It also provides rich interactions to support ad hoc hypothesis development and testing. We first demonstrate the usefulness of the system by analyzing the 2007 Australian Open men's singles final. We then validate its usability by two pilot user studies where two college tennis coaches analyzed the matches of their own players. The results indicate that useful insights can quickly be discovered and ad hoc hypotheses based on these insights can conveniently be tested through linked match videos.},
}
@article{p199,
journal = {IEEE TVCG},
year = 2014,
title = {The Effects of Interactive Latency on Exploratory Visual Analysis},
doi = {10.1109/TVCG.2014.2346452},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346452},
author = {Zhicheng Liu and Heer, J.},
pages = {2122--2131},
keywords = {Interaction, latency, exploratory analysis, interactive visualization, scalability, user performance, verbal analysis},
abstract = {To support effective exploration, it is often stated that interactive visualizations should provide rapid response times. However, the effects of interactive latency on the process and outcomes of exploratory visual analysis have not been systematically studied. We present an experiment measuring user behavior and knowledge discovery with interactive visualizations under varying latency conditions. We observe that an additional delay of 500ms incurs significant costs, decreasing user activity and data set coverage. Analyzing verbal data from think-aloud protocols, we find that increased latency reduces the rate at which users make observations, draw generalizations and generate hypotheses. Moreover, we note interaction effects in which initial exposure to higher latencies leads to subsequently reduced performance in a low-latency setting. Overall, increased latency causes users to shift exploration strategy, in turn affecting performance. We discuss how these results can inform the design of interactive analysis tools.},
}
@article{p200,
journal = {IEEE TVCG},
year = 2014,
title = {The Influence of Contour on Similarity Perception of Star Glyphs},
doi = {10.1109/TVCG.2014.2346426},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346426},
author = {Fuchs, J. and Isenberg, P. and Bezerianos, A. and Fischer, F. and Bertini, E.},
pages = {2251--2260},
keywords = {Glyphs, star glyphs, contours, perception, quantitative evaluation, similarity detection, visual comparison},
abstract = {We conducted three experiments to investigate the effects of contours on the detection of data similarity with star glyph variations. A star glyph is a small, compact, data graphic that represents a multi-dimensional data point. Star glyphs are often used in small-multiple settings, to represent data points in tables, on maps, or as overlays on other types of data graphics. In these settings, an important task is the visual comparison of the data points encoded in the star glyph, for example to find other similar data points or outliers. We hypothesized that for data comparisons, the overall shape of a star glyph-enhanced through contour lines-would aid the viewer in making accurate similarity judgments. To test this hypothesis, we conducted three experiments. In our first experiment, we explored how the use of contours influenced how visualization experts and trained novices chose glyphs with similar data values. Our results showed that glyphs without contours make the detection of data similarity easier. Given these results, we conducted a second study to understand intuitive notions of similarity. Star glyphs without contours most intuitively supported the detection of data similarity. In a third experiment, we tested the effect of star glyph reference structures (i.e., tickmarks and gridlines) on the detection of similarity. Surprisingly, our results show that adding reference structures does improve the correctness of similarity judgments for star glyphs with contours, but not for the standard star glyph. As a result of these experiments, we conclude that the simple star glyph without contours performs best under several criteria, reinforcing its practice and popularity in the literature. Contours seem to enhance the detection of other types of similarity, e.g., shape similarity and are distracting when data similarity has to be judged. Based on these findings we provide design considerations regarding the use of contours and reference structures on star glyphs.},
}
@article{p201,
journal = {IEEE TVCG},
year = 2014,
title = {The Not-so-Staggering Effect of Staggered Animated Transitions on Visual Tracking},
doi = {10.1109/TVCG.2014.2346424},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346424},
author = {Chevalier, F. and Dragicevic, P. and Franconeri, S.},
pages = {2241--2250},
keywords = {Animated transitions, staggered animation, visual tracking},
abstract = {Interactive visual applications often rely on animation to transition from one display state to another. There are multiple animation techniques to choose from, and it is not always clear which should produce the best visual correspondences between display elements. One major factor is whether the animation relies on staggering-an incremental delay in start times across the moving elements. It has been suggested that staggering may reduce occlusion, while also reducing display complexity and producing less overwhelming animations, though no empirical evidence has demonstrated these advantages. Work in perceptual psychology does show that reducing occlusion, and reducing inter-object proximity (crowding) more generally, improves performance in multiple object tracking. We ran simulations confirming that staggering can in some cases reduce crowding in animated transitions involving dot clouds (as found in, e.g., animated 2D scatterplots). We empirically evaluated the effect of two staggering techniques on tracking tasks, focusing on cases that should most favour staggering. We found that introducing staggering has a negligible, or even negative, impact on multiple object tracking performance. The potential benefits of staggering may be outweighed by strong costs: a loss of common-motion grouping information about which objects travel in similar paths, and less predictability about when any specific object would begin to move. Staggering may be beneficial in some conditions, but they have yet to be demonstrated. The present results are a significant step toward a better understanding of animation pacing, and provide direction for further research.},
}
@article{p202,
journal = {IEEE TVCG},
year = 2014,
title = {The Persuasive Power of Data Visualization},
doi = {10.1109/TVCG.2014.2346419},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346419},
author = {Pandey, A.V. and Manivannan, A. and Nov, O. and Satterthwaite, M. and Bertini, E.},
pages = {2211--2220},
keywords = {Persuasive visualization, elaboration likelihood model, evaluation},
abstract = {Data visualization has been used extensively to inform users. However, little research has been done to examine the effects of data visualization in influencing users or in making a message more persuasive. In this study, we present experimental research to fill this gap and present an evidence-based analysis of persuasive visualization. We built on persuasion research from psychology and user interfaces literature in order to explore the persuasive effects of visualization. In this experimental study we define the circumstances under which data visualization can make a message more persuasive, propose hypotheses, and perform quantitative and qualitative analyses on studies conducted to test these hypotheses. We compare visual treatments with data presented through barcharts and linecharts on the one hand, treatments with data presented through tables on the other, and then evaluate their persuasiveness. The findings represent a first step in exploring the effectiveness of persuasive visualization.},
}
@article{p203,
journal = {IEEE TVCG},
year = 2014,
title = {The relation between visualization size, grouping, and user performance},
doi = {10.1109/TVCG.2014.2346983},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346983},
author = {Gramazio, C. and Schloss, K.B. and Laidlaw, D.H.},
pages = {1953--1962},
keywords = {information visualization, graphical perception, size, layout},
abstract = {In this paper we make the following contributions: (1) we describe how the grouping, quantity, and size of visual marks affects search time based on the results from two experiments; (2) we report how search performance relates to self-reported difficulty in finding the target for different display types; and (3) we present design guidelines based on our findings to facilitate the design of effective visualizations. Both Experiment 1 and 2 asked participants to search for a unique target in colored visualizations to test how the grouping, quantity, and size of marks affects user performance. In Experiment 1, the target square was embedded in a grid of squares and in Experiment 2 the target was a point in a scatterplot. Search performance was faster when colors were spatially grouped than when they were randomly arranged. The quantity of marks had little effect on search time for grouped displays (“pop-out”), but increasing the quantity of marks slowed reaction time for random displays. Regardless of color layout (grouped vs. random), response times were slowest for the smallest mark size and decreased as mark size increased to a point, after which response times plateaued. In addition to these two experiments we also include potential application areas, as well as results from a small case study where we report preliminary findings that size may affect how users infer how visualizations should be used. We conclude with a list of design guidelines that focus on how to best create visualizations based on grouping, quantity, and size of visual marks.},
}
@article{p204,
journal = {IEEE TVCG},
year = 2014,
title = {Tree Colors: Color Schemes for Tree-Structured Data},
doi = {10.1109/TVCG.2014.2346277},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346277},
author = {Tennekes, M. and de Jonge, E.},
pages = {2072--2081},
keywords = {Color schemes, statistical graphics, hierarchical data},
abstract = {We present a method to map tree structures to colors from the Hue-Chroma-Luminance color model, which is known for its well balanced perceptual properties. The Tree Colors method can be tuned with several parameters, whose effect on the resulting color schemes is discussed in detail. We provide a free and open source implementation with sensible parameter defaults. Categorical data are very common in statistical graphics, and often these categories form a classification tree. We evaluate applying Tree Colors to tree structured data with a survey on a large group of users from a national statistical institute. Our user study suggests that Tree Colors are useful, not only for improving node-link diagrams, but also for unveiling tree structure in non-hierarchical visualizations.},
}
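Illustrative sketch for the entry above (p204): one generic way to derive colors from a tree is to give each node the midpoint hue of an interval and let its children split that interval among themselves. The Python sketch below shows only that recursive hue subdivision; it is not the Tree Colors algorithm, which additionally varies chroma and luminance with depth and permutes hue ranges in HCL space. The assign_hues function and the example tree are invented for illustration.

# Generic tree-to-hue sketch: each node takes the midpoint of a hue interval,
# and its children subdivide that interval. Not the Tree Colors method itself.

def assign_hues(tree, lo=0.0, hi=360.0, hues=None):
    """tree: dict mapping a node name to a list of child subtrees of the same shape."""
    if hues is None:
        hues = {}
    for name, children in tree.items():
        hues[name] = (lo + hi) / 2.0
        if children:
            step = (hi - lo) / len(children)
            for i, child in enumerate(children):
                assign_hues(child, lo + i * step, lo + (i + 1) * step, hues)
    return hues

if __name__ == "__main__":
    # hypothetical classification tree
    tree = {"root": [{"animals": [{"cats": []}, {"dogs": []}]}, {"plants": []}]}
    for node, hue in assign_hues(tree).items():
        print(f"{node}: hue {hue:.0f} deg")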
@article{p205,
journal = {IEEE TVCG},
year = 2014,
title = {UpSet: Visualization of Intersecting Sets},
doi = {10.1109/TVCG.2014.2346248},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346248},
author = {Lex, A. and Gehlenborg, N. and Strobelt, H. and Vuillemot, R. and Pfister, H.},
pages = {1983--1992},
keywords = {Sets, set visualization, sets intersections, set attributes, set relationships, multidimensional data},
abstract = {Understanding relationships between sets is an important analysis task that has received widespread attention in the visualization community. The major challenge in this context is the combinatorial explosion of the number of set intersections if the number of sets exceeds a trivial threshold. In this paper we introduce UpSet, a novel visualization technique for the quantitative analysis of sets, their intersections, and aggregates of intersections. UpSet is focused on creating task-driven aggregates, communicating the size and properties of aggregates and intersections, and a duality between the visualization of the elements in a dataset and their set membership. UpSet visualizes set intersections in a matrix layout and introduces aggregates based on groupings and queries. The matrix layout enables the effective representation of associated data, such as the number of elements in the aggregates and intersections, as well as additional summary statistics derived from subset or element attributes. Sorting according to various measures enables a task-driven analysis of relevant intersections and aggregates. The elements represented in the sets and their associated attributes are visualized in a separate view. Queries based on containment in specific intersections, aggregates or driven by attribute filters are propagated between both views. We also introduce several advanced visual encodings and interaction methods to overcome the problems of varying scales and to address scalability. UpSet is web-based and open source. We demonstrate its general utility in multiple use cases from various domains.},
}
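Illustrative sketch for the entry above (p205): a matrix-style set visualization such as UpSet rests on computing, for every combination of sets, the elements that belong to exactly that combination. The Python sketch below computes those exclusive intersections; it is a minimal illustration rather than the UpSet implementation, and exclusive_intersections and the example sets A, B, C are made up.

# Exclusive set intersections: elements belonging to exactly a given combination of sets.
# Illustrative only; not the UpSet implementation. The example sets are hypothetical.
from itertools import combinations

def exclusive_intersections(sets):
    """Map each combination of set names to the elements contained in exactly those sets."""
    names = list(sets)
    result = {}
    for r in range(1, len(names) + 1):
        for combo in combinations(names, r):
            inside = set.intersection(*(sets[n] for n in combo))
            outside = set().union(*(sets[n] for n in names if n not in combo))
            result[combo] = inside - outside
    return result

if __name__ == "__main__":
    sets = {"A": {1, 2, 3, 4}, "B": {3, 4, 5}, "C": {4, 5, 6}}
    for combo, elements in exclusive_intersections(sets).items():
        if elements:
            print(" & ".join(combo), "->", sorted(elements))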
@article{p206,
journal = {IEEE TVCG},
year = 2014,
title = {Visual Parameter Space Analysis: A Conceptual Framework},
doi = {10.1109/TVCG.2014.2346321},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346321},
author = {Sedlmair, M. and Heinzl, C. and Bruckner, S. and Piringer, H. and Moller, T.},
pages = {2161--2170},
keywords = {Parameter space analysis, input-output model, simulation, task characterization, literature analysis},
abstract = {Various case studies in different application domains have shown the great potential of visual parameter space analysis to support validating and using simulation models. In order to guide and systematize research endeavors in this area, we provide a conceptual framework for visual parameter space analysis problems. The framework is based on our own experience and a structured analysis of the visualization literature. It contains three major components: (1) a data flow model that helps to abstractly describe visual parameter space analysis problems independent of their application domain; (2) a set of four navigation strategies of how parameter space analysis can be supported by visualization tools; and (3) a characterization of six analysis tasks. Based on our framework, we analyze and classify the current body of literature, and identify three open research gaps in visual parameter space analysis. The framework and its discussion are meant to support visualization designers and researchers in characterizing parameter space analysis problems and to guide their design and evaluation processes.},
}
@article{p207,
journal = {IEEE TVCG},
year = 2014,
title = {Visualizing Statistical Mix Effects and Simpson's Paradox},
doi = {10.1109/TVCG.2014.2346297},
url = {http://dx.doi.org/10.1109/TVCG.2014.2346297},
author = {Armstrong, Z. and Wattenberg, M.},
pages = {2132--2141},
keywords = {Mix effects, Omitted variable bias, Simpson's paradox, Statistics},
abstract = {We discuss how “mix effects” can surprise users of visualizations and potentially lead them to incorrect conclusions. This statistical issue (also known as “omitted variable bias” or, in extreme cases, as “Simpson's paradox”) is widespread and can affect any visualization in which the quantity of interest is an aggregated value such as a weighted sum or average. Our first contribution is to document how mix effects can be a serious issue for visualizations, and we analyze how mix effects can cause problems in a variety of popular visualization techniques, from bar charts to treemaps. Our second contribution is a new technique, the “comet chart,” that is meant to ameliorate some of these issues.},
}
@article{p296,
journal = {IEEE TVCG},
year = 2013,
title = {A Deeper Understanding of Sequence in Narrative Visualization},
doi = {10.1109/TVCG.2013.119},
url = {http://dx.doi.org/10.1109/TVCG.2013.119},
author = {Hullman, J. and Drucker, S. and Riche, N.H. and Bongshin Lee and Fisher, D. and Adar, E.},
pages = {2406--2415},
keywords = {Data storytelling, narrative visualization, narrative structure},
abstract = {Conveying a narrative with visualizations often requires choosing an order in which to present visualizations. While evidence exists that narrative sequencing in traditional stories can affect comprehension and memory, little is known about how sequencing choices affect narrative visualization. We consider the forms and reactions to sequencing in narrative visualization presentations to provide a deeper understanding with a focus on linear, 'slideshow-style' presentations. We conduct a qualitative analysis of 42 professional narrative visualizations to gain empirical knowledge on the forms that structure and sequence take. Based on the results of this study we propose a graph-driven approach for automatically identifying effective sequences in a set of visualizations to be presented linearly. Our approach identifies possible transitions in a visualization set and prioritizes local (visualization-to-visualization) transitions based on an objective function that minimizes the cost of transitions from the audience perspective. We conduct two studies to validate this function. We also expand the approach with additional knowledge of user preferences for different types of local transitions and the effects of global sequencing strategies on memory, preference, and comprehension. Our results include a relative ranking of types of visualization transitions by the audience perspective and support for memory and subjective rating benefits of visualization sequences that use parallelism as a structural device. We discuss how these insights can guide the design of narrative visualization and systems that support optimization of visualization sequence.},
}
@article{p297,
journal = {IEEE TVCG},
year = 2013,
title = {A Design Space of Visualization Tasks},
doi = {10.1109/TVCG.2013.120},
url = {http://dx.doi.org/10.1109/TVCG.2013.120},
author = {Schulz, H. and Nocke, T. and Heitzler, M. and Schumann, H.},
pages = {2366--2375},
keywords = {Task taxonomy, design space, climate impact research, visualization recommendation},
abstract = {Knowledge about visualization tasks plays an important role in choosing or building suitable visual representations to pursue them. Yet, tasks are a multi-faceted concept and it is thus not surprising that the many existing task taxonomies and models all describe different aspects of tasks, depending on what these task descriptions aim to capture. This results in a clear need to bring these different aspects together under the common hood of a general design space of visualization tasks, which we propose in this paper. Our design space consists of five design dimensions that characterize the main aspects of tasks and that have so far been distributed across different task descriptions. We exemplify its concrete use by applying our design space in the domain of climate impact research. To this end, we propose interfaces to our design space for different user roles (developers, authors, and end users) that allow users of different levels of expertise to work with it.},
}
@article{p298,
journal = {IEEE TVCG},
year = 2013,
title = {A Model for Structure-Based Comparison of Many Categories in Small-Multiple Displays},
doi = {10.1109/TVCG.2013.122},
url = {http://dx.doi.org/10.1109/TVCG.2013.122},
author = {Kehrer, J. and Piringer, H. and Berger, W. and Groller, E.},
pages = {2287--2296},
keywords = {Comparative visualization, small-multiple displays, trellis displays, categorical data},
abstract = {Many application domains deal with multi-variate data that consist of both categorical and numerical information. Small-multiple displays are a powerful concept for comparing such data by juxtaposition. For comparison by overlay or by explicit encoding of computed differences, however, a specification of references is necessary. In this paper, we present a formal model for defining semantically meaningful comparisons between many categories in a small-multiple display. Based on pivotized data that are hierarchically partitioned by the categories assigned to the x and y axis of the display, we propose two alternatives for structure-based comparison within this hierarchy. With an absolute reference specification, categories are compared to a fixed reference category. With a relative reference specification, in contrast, a semantic ordering of the categories is considered when comparing them either to the previous or subsequent category each. Both reference specifications can be defined at multiple levels of the hierarchy (including aggregated summaries), enabling a multitude of useful comparisons. We demonstrate the general applicability of our model in several application examples using different visualizations that compare data by overlay or explicit encoding of differences.},
}
@article{p299,
journal = {IEEE TVCG},
year = 2013,
title = {A Multi-Level Typology of Abstract Visualization Tasks},
doi = {10.1109/TVCG.2013.124},
url = {http://dx.doi.org/10.1109/TVCG.2013.124},
author = {Brehmer, M. and Munzner, T.},
pages = {2376--2385},
keywords = {Typology, visualization models, task and requirements analysis, qualitative evaluation},
abstract = {The considerable previous work characterizing visualization usage has focused on low-level tasks or interactions and high-level tasks, leaving a gap between them that is not addressed. This gap leads to a lack of distinction between the ends and means of a task, limiting the potential for rigorous analysis. We contribute a multi-level typology of visualization tasks to address this gap, distinguishing why and how a visualization task is performed, as well as what the task inputs and outputs are. Our typology allows complex tasks to be expressed as sequences of interdependent simpler tasks, resulting in concise and flexible descriptions for tasks of varying complexity and scope. It provides abstract rather than domain-specific descriptions of tasks, so that useful comparisons can be made between visualization systems targeted at different application domains. This descriptive power supports a level of analysis required for the generation of new designs, by guiding the translation of domain-specific problems into abstract tasks, and for the qualitative evaluation of visualization usage. We demonstrate the benefits of our approach in a detailed case study, comparing task descriptions from our typology to those derived from related work. We also discuss the similarities and differences between our typology and over two dozen extant classification systems and theoretical frameworks from the literatures of visualization, human-computer interaction, information retrieval, communications, and cartography.},
}
@article{p300,
journal = {IEEE TVCG},
year = 2013,
title = {An Empirically-Derived Taxonomy of Interaction Primitives for Interactive Cartography and Geovisualization},
doi = {10.1109/TVCG.2013.130},
url = {http://dx.doi.org/10.1109/TVCG.2013.130},
author = {Roth, R.E.},
pages = {2356--2365},
keywords = {Science of interaction, interaction primitives, interactive maps, geovisualization, interaction techniques},
abstract = {Proposals to establish a 'science of interaction' have been forwarded from Information Visualization and Visual Analytics, as well as Cartography, Geovisualization, and GIScience. This paper reports on two studies to contribute to this call for an interaction science, with the goal of developing a functional taxonomy of interaction primitives for map-based visualization. A semi-structured interview study first was conducted with 21 expert interactive map users to understand the way in which map-based visualizations currently are employed. The interviews were transcribed and coded to identify statements representative of either the task the user wished to accomplish (i.e., objective primitives) or the interactive functionality included in the visualization to achieve this task (i.e., operator primitives). A card sorting study then was conducted with 15 expert interactive map designers to organize these example statements into logical structures based on their experience translating client requests into interaction designs. Example statements were supplemented with primitive definitions in the literature and were separated into two sorting exercises: objectives and operators. The objective sort suggested five objectives that increase in cognitive sophistication (identify, compare, rank, associate, & delineate), but exhibited a large amount of variation across participants due to consideration of broader user goals (procure, predict, & prescribe) and interaction operands (space-alone, attributes-in-space, & space-in-time; elementary & general). The operator sort suggested five enabling operators (import, export, save, edit, & annotate) and twelve work operators (reexpress, arrange, sequence, resymbolize, overlay, pan, zoom, reproject, search, filter, retrieve, & calculate). This taxonomy offers an empirically-derived and ecologically-valid structure to inform future research and design on interaction.},
}
@article{p301,
journal = {IEEE TVCG},
year = 2013,
title = {An Interaction Model for Visualizations Beyond The Desktop},
doi = {10.1109/TVCG.2013.134},
url = {http://dx.doi.org/10.1109/TVCG.2013.134},
author = {Jansen, Y. and Dragicevic, P.},
pages = {2396--2405},
keywords = {Information visualization, interaction model, notational system, physical visualization},
abstract = {We present an interaction model for beyond-desktop visualizations that combines the visualization reference model with the instrumental interaction paradigm. Beyond-desktop visualizations involve a wide range of emerging technologies such as wall-sized displays, 3D and shape-changing displays, touch and tangible input, and physical information visualizations. While these technologies allow for new forms of interaction, they are often studied in isolation. New conceptual models are needed to build a coherent picture of what has been done and what is possible. We describe a modified pipeline model where raw data is processed into a visualization and then rendered into the physical world. Users can explore or change data by directly manipulating visualizations or through the use of instruments. Interactions can also take place in the physical world outside the visualization system, such as when using locomotion to inspect a large scale visualization. Through case studies we illustrate how this model can be used to describe both conventional and unconventional interactive visualization systems, and compare different design alternatives.},
}
@article{p302,
journal = {IEEE TVCG},
year = 2013,
title = {Automatic Layout of Structured Hierarchical Reports},
doi = {10.1109/TVCG.2013.137},
url = {http://dx.doi.org/10.1109/TVCG.2013.137},
author = {Bakke, E. and Karger, D.R. and Miller, R.C.},
pages = {2586--2595},
keywords = {Hierarchy data, tabular data, nested relations, layout management},
abstract = {Domain-specific database applications tend to contain a sizable number of table-, form-, and report-style views that must each be designed and maintained by a software developer. A significant part of this job is the necessary tweaking of low-level presentation details such as label placements, text field dimensions, list or table styles, and so on. In this paper, we present a horizontally constrained layout management algorithm that automates the display of structured hierarchical data using the traditional visual idioms of hand-designed database UIs: tables, multi-column forms, and outline-style indented lists. We compare our system with pure outline and nested table layouts with respect to space efficiency and readability, the latter with an online user study on 27 subjects. Our layouts are 3.9 and 1.6 times more compact on average than outline layouts and horizontally unconstrained table layouts, respectively, and are as readable as table layouts even for large datasets.},
}
@article{p303,
journal = {IEEE TVCG},
year = 2013,
title = {Common Angle Plots as Perception-True Visualizations of Categorical Associations},
doi = {10.1109/TVCG.2013.140},
url = {http://dx.doi.org/10.1109/TVCG.2013.140},
author = {Hofmann, H. and Vendettuoli, M.},
pages = {2297--2305},
keywords = {Linewidth illusion, data visualization, high-dimensional displays, parallel sets, hammock plots, Muller-Lyer illusion},
abstract = {Visualizations are great tools of communications-they summarize findings and quickly convey main messages to our audience. As designers of charts we have to make sure that information is shown with a minimum of distortion. We have to also consider illusions and other perceptual limitations of our audience. In this paper we discuss the effect and strength of the line width illusion, a Muller-Lyer type illusion, on designs related to displaying associations between categorical variables. Parallel sets and hammock plots are both affected by line width illusions. We introduce the common-angle plot as an alternative method for displaying categorical data in a manner that minimizes the effect from perceptual illusions. Results from user studies both highlight the need for addressing line-width illusions in displays and provide evidence that common angle charts successfully resolve this issue.},
}
@article{p304,
journal = {IEEE TVCG},
year = 2013,
title = {Creative User-Centered Visualization Design for Energy Analysts and Modelers},
doi = {10.1109/TVCG.2013.145},
url = {http://dx.doi.org/10.1109/TVCG.2013.145},
author = {Goodwin, S. and Dykes, J. and Jones, S. and Dillingham, I. and Dove, G. and Duffy, A. and Kachkaev, A. and Slingsby, A. and Wood, J.},
pages = {2516--2525},
keywords = {Creativity techniques, user-centered design, data visualization, smart home, energy consumption},
abstract = {We enhance a user-centered design process with techniques that deliberately promote creativity to identify opportunities for the visualization of data generated by a major energy supplier. Visualization prototypes developed in this way prove effective in a situation whereby data sets are largely unknown and requirements open - enabling successful exploration of possibilities for visualization in Smart Home data analysis. The process gives rise to novel designs and design metaphors including data sculpting. It suggests: that the deliberate use of creativity techniques with data stakeholders is likely to contribute to successful, novel and effective solutions; that being explicit about creativity may contribute to designers developing creative solutions; that using creativity techniques early in the design process may result in a creative approach persisting throughout the process. The work constitutes the first systematic visualization design for a data rich source that will be increasingly important to energy suppliers and consumers as Smart Meter technology is widely deployed. It is novel in explicitly employing creativity techniques at the requirements stage of visualization design and development, paving the way for further use and study of creativity methods in visualization design.},
}
@article{p305,
journal = {IEEE TVCG},
year = 2013,
title = {DiffAni: Visualizing Dynamic Graphs with a Hybrid of Difference Maps and Animation},
doi = {10.1109/TVCG.2013.149},
url = {http://dx.doi.org/10.1109/TVCG.2013.149},
author = {Rufiange, S. and McGuffin, M.J.},
pages = {2556--2565},
keywords = {Dynamic networks, hybrid visualization, taxonomy, evolution, animation, difference map},
abstract = {Visualization of dynamically changing networks (graphs) is a significant challenge for researchers. Previous work has experimentally compared animation, small multiples, and other techniques, and found trade-offs between these. One potential way to avoid such trade-offs is to combine previous techniques in a hybrid visualization. We present two taxonomies of visualizations of dynamic graphs: one of non-hybrid techniques, and one of hybrid techniques. We also describe a prototype, called DiffAni, that allows a graph to be visualized as a sequence of three kinds of tiles: diff tiles that show difference maps over some time interval, animation tiles that show the evolution of the graph over some time interval, and small multiple tiles that show the graph state at an individual time slice. This sequence of tiles is ordered by time and covers all time slices in the data. An experimental evaluation of DiffAni shows that our hybrid approach has advantages over non-hybrid techniques in certain cases.},
}
@article{p306,
journal = {IEEE TVCG},
year = 2013,
title = {Dimension Projection Matrix/Tree: Interactive Subspace Visual Exploration and Analysis of High Dimensional Data},
doi = {10.1109/TVCG.2013.150},
url = {http://dx.doi.org/10.1109/TVCG.2013.150},
author = {Xiaoru Yuan and Donghao Ren and Zuchao Wang and Cong Guo},
pages = {2625--2633},
keywords = {High dimensional data, hierarchical visualization, sub-dimensional space, user interaction, subspace, tree, matrix},
abstract = {For high-dimensional data, this work proposes two novel visual exploration methods to gain insights into the data aspect and the dimension aspect of the data. The first is a Dimension Projection Matrix, as an extension of a scatterplot matrix. In the matrix, each row or column represents a group of dimensions, and each cell shows a dimension projection (such as MDS) of the data with the corresponding dimensions. The second is a Dimension Projection Tree, where every node is either a dimension projection plot or a Dimension Projection Matrix. Nodes are connected with links and each child node in the tree covers a subset of the parent node's dimensions or a subset of the parent node's data items. While the tree nodes visualize the subspaces of dimensions or subsets of the data items under exploration, the matrix nodes enable cross-comparison between different combinations of subspaces. Both Dimension Projection Matrix and Dimension Project Tree can be constructed algorithmically through automation, or manually through user interaction. Our implementation enables interactions such as drilling down to explore different levels of the data, merging or splitting the subspaces to adjust the matrix, and applying brushing to select data clusters. Our method enables simultaneously exploring data correlation and dimension correlation for data with high dimensions.},
}
@article{p307,
journal = {IEEE TVCG},
year = 2013,
title = {Edge Compression Techniques for Visualization of Dense Directed Graphs},
doi = {10.1109/TVCG.2013.151},
url = {http://dx.doi.org/10.1109/TVCG.2013.151},
author = {Dwyer, T. and Riche, N.H. and Marriott, K. and Mears, C.},
pages = {2596--2605},
keywords = {Directed graphs, networks, modular decomposition, power graph analysis},
abstract = {We explore the effectiveness of visualizing dense directed graphs by replacing individual edges with edges connected to 'modules'-or groups of nodes-such that the new edges imply aggregate connectivity. We only consider techniques that offer a lossless compression: that is, where the entire graph can still be read from the compressed version. The techniques considered are: a simple grouping of nodes with identical neighbor sets; Modular Decomposition which permits internal structure in modules and allows them to be nested; and Power Graph Analysis which further allows edges to cross module boundaries. These techniques all have the same goal-to compress the set of edges that need to be rendered to fully convey connectivity-but each successive relaxation of the module definition permits fewer edges to be drawn in the rendered graph. Each successive technique also, we hypothesize, requires a higher degree of mental effort to interpret. We test this hypothetical trade-off with two studies involving human participants. For Power Graph Analysis we propose a novel optimal technique based on constraint programming. This enables us to explore the parameter space for the technique more precisely than could be achieved with a heuristic. Although applicable to many domains, we are motivated by-and discuss in particular-the application to software dependency analysis.},
}
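Illustrative sketch for the entry above (p307): the simplest compression technique the abstract lists groups nodes with identical neighbor sets, so that one aggregate edge can stand in for many individual edges. The Python sketch below performs that grouping for an undirected graph; this simplification is an assumption (the paper treats dense directed graphs and also covers modular decomposition and power graph analysis), and group_by_neighbors plus the example edge list are invented for illustration.

# Group nodes with identical neighbor sets (undirected simplification).
# Illustrative only; not the paper's directed-graph techniques. Example edges are hypothetical.
from collections import defaultdict

def group_by_neighbors(edges):
    """Return groups of nodes that share exactly the same neighbor set."""
    adjacency = defaultdict(set)
    for u, v in edges:
        adjacency[u].add(v)
        adjacency[v].add(u)
    groups = defaultdict(list)
    for node, neighbors in adjacency.items():
        groups[frozenset(neighbors)].append(node)
    return [sorted(nodes) for nodes in groups.values() if len(nodes) > 1]

if __name__ == "__main__":
    edges = [("a", "x"), ("a", "y"), ("b", "x"), ("b", "y"), ("c", "x")]
    print(group_by_neighbors(edges))  # ['a', 'b'] share the neighbor set {x, y}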
@article{p308,
journal = {IEEE TVCG},
year = 2013,
title = {Empirical Guidance on Scatterplot and Dimension Reduction Technique Choices},
doi = {10.1109/TVCG.2013.153},
url = {http://dx.doi.org/10.1109/TVCG.2013.153},
author = {Sedlmair, M. and Munzner, T. and Tory, M.},
pages = {2634--2643},
keywords = {Dimensionality reduction, scatterplots, quantitative study},
abstract = {To verify cluster separation in high-dimensional data, analysts often reduce the data with a dimension reduction (DR) technique, and then visualize it with 2D Scatterplots, interactive 3D Scatterplots, or Scatterplot Matrices (SPLOMs). With the goal of providing guidance between these visual encoding choices, we conducted an empirical data study in which two human coders manually inspected a broad set of 816 scatterplots derived from 75 datasets, 4 DR techniques, and the 3 previously mentioned scatterplot techniques. Each coder scored all color-coded classes in each scatterplot in terms of their separability from other classes. We analyze the resulting quantitative data with a heatmap approach, and qualitatively discuss interesting scatterplot examples. Our findings reveal that 2D scatterplots are often 'good enough', that is, neither SPLOM nor interactive 3D adds notably more cluster separability with the chosen DR technique. If 2D is not good enough, the most promising approach is to use an alternative DR technique in 2D. Beyond that, SPLOM occasionally adds additional value, and interactive 3D rarely helps but often hurts in terms of poorer class separation and usability. We summarize these results as a workflow model and implications for design. Our results offer guidance to analysts during the DR exploration process.},
}
@article{p309,
journal = {IEEE TVCG},
year = 2013,
title = {Entourage: Visualizing Relationships between Biological Pathways using Contextual Subsets},
doi = {10.1109/TVCG.2013.154},
url = {http://dx.doi.org/10.1109/TVCG.2013.154},
author = {Lex, A. and Partl, C. and Kalkofen, D. and Streit, M. and Gratzl, S. and Wassermann, A.M. and Schmalstieg, D. and Pfister, H.},
pages = {2536--2545},
keywords = {Pathway visualization, biological networks, subsets, graphs, biomolecular data},
abstract = {Biological pathway maps are highly relevant tools for many tasks in molecular biology. They reduce the complexity of the overall biological network by partitioning it into smaller manageable parts. While this reduction of complexity is their biggest strength, it is, at the same time, their biggest weakness. By removing what is deemed not important for the primary function of the pathway, biologists lose the ability to follow and understand cross-talks between pathways. Considering these cross-talks is, however, critical in many analysis scenarios, such as judging effects of drugs. In this paper we introduce Entourage, a novel visualization technique that provides contextual information lost due to the artificial partitioning of the biological network, but at the same time limits the presented information to what is relevant to the analyst's task. We use one pathway map as the focus of an analysis and allow a larger set of contextual pathways. For these context pathways we only show the contextual subsets, i.e., the parts of the graph that are relevant to a selection. Entourage suggests related pathways based on similarities and highlights parts of a pathway that are interesting in terms of mapped experimental data. We visualize interdependencies between pathways using stubs of visual links, which we found effective yet not obtrusive. By combining this approach with visualization of experimental data, we can provide domain experts with a highly valuable tool. We demonstrate the utility of Entourage with case studies conducted with a biochemist who researches the effects of drugs on pathways. We show that the technique is well suited to investigate interdependencies between pathways and to analyze, understand, and predict the effect that drugs have on different cell types.},
}
@article{p310,
journal = {IEEE TVCG},
year = 2013,
title = {Evaluation of filesystem Provenance Visualization Tools},
doi = {10.1109/TVCG.2013.155},
url = {http://dx.doi.org/10.1109/TVCG.2013.155},
author = {Borkin, M. and Yeh, C.S. and Boyd, M. and Macko, P. and Gajos, K. and Seltzer, M. and Pfister, H.},
pages = {2476--2485},
keywords = {Provenance data, graph/network data, hierarchy data, quantitative evaluation, gender differences},
abstract = {Having effective visualizations of filesystem provenance data is valuable for understanding its complex hierarchical structure. The most common visual representation of provenance data is the node-link diagram. While effective for understanding local activity, the node-link diagram fails to offer a high-level summary of activity and inter-relationships within the data. We present a new tool, InProv, which displays filesystem provenance with an interactive radial-based tree layout. The tool also utilizes a new time-based hierarchical node grouping method for filesystem provenance data we developed to match the user's mental model and make data exploration more intuitive. We compared InProv to a conventional node-link based tool, Orbiter, in a quantitative evaluation with real users of filesystem provenance data including provenance data experts, IT professionals, and computational scientists. We also compared in the evaluation our new node grouping method to a conventional method. The results demonstrate that InProv results in higher accuracy in identifying system activity than Orbiter with large complex data sets. The results also show that our new time-based hierarchical node grouping method improves performance in both tools, and participants found both tools significantly easier to use with the new time-based node grouping method. Subjective measures show that participants found InProv to require less mental activity, less physical activity, less work, and is less stressful to use. Our study also reveals one of the first cases of gender differences in visualization; both genders had comparable performance with InProv, but women had a significantly lower average accuracy (56%) compared to men (70%) with Orbiter.},
}
@article{p311,
journal = {IEEE TVCG},
year = 2013,
title = {GPLOM: The Generalized Plot Matrix for Visualizing Multidimensional Multivariate Data},
doi = {10.1109/TVCG.2013.160},
url = {http://dx.doi.org/10.1109/TVCG.2013.160},
author = {Im, J.-F. and McGuffin, M.J. and Leung, R.},
pages = {2606--2614},
keywords = {Multidimensional data, tabular data, relational data, mdmv, high-dimensional data, database visualization, database overview, parallel coordinates, scatterplot matrix, user interfaces, business intelligence},
abstract = {Scatterplot matrices (SPLOMs), parallel coordinates, and glyphs can all be used to visualize the multiple continuous variables (i.e., dependent variables or measures) in multidimensional multivariate data. However, these techniques are not well suited to visualizing many categorical variables (i.e., independent variables or dimensions). To visualize multiple categorical variables, 'hierarchical axes' that 'stack dimensions' have been used in systems like Polaris and Tableau. However, this approach does not scale well beyond a small number of categorical variables. Emerson et al. [8] extend the matrix paradigm of the SPLOM to simultaneously visualize several categorical and continuous variables, displaying many kinds of charts in the matrix depending on the kinds of variables involved. We propose a variant of their technique, called the Generalized Plot Matrix (GPLOM). The GPLOM restricts Emerson et al.'s technique to only three kinds of charts (scatterplots for pairs of continuous variables, heatmaps for pairs of categorical variables, and barcharts for pairings of categorical and continuous variable), in an effort to make it easier to understand. At the same time, the GPLOM extends Emerson et al.'s work by demonstrating interactive techniques suited to the matrix of charts. We discuss the visual design and interactive features of our GPLOM prototype, including a textual search feature allowing users to quickly locate values or variables by name. We also present a user study that compared performance with Tableau and our GPLOM prototype, that found that GPLOM is significantly faster in certain cases, and not significantly slower in other cases.},
}
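Illustrative sketch for the entry above (p311): the abstract states the GPLOM's cell rule directly, i.e. scatterplots for pairs of continuous variables, heatmaps for pairs of categorical variables, and barcharts for mixed pairs. The Python sketch below encodes only that selection rule; gplom_cell_chart and the example variables are hypothetical names, and no actual rendering is attempted.

# Chart-type rule for a GPLOM-style matrix cell, as described in the abstract.
# Variable names and types below are hypothetical.

def gplom_cell_chart(type_a: str, type_b: str) -> str:
    """Return the chart kind for a pair of variables typed 'continuous' or 'categorical'."""
    if type_a == "continuous" and type_b == "continuous":
        return "scatterplot"
    if type_a == "categorical" and type_b == "categorical":
        return "heatmap"
    return "barchart"

if __name__ == "__main__":
    variables = {"price": "continuous", "region": "categorical", "rating": "continuous"}
    names = list(variables)
    for i, a in enumerate(names):
        for b in names[:i]:  # lower triangle of the matrix, as in a SPLOM
            print(f"{a} x {b}: {gplom_cell_chart(variables[a], variables[b])}")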
@article{p312,
journal = {IEEE TVCG},
year = 2013,
title = {Hybrid-Image Visualization for Large Viewing Environments},
doi = {10.1109/TVCG.2013.163},
url = {http://dx.doi.org/10.1109/TVCG.2013.163},
author = {Isenberg, P. and Dragicevic, P. and Willett, W. and Bezerianos, A. and Fekete, J.},
pages = {2346--2355},
keywords = {Multi-scale, large displays, hybrid images, collaboration, visualization},
abstract = {We present a first investigation into hybrid-image visualization for data analysis in large-scale viewing environments. Hybrid-image visualizations blend two different visual representations into a single static view, such that each representation can be perceived at a different viewing distance. Our work is motivated by data analysis scenarios that incorporate one or more displays with sufficiently large size and resolution to be comfortably viewed by different people from various distances. Hybrid-image visualizations can be used, in particular, to enhance overview tasks from a distance and detail-in-context tasks when standing close to the display. By using a perception-based blending approach, hybrid-image visualizations make two full-screen visualizations accessible without tracking viewers in front of a display. We contribute a design space, discuss the perceptual rationale for our work, provide examples, and introduce a set of techniques and tools to aid the design of hybrid-image visualizations.},
}
@article{p313,
journal = {IEEE TVCG},
year = 2013,
title = {Information Visualization and Proxemics: Design Opportunities and Empirical findings},
doi = {10.1109/TVCG.2013.166},
url = {http://dx.doi.org/10.1109/TVCG.2013.166},
author = {Jakobsen, M.R. and Sahlemariam Haile, Y. and Knudsen, S. and Hornbaek, K.},
pages = {2386--2395},
keywords = {Proxemics, information visualization, user study, large displays, user tracking, movement, orientation, distance},
abstract = {People typically interact with information visualizations using a mouse. Their physical movement, orientation, and distance to visualizations are rarely used as input. We explore how to use such spatial relations among people and visualizations (i.e., proxemics) to drive interaction with visualizations, focusing here on the spatial relations between a single user and visualizations on a large display. We implement interaction techniques that zoom and pan, query and relate, and adapt visualizations based on tracking of users' position in relation to a large high-resolution display. Alternative prototypes are tested in three user studies and compared with baseline conditions that use a mouse. Our aim is to gain empirical data on the usefulness of a range of design possibilities and to generate more ideas. Among other things, the results show promise for changing zoom level or visual representation with the user's physical distance to a large display. We discuss possible benefits and potential issues to avoid when designing information visualizations that use proxemics.},
}
@article{p314,
journal = {IEEE TVCG},
year = 2013,
title = {Interactive Visualizations on Large and Small Displays: The Interrelation of Display Size, Information Space, and Scale},
doi = {10.1109/TVCG.2013.170},
url = {http://dx.doi.org/10.1109/TVCG.2013.170},
author = {Jakobsen, M.R. and Hornbaek, K.},
pages = {2336--2345},
keywords = {Information visualization, multi-scale navigation, interaction techniques, experimental method, user studies},
abstract = {In controlled experiments on the relation of display size (i.e., the number of pixels) and the usability of visualizations, the size of the information space can either be kept constant or varied relative to display size. Both experimental approaches have limitations. If the information space is kept constant then the scale ratio between an overview of the entire information space and the lowest zoom level varies, which can impact performance; if the information space is varied then the scale ratio is kept constant, but performance cannot be directly compared. In other words, display size, information space, and scale ratio are interrelated variables. We investigate this relation in two experiments with interfaces that implement classic information visualization techniques-focus+context, overview+detail, and zooming-for multi-scale navigation in maps. Display size varied between 0.17, 1.5, and 13.8 megapixels. Information space varied relative to display size in one experiment and was constant in the other. Results suggest that for tasks where users navigate targets that are visible at all map scales the interfaces do not benefit from a large display: With a constant map size, a larger display does not improve performance with the interfaces; with map size varied relative to display size, participants found interfaces harder to use with a larger display and task completion times decrease only when they are normalized to compensate for the increase in map size. The two experimental approaches show different interaction effects between display size and interface. In particular, focus+context performs relatively worse at a large display size with variable map size, and relatively worse at a small display size with a fixed map size. Based on a theoretical analysis of the interaction with the visualization techniques, we examine individual task actions empirically so as to understand the relative impact of display size and scale ratio on the visualization techniques' performance and to discuss differences between the two experimental approaches.},
}
@article{p315,
journal = {IEEE TVCG},
year = 2013,
title = {LineUp: Visual Analysis of Multi-Attribute Rankings},
doi = {10.1109/TVCG.2013.173},
url = {http://dx.doi.org/10.1109/TVCG.2013.173},
author = {Gratzl, S. and Lex, A. and Gehlenborg, N. and Pfister, H. and Streit, M.},
pages = {2277--2286},
keywords = {Ranking visualization, ranking, scoring, multi-attribute, multifactorial, multi-faceted, stacked bar charts},
abstract = {Rankings are a popular and universal approach to structuring otherwise unorganized collections of items by computing a rank for each item based on the value of one or more of its attributes. This allows us, for example, to prioritize tasks or to evaluate the performance of products relative to each other. While the visualization of a ranking itself is straightforward, its interpretation is not, because the rank of an item represents only a summary of a potentially complicated relationship between its attributes and those of the other items. It is also common that alternative rankings exist which need to be compared and analyzed to gain insight into how multiple heterogeneous attributes affect the rankings. Advanced visual exploration tools are needed to make this process efficient. In this paper we present a comprehensive analysis of requirements for the visualization of multi-attribute rankings. Based on these considerations, we propose LineUp - a novel and scalable visualization technique that uses bar charts. This interactive technique supports the ranking of items based on multiple heterogeneous attributes with different scales and semantics. It enables users to interactively combine attributes and flexibly refine parameters to explore the effect of changes in the attribute combination. This process can be employed to derive actionable insights as to which attributes of an item need to be modified in order for its rank to change. Additionally, through integration of slope graphs, LineUp can also be used to compare multiple alternative rankings on the same set of items, for example, over time or across different attribute combinations. We evaluate the effectiveness of the proposed multi-attribute visualization technique in a qualitative study. The study shows that users are able to successfully solve complex ranking tasks in a short period of time.},
}
@article{p316,
journal = {IEEE TVCG},
year = 2013,
title = {Nanocubes for Real-Time Exploration of Spatiotemporal Datasets},
doi = {10.1109/TVCG.2013.179},
url = {http://dx.doi.org/10.1109/TVCG.2013.179},
author = {Lins, L. and Klosowski, J.T. and Scheidegger, C.E.},
pages = {2456--2465},
keywords = {Data cube, Data structures, Interactive exploration},
abstract = {Consider real-time exploration of large multidimensional spatiotemporal datasets with billions of entries, each defined by a location, a time, and other attributes. Are certain attributes correlated spatially or temporally? Are there trends or outliers in the data? Answering these questions requires aggregation over arbitrary regions of the domain and attributes of the data. Many relational databases implement the well-known data cube aggregation operation, which in a sense precomputes every possible aggregate query over the database. Data cubes are sometimes assumed to take a prohibitively large amount of space, and to consequently require disk storage. In contrast, we show how to construct a data cube that fits in a modern laptop's main memory, even for billions of entries; we call this data structure a nanocube. We present algorithms to compute and query a nanocube, and show how it can be used to generate well-known visual encodings such as heatmaps, histograms, and parallel coordinate plots. When compared to exact visualizations created by scanning an entire dataset, nanocube plots have bounded screen error across a variety of scales, thanks to a hierarchical structure in space and time. We demonstrate the effectiveness of our technique on a variety of real-world datasets, and present memory, timing, and network bandwidth measurements. We find that the timings for the queries in our examples are dominated by network and user-interaction latencies.},
}
@article{p317,
journal = {IEEE TVCG},
year = 2013,
title = {Orthographic Star Coordinates},
doi = {10.1109/TVCG.2013.182},
url = {http://dx.doi.org/10.1109/TVCG.2013.182},
author = {Lehmann, D.J. and Theisel, H.},
pages = {2615--2624},
keywords = {Star plot, multivariate visualization, visual analytics},
abstract = {Star coordinates is a popular projection technique from an nD data space to a 2D/3D visualization domain. It is defined by setting n coordinate axes in the visualization domain. Since it generally defines an affine projection, strong distortions can occur: an nD sphere can be mapped to an ellipse of arbitrary size and aspect ratio. We propose to restrict star coordinates to orthographic projections which map an nD sphere of radius r to a 2D circle of radius r. We achieve this by formulating conditions for the coordinate axes to define orthographic projections, and by running a repeated non-linear optimization in the background of every modification of the coordinate axes. This way, we define a number of orthographic interaction concepts as well as orthographic data tour sequences: a scatterplot tour, a principal component tour, and a grand tour. All concepts are illustrated and evaluated with synthetic and real data.},
}
@article{p318,
journal = {IEEE TVCG},
year = 2013,
title = {Perception of Average Value in Multiclass Scatterplots},
doi = {10.1109/TVCG.2013.183},
url = {http://dx.doi.org/10.1109/TVCG.2013.183},
author = {Gleicher, M. and Correll, M. and Nothelfer, C. and Franconeri, S.},
pages = {2316--2325},
keywords = {Psychophysics, Information Visualization, Perceptual Study},
abstract = {The visual system can make highly efficient aggregate judgements about a set of objects, with speed roughly independent of the number of objects considered. While there is a rich literature on these mechanisms and their ramifications for visual summarization tasks, this prior work rarely considers more complex tasks requiring multiple judgements over long periods of time, and has not considered certain critical aggregation types, such as the localization of the mean value of a set of points. In this paper, we explore these questions using a common visualization task as a case study: relative mean value judgements within multi-class scatterplots. We describe how the perception literature provides a set of expected constraints on the task, and evaluate these predictions with a large-scale perceptual study with crowd-sourced participants. Judgements are no harder when each set contains more points; redundant and conflicting encodings, as well as additional sets, do not strongly affect performance; and judgements are harder when using less salient encodings. These results have concrete ramifications for the design of scatterplots.},
}
@article{p319,
journal = {IEEE TVCG},
year = 2013,
title = {Radial Sets: Interactive Visual Analysis of Large Overlapping Sets},
doi = {10.1109/TVCG.2013.184},
url = {http://dx.doi.org/10.1109/TVCG.2013.184},
author = {Alsallakh, B. and Aigner, W. and Miksch, S. and Hauser, H.},
pages = {2496--2505},
keywords = {Multi-valued attributes, set-typed data, overlapping sets, visualization technique, scalability},
abstract = {In many applications, data tables contain multi-valued attributes that often store the memberships of the table entities to multiple sets such as which languages a person masters, which skills an applicant documents, or which features a product comes with. With a growing number of entities, the resulting element-set membership matrix becomes very rich of information about how these sets overlap. Many analysis tasks targeted at set-typed data are concerned with these overlaps as salient features of such data. This paper presents Radial Sets, a novel visual technique to analyze set memberships for a large number of elements. Our technique uses frequency-based representations to enable quickly finding and analyzing different kinds of overlaps between the sets, and relating these overlaps to other attributes of the table entities. Furthermore, it enables various interactions to select elements of interest, find out if they are over-represented in specific sets or overlaps, and if they exhibit a different distribution for a specific attribute compared to the rest of the elements. These interactions allow formulating highly-expressive visual queries on the elements in terms of their set memberships and attribute values. As we demonstrate via two usage scenarios, Radial Sets enable revealing and analyzing a multitude of overlapping patterns between large sets, beyond the limits of state-of-the-art techniques.},
}
@article{p320,
journal = {IEEE TVCG},
year = 2013,
title = {Selecting the Aspect Ratio of a Scatter Plot Based on Its Delaunay Triangulation},
doi = {10.1109/TVCG.2013.187},
url = {http://dx.doi.org/10.1109/TVCG.2013.187},
author = {Fink, M. and Haunert, J.-H. and Spoerhase, J. and Wolff, A.},
pages = {2326--2335},
keywords = {Scatter plot, aspect ratio, Delaunay triangulation},
abstract = {Scatter plots are diagrams that visualize two-dimensional data as sets of points in the plane. They allow users to detect correlations and clusters in the data. Whether or not a user can accomplish these tasks highly depends on the aspect ratio selected for the plot, i.e., the ratio between the horizontal and the vertical extent of the diagram. We argue that an aspect ratio is good if the Delaunay triangulation of the scatter plot at this aspect ratio has some nice geometric property, e.g., a large minimum angle or a small total edge length. More precisely, we consider the following optimization problem. Given a set Q of points in the plane, find a scale factor s such that scaling the x-coordinates of the points in Q by s and the y-coordinates by 1/s yields a point set P(s) that optimizes a property of the Delaunay triangulation of P(s), over all choices of s. We present an algorithm that solves this problem efficiently and demonstrate its usefulness on real-world instances. Moreover, we discuss an empirical test in which we asked 64 participants to choose the aspect ratios of 18 scatter plots. We tested six different quality measures that our algorithm can optimize. In conclusion, minimizing the total edge length and minimizing what we call the 'uncompactness' of the triangles of the Delaunay triangulation yielded the aspect ratios that were most similar to those chosen by the participants in the test.},
}
@article{p321,
journal = {IEEE TVCG},
year = 2013,
title = {SketchStory: Telling More Engaging Stories with Data through Freeform Sketching},
doi = {10.1109/TVCG.2013.191},
url = {http://dx.doi.org/10.1109/TVCG.2013.191},
author = {Bongshin Lee and Kazi, R.H. and Smith, G.},
pages = {2416--2425},
keywords = {Storytelling, data presentation, sketch, pen and touch, interaction, visualization},
abstract = {Presenting and communicating insights to an audience-telling a story-is one of the main goals of data exploration. Even though visualization as a storytelling medium has recently begun to gain attention, storytelling is still underexplored in information visualization and little research has been done to help people tell their stories with data. To create a new, more engaging form of storytelling with data, we leverage and extend the narrative storytelling attributes of whiteboard animation with pen and touch interactions. We present SketchStory, a data-enabled digital whiteboard that facilitates the creation of personalized and expressive data charts quickly and easily. SketchStory recognizes a small set of sketch gestures for chart invocation, and automatically completes charts by synthesizing the visuals from the presenter-provided example icon and binding them to the underlying data. Furthermore, SketchStory allows the presenter to move and resize the completed data charts with touch, and filter the underlying data to facilitate interactive exploration. We conducted a controlled experiment for both audiences and presenters to compare SketchStory with a traditional presentation system, Microsoft PowerPoint. Results show that the audience is more engaged by presentations done with SketchStory than PowerPoint. Eighteen out of 24 audience participants preferred SketchStory to PowerPoint. Four out of five presenter participants also favored SketchStory despite the extra effort required for presentation.},
}
@article{p322,
journal = {IEEE TVCG},
year = 2013,
title = {SoccerStories: A Kick-off for Visual Soccer Analysis},
doi = {10.1109/TVCG.2013.192},
url = {http://dx.doi.org/10.1109/TVCG.2013.192},
author = {Perin, C. and Vuillemot, R. and Fekete, J.},
pages = {2506--2515},
keywords = {Visual knowledge discovery, visual knowledge representation, sport analytics, visual aggregation},
abstract = {This article presents SoccerStories, a visualization interface to support analysts in exploring soccer data and communicating interesting insights. Currently, most analyses on such data relate to statistics on individual players or teams. However, soccer analysts we collaborated with consider that quantitative analysis alone does not convey the right picture of the game, as context, player positions and phases of player actions are the most relevant aspects. We designed SoccerStories to support the current practice of soccer analysts and to enrich it, both in the analysis and communication stages. Our system provides an overview+detail interface of game phases, and their aggregation into a series of connected visualizations, each visualization being tailored for actions such as a series of passes or a goal attempt. To evaluate our tool, we ran two qualitative user studies on recent games using SoccerStories with data from one of the world's leading live sports data providers. The first study resulted in a series of four articles on soccer tactics, by a tactics analyst, who said he would not have been able to write these otherwise. The second study consisted in an exploratory follow-up to investigate design alternatives for embedding soccer phases into word-sized graphics. For both experiments, we received very enthusiastic feedback and participants consider further use of SoccerStories to enhance their current workflow.},
}
@article{p323,
journal = {IEEE TVCG},
year = 2013,
title = {StoryFlow: Tracking the Evolution of Stories},
doi = {10.1109/TVCG.2013.196},
url = {http://dx.doi.org/10.1109/TVCG.2013.196},
author = {Shixia Liu and Yingcai Wu and Enxun Wei and Mengchen Liu and Yang Liu},
pages = {2436--2445},
keywords = {Storylines, story-telling visualization, user interactions, level-of-detail, optimization},
abstract = {Storyline visualizations, which are useful in many applications, aim to illustrate the dynamic relationships between entities in a story. However, the growing complexity and scalability of stories pose great challenges for existing approaches. In this paper, we propose an efficient optimization approach to generating an aesthetically appealing storyline visualization, which effectively handles the hierarchical relationships between entities over time. The approach formulates the storyline layout as a novel hybrid optimization approach that combines discrete and continuous optimization. The discrete method generates an initial layout through the ordering and alignment of entities, and the continuous method optimizes the initial layout to produce the optimal one. The efficient approach makes real-time interactions (e.g., bundling and straightening) possible, thus enabling users to better understand and track how the story evolves. Experiments and case studies are conducted to demonstrate the effectiveness and usefulness of the optimization approach.},
}
@article{p324,
journal = {IEEE TVCG},
year = 2013,
title = {Understanding Interfirm Relationships in Business Ecosystems with Interactive Visualization},
doi = {10.1109/TVCG.2013.209},
url = {http://dx.doi.org/10.1109/TVCG.2013.209},
author = {Basole, R.C. and Clear, T. and Mengdie Hu and Mehrotra, H. and Stasko, J.},
pages = {2526--2535},
keywords = {Business ecosystems, market research, strategic analysis, design study, interaction, network visualization},
abstract = {Business ecosystems are characterized by large, complex, and global networks of firms, often from many different market segments, all collaborating, partnering, and competing to create and deliver new products and services. Given the rapidly increasing scale, complexity, and rate of change of business ecosystems, as well as economic and competitive pressures, analysts are faced with the formidable task of quickly understanding the fundamental characteristics of these interfirm networks. Existing tools, however, are predominantly query- or list-centric with limited interactive, exploratory capabilities. Guided by a field study of corporate analysts, we have designed and implemented dotlink360, an interactive visualization system that provides capabilities to gain systemic insight into the compositional, temporal, and connective characteristics of business ecosystems. dotlink360 consists of novel, multiple connected views enabling the analyst to explore, discover, and understand interfirm networks for a focal firm, specific market segments or countries, and the entire business ecosystem. System evaluation by a small group of prototypical users shows supporting evidence of the benefits of our approach. This design study contributes to the relatively unexplored, but promising area of exploratory information visualization in market research and business strategy.},
}
@article{p325,
journal = {IEEE TVCG},
year = 2013,
title = {Using Concrete Scales: A Practical Framework for Effective Visual Depiction of Complex Measures},
doi = {10.1109/TVCG.2013.210},
url = {http://dx.doi.org/10.1109/TVCG.2013.210},
author = {Chevalier, F. and Vuillemot, R. and Gali, G.},
pages = {2426--2435},
keywords = {Concrete scale, scale cognition, visual comparison, graphic composition, visual notation},
abstract = {From financial statistics to nutritional values, we are frequently exposed to quantitative information expressed in measures of either extreme magnitudes or unfamiliar units, or both. A common practice used to comprehend such complex measures is to relate, re-express, and compare them through visual depictions using magnitudes and units that are easier to grasp. Through this practice, we create a new graphic composition that we refer to as a concrete scale. To the best of our knowledge, there are no design guidelines that exist for concrete scales despite their common use in communication, educational, and decision-making settings. We attempt to fill this void by introducing a novel framework that would serve as a practical guide for their analysis and design. Informed by a thorough analysis of graphic compositions involving complex measures and an extensive literature review of scale cognition mechanisms, our framework outlines the design space of various measure relations-specifically relations involving the re-expression of complex measures to more familiar concepts-and their visual representations as graphic compositions.},
}
@article{p326,
journal = {IEEE TVCG},
year = 2013,
title = {Variant View: Visualizing Sequence Variants in their Gene Context},
doi = {10.1109/TVCG.2013.214},
url = {http://dx.doi.org/10.1109/TVCG.2013.214},
author = {Ferstay, J.A. and Nielsen, C.B. and Munzner, T.},
pages = {2546--2555},
keywords = {Information visualization, design study, bioinformatics, genetic variants},
abstract = {Scientists use DNA sequence differences between an individual's genome and a standard reference genome to study the genetic basis of disease. Such differences are called sequence variants, and determining their impact in the cell is difficult because it requires reasoning about both the type and location of the variant across several levels of biological context. In this design study, we worked with four analysts to design a visualization tool supporting variant impact assessment for three different tasks. We contribute data and task abstractions for the problem of variant impact assessment, and the carefully justified design and implementation of the Variant View tool. Variant View features an information-dense visual encoding that provides maximal information at the overview level, in contrast to the extensive navigation required by currently-prevalent genome browsers. We provide initial evidence that the tool simplified and accelerated workflows for these three tasks through three case studies. Finally, we reflect on the lessons learned in creating and refining data and task abstractions that allow for concise overviews of sprawling information spaces that can reduce or remove the need for the memory-intensive use of navigation.},
}
@article{p327,
journal = {IEEE TVCG},
year = 2013,
title = {Visual Compression of Workflow Visualizations with Automated Detection of Macro Motifs},
doi = {10.1109/TVCG.2013.225},
url = {http://dx.doi.org/10.1109/TVCG.2013.225},
author = {Maguire, E. and Rocca-Serra, P. and Sansone, S.-A. and Davies, J. and Chen, M.},
pages = {2576--2585},
keywords = {Workflow visualization, motif detection, glyph-based visualization, glyph generation, state-transition-based algorithm},
abstract = {This paper is concerned with the creation of 'macros' in workflow visualization as a support tool to increase the efficiency of data curation tasks. We propose computation of candidate macros based on their usage in large collections of workflows in data repositories. We describe an efficient algorithm for extracting macro motifs from workflow graphs. We discovered that the state transition information, used to identify macro candidates, characterizes the structural pattern of the macro and can be harnessed as part of the visual design of the corresponding macro glyph. This facilitates partial automation and consistency in glyph design applicable to a large set of macro glyphs. We tested this approach against a repository of biological data holding some 9,670 workflows and found that the algorithmically generated candidate macros are in keeping with domain expert expectations.},
}
@article{p328,
journal = {IEEE TVCG},
year = 2013,
title = {Visual Sedimentation},
doi = {10.1109/TVCG.2013.227},
url = {http://dx.doi.org/10.1109/TVCG.2013.227},
author = {Huron, S. and Vuillemot, R. and Fekete, J.},
pages = {2446--2455},
keywords = {Design, Information Visualization, Dynamic visualization, Dynamic data, Data stream, Real time, Metaphor},
abstract = {We introduce Visual Sedimentation, a novel design metaphor for visualizing data streams directly inspired by the physical process of sedimentation. Visualizing data streams (e.g., Tweets, RSS, Emails) is challenging as incoming data arrive at unpredictable rates and have to remain readable. For data streams, clearly expressing chronological order while avoiding clutter, and keeping aging data visible, are important. The metaphor is drawn from the real-world sedimentation processes: objects fall due to gravity, and aggregate into strata over time. Inspired by this metaphor, data is visually depicted as falling objects using a force model to land on a surface, aggregating into strata over time. In this paper, we discuss how this metaphor addresses the specific challenge of smoothing the transition between incoming and aging data. We describe the metaphor's design space, a toolkit developed to facilitate its implementation, and example applications to a range of case studies. We then explore the generative capabilities of the design space through our toolkit. We finally illustrate creative extensions of the metaphor when applied to real streams of data.},
}
@article{p329,
journal = {IEEE TVCG},
year = 2013,
title = {Visualization of Shape Motions in Shape Space},
doi = {10.1109/TVCG.2013.230},
url = {http://dx.doi.org/10.1109/TVCG.2013.230},
author = {Taimouri, V. and Jing Hua},
pages = {2644--2652},
keywords = {Medial surface, shape space, comparative visualization, left ventricle diagnosis},
abstract = {Analysis of dynamic object deformations such as cardiac motion is of great importance, especially when there is a necessity to visualize and compare the deformation behavior across subjects. However, there is a lack of effective techniques for comparative visualization and assessment of a collection of motion data due to its 4-dimensional nature, i.e., time-varying three-dimensional shapes. From the geometric point of view, the motion change can be considered as a function defined on the 2D manifold of the surface. This paper presents a novel classification and visualization method based on a medial surface shape space, in which two novel shape descriptors are defined, for discriminating normal and abnormal human heart deformations as well as localizing the abnormal motion regions. In our medial surface shape space, the geodesic distance connecting two points in the space measures the similarity between their corresponding medial surfaces, which can quantify the similarity and disparity of the 3D heart motions. Furthermore, the novel descriptors can effectively localize the inconsistently deforming myopathic regions on the left ventricle. An easy visualization of heart motion sequences on the projected space allows users to distinguish the deformation differences. Our experimental results on both synthetic and real imaging data show that this method can automatically classify the healthy and myopathic subjects and accurately detect myopathic regions on the left ventricle, which outperforms other conventional cardiac diagnostic methods.},
}
@article{p330,
journal = {IEEE TVCG},
year = 2013,
title = {Visualizing Change over Time Using Dynamic Hierarchies: TreeVersity2 and the StemView},
doi = {10.1109/TVCG.2013.231},
url = {http://dx.doi.org/10.1109/TVCG.2013.231},
author = {Guerra-Gomez, J. and Pack, M.L. and Plaisant, C. and Shneiderman, B.},
pages = {2566--2575},
keywords = {Information visualization, Tree comparison},
abstract = {To analyze data such as the US Federal Budget or characteristics of the student population of a University it is common to look for changes over time. This task can be made easier and more fruitful if the analysis is performed by grouping by attributes, such as by Agencies, Bureaus and Accounts for the Budget, or Ethnicity, Gender and Major in a University. We present TreeVersity2, a web based interactive data visualization tool that allows users to analyze change in datasets by creating dynamic hierarchies based on the data attributes. TreeVersity2 introduces a novel space filling visualization (StemView) to represent change in trees at multiple levels - not just at the leaf level. With this visualization users can explore absolute and relative changes, created and removed nodes, and each node's actual values, while maintaining the context of the tree. In addition, TreeVersity2 provides overviews of change over the entire time period, and a reporting tool that lists outliers in textual form, which helps users identify the major changes in the data without having to manually setup filters. We validated TreeVersity2 with 12 case studies with organizations as diverse as the National Cancer Institute, Federal Drug Administration, Department of Transportation, Office of the Bursar of the University of Maryland, or eBay. Our case studies demonstrated that TreeVersity2 is flexible enough to be used in different domains and provide useful insights for the data owners. A TreeVersity2 demo can be found at https://treeversity.cattlab.umd.edu.},
}
@article{p331,
journal = {IEEE TVCG},
year = 2013,
title = {Visualizing Fuzzy Overlapping Communities in Networks},
doi = {10.1109/TVCG.2013.232},
url = {http://dx.doi.org/10.1109/TVCG.2013.232},
author = {Vehlow, C. and Reinhardt, T. and Weiskopf, D.},
pages = {2486--2495},
keywords = {Overlapping community visualization, fuzzy clustering, graph visualization, uncertainty visualization},
abstract = {An important feature of networks for many application domains is their community structure. This is because objects within the same community usually have at least one property in common. The investigation of community structure can therefore support the understanding of object attributes from the network topology alone. In real-world systems, objects may belong to several communities at the same time, i.e., communities can overlap. Analyzing fuzzy community memberships is essential to understand to what extent objects contribute to different communities and whether some communities are highly interconnected. We developed a visualization approach that is based on node-link diagrams and supports the investigation of fuzzy communities in weighted undirected graphs at different levels of detail. Starting with the network of communities, the user can continuously drill down to the network of individual nodes and finally analyze the membership distribution of nodes of interest. Our approach uses layout strategies and further visual mappings to graphically encode the fuzzy community memberships. The usefulness of our approach is illustrated by two case studies analyzing networks of different domains: social networking and biological interactions. The case studies showed that our layout and visualization approach helps investigate fuzzy overlapping communities. Fuzzy vertices as well as the different communities to which they belong can be easily identified based on node color and position.},
}
@article{p332,
journal = {IEEE TVCG},
year = 2013,
title = {Visualizing Request-Flow Comparison to Aid Performance Diagnosis in Distributed Systems},
doi = {10.1109/TVCG.2013.233},
url = {http://dx.doi.org/10.1109/TVCG.2013.233},
author = {Sambasivan, R.R. and Shafer, I. and Mazurek, M.L. and Ganger, G.R.},
pages = {2466--2475},
keywords = {Distributed systems, human factors, problem diagnosis, visualization},
abstract = {Distributed systems are complex to develop and administer, and performance problem diagnosis is particularly challenging. When performance degrades, the problem might be in any of the system's many components or could be a result of poor interactions among them. Recent research efforts have created tools that automatically localize the problem to a small number of potential culprits, but research is needed to understand what visualization techniques work best for helping distributed systems developers understand and explore their results. This paper compares the relative merits of three well-known visualization approaches (side-by-side, diff, and animation) in the context of presenting the results of one proven automated localization technique called request-flow comparison. Via a 26-person user study, which included real distributed systems developers, we identify the unique benefits that each approach provides for different problem types and usage modes.},
}
@article{p333,
journal = {IEEE TVCG},
year = 2013,
title = {What Makes a Visualization Memorable?},
doi = {10.1109/TVCG.2013.234},
url = {http://dx.doi.org/10.1109/TVCG.2013.234},
author = {Borkin, M. and Vo, A.A. and Bylinskii, Z. and Isola, P. and Sunkavalli, S. and Oliva, A. and Pfister, H.},
pages = {2306--2315},
keywords = {Visualization taxonomy, information visualization, memorability},
abstract = {An ongoing debate in the Visualization community concerns the role that visualization types play in data understanding. In human cognition, understanding and memorability are intertwined. As a first step towards being able to ask questions about impact and effectiveness, here we ask: 'What makes a visualization memorable?' We ran the largest scale visualization study to date using 2,070 single-panel visualizations, categorized with visualization type (e.g., bar chart, line graph, etc.), collected from news media sites, government reports, scientific journals, and infographic sources. Each visualization was annotated with additional attributes, including ratings for data-ink ratios and visual densities. Using Amazon's Mechanical Turk, we collected memorability scores for hundreds of these visualizations, and discovered that observers are consistent in which visualizations they find memorable and forgettable. We find intuitive results (e.g., attributes like color and the inclusion of a human recognizable object enhance memorability) and less intuitive results (e.g., common graphs are less memorable than unique visualization types). Altogether our findings suggest that quantifying memorability is a general metric of the utility of information, an essential step towards determining how to design effective visualizations.},
}
@article{p399,
journal = {IEEE TVCG},
year = 2012,
title = {A User Study on Curved Edges in Graph Visualization},
doi = {10.1109/TVCG.2012.189},
url = {http://dx.doi.org/10.1109/TVCG.2012.189},
author = {Kai Xu and Rooney, C. and Passmore, P. and Dong-Han Ham and Nguyen, P.H.},
pages = {2449--2456},
keywords = {Graph, visualization, curved edges, evaluation},
abstract = {Recently there has been increasing research interest in displaying graphs with curved edges to produce more readable visualizations. While there are several automatic techniques, little has been done to evaluate their effectiveness empirically. In this paper we present two experiments studying the impact of edge curvature on graph readability. The goal is to understand the advantages and disadvantages of using curved edges for common graph tasks compared to straight line segments, which are the conventional choice for showing edges in node-link diagrams. We included several edge variations: straight edges, edges with different curvature levels, and mixed straight and curved edges. During the experiments, participants were asked to complete network tasks including determination of connectivity, shortest path, node degree, and common neighbors. We also asked the participants to provide subjective ratings of the aesthetics of different edge types. The results show significant performance differences between the straight and curved edges and clear distinctions between variations of curved edges.},
}
@article{p400,
journal = {IEEE TVCG},
year = 2012,
title = {Adaptive Composite Map Projections},
doi = {10.1109/TVCG.2012.192},
url = {http://dx.doi.org/10.1109/TVCG.2012.192},
author = {Jenny, B.},
pages = {2575--2582},
keywords = {Multi-scale map, web mapping, web cartography, web map projection, web Mercator, HTML5 Canvas},
abstract = {All major web mapping services use the web Mercator projection. This is a poor choice for maps of the entire globe or areas of the size of continents or larger countries because the Mercator projection shows medium and higher latitudes with extreme areal distortion and provides an erroneous impression of distances and relative areas. The web Mercator projection is also not able to show the entire globe, as polar latitudes cannot be mapped. When selecting an alternative projection for information visualization, rivaling factors have to be taken into account, such as map scale, the geographic area shown, the map's height-to-width ratio, and the type of cartographic visualization. It is impossible for a single map projection to meet the requirements for all these factors. The proposed composite map projection combines several projections that are recommended in cartographic literature and seamlessly morphs map space as the user changes map scale or the geographic region displayed. The composite projection adapts the map's geometry to scale, to the map's height-to-width ratio, and to the central latitude of the displayed area by replacing projections and adjusting their parameters. The composite projection shows the entire globe including poles; it portrays continents or larger countries with less distortion (optionally without areal distortion); and it can morph to the web Mercator projection for maps showing small regions.},
}
@article{p401,
journal = {IEEE TVCG},
year = 2012,
title = {Algorithms for Labeling Focus Regions},
doi = {10.1109/TVCG.2012.193},
url = {http://dx.doi.org/10.1109/TVCG.2012.193},
author = {Fink, M. and Haunert, J.-H. and Schulz, A. and Spoerhase, J. and Wolff, A.},
pages = {2583--2592},
keywords = {Focus+context techniques, data clustering, mobile and ubiquitous visualization, geographic/geospatial visualization},
abstract = {In this paper, we investigate the problem of labeling point sites in focus regions of maps or diagrams. This problem occurs, for example, when the user of a mapping service wants to see the names of restaurants or other POIs in a crowded downtown area but keep the overview over a larger area. Our approach is to place the labels at the boundary of the focus region and connect each site with its label by a linear connection, which is called a leader. In this way, we move labels from the focus region to the less valuable context region surrounding it. In order to make the leader layout well readable, we present algorithms that rule out crossings between leaders and optimize other characteristics such as total leader length and distance between labels. This yields a new variant of the boundary labeling problem, which has been studied in the literature. Other than in traditional boundary labeling, where leaders are usually schematized polylines, we focus on leaders that are either straight-line segments or Bezier curves. Further, we present algorithms that, given the sites, find a position of the focus region that optimizes the above characteristics. We also consider a variant of the problem where we have more sites than space for labels. In this situation, we assume that the sites are prioritized by the user. Alternatively, we take a new facility-location perspective which yields a clustering of the sites. We label one representative of each cluster. If the user wishes, we apply our approach to the sites within a cluster, giving details on demand.},
}
@article{p402,
journal = {IEEE TVCG},
year = 2012,
title = {An Empirical Model of Slope Ratio Comparisons},
doi = {10.1109/TVCG.2012.196},
url = {http://dx.doi.org/10.1109/TVCG.2012.196},
author = {Talbot, J. and Gerth, J. and Hanrahan, P.},
pages = {2613--2620},
keywords = {Banking to 45 degrees, slope perception, orientation resolution, aspect ratio selection},
abstract = {Comparing slopes is a fundamental graph reading task and the aspect ratio chosen for a plot influences how easy these comparisons are to make. According to Banking to 45°, a classic design guideline first proposed and studied by Cleveland et al., aspect ratios that center slopes around 45° minimize errors in visual judgments of slope ratios. This paper revisits this earlier work. Through exploratory pilot studies that expand Cleveland et al.'s experimental design, we develop an empirical model of slope ratio estimation that fits more extreme slope ratio judgments and two common slope ratio estimation strategies. We then run two experiments to validate our model. In the first, we show that our model fits more generally than the one proposed by Cleveland et al. and we find that, in general, slope ratio errors are not minimized around 45°. In the second experiment, we explore a novel hypothesis raised by our model: that visible baselines can substantially mitigate errors made in slope judgments. We conclude with an application of our model to aspect ratio selection.},
}
@article{p403,
journal = {IEEE TVCG},
year = 2012,
title = {An Empirical Study on Using Visual Embellishments in Visualization},
doi = {10.1109/TVCG.2012.197},
url = {http://dx.doi.org/10.1109/TVCG.2012.197},
author = {Borgo, R. and Abdul-Rahman, A. and Mohamed, F. and Grant, P.W. and Reppa, I. and Floridi, L. and Chen, M.},
pages = {2759--2768},
keywords = {Visual embellishments, metaphors, icons, cognition, working memory, long-term memory, visual search, evaluation},
abstract = {In written and spoken communications, figures of speech (e.g., metaphors and synecdoche) are often used as an aid to help convey abstract or less tangible concepts. However, the benefits of using rhetorical illustrations or embellishments in visualization have so far been inconclusive. In this work, we report an empirical study to evaluate hypotheses that visual embellishments may aid memorization, visual search and concept comprehension. One major departure from related experiments in the literature is that we make use of a dual-task methodology in our experiment. This design offers an abstraction of typical situations where viewers do not have their full attention focused on visualization (e.g., in meetings and lectures). The secondary task introduces “divided attention”, and makes the effects of visual embellishments more observable. In addition, it also serves as additional masking in memory-based trials. The results of this study show that visual embellishments can help participants better remember the information depicted in visualization. On the other hand, visual embellishments can have a negative impact on the speed of visual search. The results show a complex pattern as to the benefits of visual embellishments in helping participants grasp key concepts from visualization.},
}
@article{p404,
journal = {IEEE TVCG},
year = 2012,
title = {Assessing the Effect of Visualizations on Bayesian Reasoning through Crowdsourcing},
doi = {10.1109/TVCG.2012.199},
url = {http://dx.doi.org/10.1109/TVCG.2012.199},
author = {Micallef, L. and Dragicevic, P. and Fekete, J.},
pages = {2536--2545},
keywords = {Bayesian reasoning, base rate fallacy, probabilistic judgment, Euler diagrams, glyphs, crowdsourcing},
abstract = {People have difficulty understanding statistical information and are unaware of their wrong judgments, particularly in Bayesian reasoning. Psychology studies suggest that the way Bayesian problems are represented can impact comprehension, but few visual designs have been evaluated and only populations with a specific background have been involved. In this study, a textual and six visual representations for three classic problems were compared using a diverse subject pool through crowdsourcing. Visualizations included area-proportional Euler diagrams, glyph representations, and hybrid diagrams combining both. Our study failed to replicate previous findings in that subjects' accuracy was remarkably lower and visualizations exhibited no measurable benefit. A second experiment confirmed that simply adding a visualization to a textual Bayesian problem is of little help, even when the text refers to the visualization, but suggests that visualizations are more effective when the text is given without numerical values. We discuss our findings and the need for more such experiments to be carried out on heterogeneous populations of non-experts.},
}
@article{p405,
journal = {IEEE TVCG},
year = 2012,
title = {Beyond Mouse and Keyboard: Expanding Design Considerations for Information Visualization Interactions},
doi = {10.1109/TVCG.2012.204},
url = {http://dx.doi.org/10.1109/TVCG.2012.204},
author = {Bongshin Lee and Isenberg, P. and Riche, N.H. and Carpendale, S.},
pages = {2689--2698},
keywords = {Design considerations, interaction, post-WIMP, NUI (Natural User Interface)},
abstract = {The importance of interaction to Information Visualization (InfoVis) and, in particular, of the interplay between interactivity and cognition is widely recognized [12, 15, 32, 55, 70]. This interplay, combined with the demands from increasingly large and complex datasets, is driving the increased significance of interaction in InfoVis. In parallel, there have been rapid advances in many facets of interaction technologies. However, InfoVis interactions have yet to take full advantage of these new possibilities in interaction technologies, as they largely still employ the traditional desktop, mouse, and keyboard setup of WIMP (Windows, Icons, Menus, and a Pointer) interfaces. In this paper, we reflect more broadly about the role of more “natural” interactions for InfoVis and provide opportunities for future research. We discuss and relate general HCI interaction models to existing InfoVis interaction classifications by looking at interactions from a novel angle, taking into account the entire spectrum of interactions. Our discussion of InfoVis-specific interaction design considerations helps us identify a series of underexplored attributes of interaction that can lead to new, more “natural,” interaction techniques for InfoVis.},
}
@article{p406,
journal = {IEEE TVCG},
year = 2012,
title = {Capturing the Design Space of Sequential Space-filling Layouts},
doi = {10.1109/TVCG.2012.205},
url = {http://dx.doi.org/10.1109/TVCG.2012.205},
author = {Baudel, T. and Broeksema, B.},
pages = {2593--2602},
keywords = {Layout, visualization models, tables & tree layouts, grids, treemaps, mosaic plots, dimensional stacking},
abstract = {We characterize the design space of the algorithms that sequentially tile a rectangular area with smaller, fixed-surface, rectangles. This space consists of five independent dimensions: Order, Size, Score, Recurse and Phrase. Each of these dimensions describes a particular aspect of such layout tasks. This class of layouts is interesting because, beyond encompassing simple grids, tables and trees, it also includes all kinds of treemaps involving the placement of rectangles. For instance, Slice and dice, Squarified, Strip and Pivot layouts are various points in this five dimensional space. Many classic statistics visualizations, such as 100% stacked bar charts, mosaic plots and dimensional stacking, are also instances of this class. A few new and potentially interesting points in this space are introduced, such as spiral treemaps and variations on the strip layout. The core algorithm is implemented as a JavaScript prototype that can be used as a layout component in a variety of InfoViz toolkits.},
}
@article{p407,
journal = {IEEE TVCG},
year = 2012,
title = {Comparing Clusterings Using Bertin's Idea},
doi = {10.1109/TVCG.2012.207},
url = {http://dx.doi.org/10.1109/TVCG.2012.207},
author = {Pilhofer, A. and Gribov, A. and Unwin, A.},
pages = {2506--2515},
keywords = {Order optimization, fluctuation diagrams, classification, seriation},
abstract = {Classifying a set of objects into clusters can be done in numerous ways, producing different results. They can be visually compared using contingency tables [27], mosaicplots [13], fluctuation diagrams [15], tableplots [20], (modified) parallel coordinates plots [28], Parallel Sets plots [18] or circos diagrams [19]. Unfortunately the interpretability of all these graphical displays decreases rapidly with the numbers of categories and clusterings. In his famous book A Semiology of Graphics [5] Bertin writes “the discovery of an ordered concept appears as the ultimate point in logical simplification since it permits reducing to a single instant the assimilation of series which previously required many instants of study”. Or in more everyday language, if you use good orderings you can see results immediately that with other orderings might take a lot of effort. This is also related to the idea of effect ordering [12], that data should be organised to reflect the effect you want to observe. This paper presents an efficient algorithm based on Bertin's idea and concepts related to Kendall's τ [17], which finds informative joint orders for two or more nominal classification variables. We also show how these orderings improve the various displays and how groups of corresponding categories can be detected using a top-down partitioning algorithm. Different clusterings based on data on the environmental performance of cars sold in Germany are used for illustration. All presented methods are available in the R package extracat which is used to compute the optimized orderings for the example dataset.},
}
@article{p408,
journal = {IEEE TVCG},
year = 2012,
title = {Compressed Adjacency Matrices: Untangling Gene Regulatory Networks},
doi = {10.1109/TVCG.2012.208},
url = {http://dx.doi.org/10.1109/TVCG.2012.208},
author = {Dinkla, K. and Westenberg, M.A. and van Wijk, J.J.},
pages = {2457--2466},
keywords = {Network, gene regulation, scale-free, adjacency matrix},
abstract = {We present a novel technique-Compressed Adjacency Matrices-for visualizing gene regulatory networks. These directed networks have strong structural characteristics: out-degrees with a scale-free distribution, in-degrees bound by a low maximum, and few and small cycles. Standard visualization techniques, such as node-link diagrams and adjacency matrices, are impeded by these network characteristics. The scale-free distribution of out-degrees causes a high number of intersecting edges in node-link diagrams. Adjacency matrices become space-inefficient due to the low in-degrees and the resulting sparse network. Compressed adjacency matrices, however, exploit these structural characteristics. By cutting open and rearranging an adjacency matrix, we achieve a compact and neatly-arranged visualization. Compressed adjacency matrices allow for easy detection of subnetworks with a specific structure, so-called motifs, which provide important knowledge about gene regulatory networks to domain experts. We summarize motifs commonly referred to in the literature, and relate them to network analysis tasks common to the visualization domain. We show that a user can easily find the important motifs in compressed adjacency matrices, and that this is hard in standard adjacency matrix and node-link diagrams. We also demonstrate that interaction techniques for standard adjacency matrices can be used for our compressed variant. These techniques include rearrangement clustering, highlighting, and filtering.},
}
@article{p409,
journal = {IEEE TVCG},
year = 2012,
title = {Design Considerations for Optimizing Storyline Visualizations},
doi = {10.1109/TVCG.2012.212},
url = {http://dx.doi.org/10.1109/TVCG.2012.212},
author = {Tanahashi, Y. and Kwan-Liu Ma},
pages = {2679--2688},
keywords = {Layout algorithm, timeline visualization, storyline visualization, design study},
abstract = {Storyline visualization is a technique used to depict the temporal dynamics of social interactions. This visualization technique was first introduced as a hand-drawn illustration in XKCD's “Movie Narrative Charts” [21]. If properly constructed, the visualization can convey both global trends and local interactions in the data. However, previous methods for automating storyline visualizations are overly simple, failing to achieve some of the essential principles practiced by professional illustrators. This paper presents a set of design considerations for generating aesthetically pleasing and legible storyline visualizations. Our layout algorithm is based on evolutionary computation, allowing us to effectively incorporate multiple objective functions. We show that the resulting visualizations have significantly improved aesthetics and legibility compared to existing techniques.},
}
@article{p410,
journal = {IEEE TVCG},
year = 2012,
title = {Design Study Methodology: Reflections from the Trenches and the Stacks},
doi = {10.1109/TVCG.2012.213},
url = {http://dx.doi.org/10.1109/TVCG.2012.213},
author = {Sedlmair, M. and Meyer, M. and Munzner, T.},
pages = {2431--2440},
keywords = {Design study, methodology, visualization, framework},
abstract = {Design studies are an increasingly popular form of problem-driven visualization research, yet there is little guidance available about how to do them effectively. In this paper we reflect on our combined experience of conducting twenty-one design studies, as well as reading and reviewing many more, and on an extensive literature review of other field work methods and methodologies. Based on this foundation we provide definitions, propose a methodological framework, and provide practical guidance for conducting design studies. We define a design study as a project in which visualization researchers analyze a specific real-world problem faced by domain experts, design a visualization system that supports solving this problem, validate the design, and reflect about lessons learned in order to refine visualization design guidelines. We characterize two axes - a task clarity axis from fuzzy to crisp and an information location axis from the domain expert's head to the computer - and use these axes to reason about design study contributions, their suitability, and uniqueness from other approaches. The proposed methodological framework consists of 9 stages: learn, winnow, cast, discover, design, implement, deploy, reflect, and write. For each stage we provide practical guidance and outline potential pitfalls. We also conducted an extensive literature survey of related methodological approaches that involve a significant amount of qualitative field work, and compare design study methodology to that of ethnography, grounded theory, and action research.},
}
@article{p411,
journal = {IEEE TVCG},
year = 2012,
title = {Different Strokes for Different Folks: Visual Presentation Design between Disciplines},
doi = {10.1109/TVCG.2012.214},
url = {http://dx.doi.org/10.1109/TVCG.2012.214},
author = {Gomez, S.R. and Jianu, R. and Ziemkiewicz, C. and Hua Guo and Laidlaw, D.H.},
pages = {2411--2420},
keywords = {Presentations, information visualization, design, visual analysis},
abstract = {We present an ethnographic study of design differences in visual presentations between academic disciplines. Characterizing design conventions between users and data domains is an important step in developing hypotheses, tools, and design guidelines for information visualization. In this paper, disciplines are compared at a coarse scale between four groups of fields: social, natural, and formal sciences; and the humanities. Two commonplace presentation types were analyzed: electronic slideshows and whiteboard “chalk talks”. We found design differences in slideshows using two methods - coding and comparing manually-selected features, like charts and diagrams, and an image-based analysis using PCA called eigenslides. In whiteboard talks with controlled topics, we observed design behaviors, including using representations and formalisms from a participant's own discipline, that suggest authors might benefit from novel assistive tools for designing presentations. Based on these findings, we discuss opportunities for visualization ethnography and human-centered authoring tools for visual information.},
}
@article{p412,
journal = {IEEE TVCG},
year = 2012,
title = {Does an Eye Tracker Tell the Truth about Visualizations?: findings while Investigating Visualizations for Decision Making},
doi = {10.1109/TVCG.2012.215},
url = {http://dx.doi.org/10.1109/TVCG.2012.215},
author = {Sung-Hee Kim and Zhihua Dong and Hanjun Xian and Upatising, B. and Ji Soo Yi},
pages = {2421--2430},
keywords = {Visualized decision making, eye tracking, crowdsourcing, quantitative empirical study, limitations, peripheral vision},
abstract = {For information visualization researchers, eye tracking has been a useful tool to investigate research participants' underlying cognitive processes by tracking their eye movements while they interact with visual techniques. We used an eye tracker to better understand why participants with a variant of a tabular visualization called `SimulSort' outperformed ones with a conventional table and typical one-column sorting feature (i.e., Typical Sorting). The collected eye-tracking data certainly shed light on the detailed cognitive processes of the participants; SimulSort helped with decision-making tasks by promoting efficient browsing behavior and compensatory decision-making strategies. However, more interestingly, we also found unexpected eye-tracking patterns with SimulSort. We investigated the cause of the unexpected patterns through a crowdsourcing-based study (i.e., Experiment 2), which elicited an important limitation of the eye tracking method: incapability of capturing peripheral vision. This particular result would be a caveat for other visualization researchers who plan to use an eye tracker in their studies. In addition, the method to use a testing stimulus (i.e., influential column) in Experiment 2 to verify the existence of such limitations would be useful for researchers who would like to verify their eye tracking results.},
}
@article{p413,
journal = {IEEE TVCG},
year = 2012,
title = {Evaluating Sketchiness as a Visual Variable for the Depiction of Qualitative Uncertainty},
doi = {10.1109/TVCG.2012.220},
url = {http://dx.doi.org/10.1109/TVCG.2012.220},
author = {Boukhelifa, N. and Bezerianos, A. and Isenberg, T. and Fekete, J.},
pages = {2769--2778},
keywords = {Uncertainty visualization, qualitative evaluation, quantitative evaluation, perception},
abstract = {We report on results of a series of user studies on the perception of four visual variables that are commonly used in the literature to depict uncertainty. To the best of our knowledge, we provide the first formal evaluation of the use of these variables to facilitate an easier reading of uncertainty in visualizations that rely on line graphical primitives. In addition to blur, dashing and grayscale, we investigate the use of `sketchiness' as a visual variable because it conveys visual impreciseness that may be associated with data quality. Inspired by work in non-photorealistic rendering and by the features of hand-drawn lines, we generate line trajectories that resemble hand-drawn strokes of various levels of proficiency-ranging from child to adult strokes-where the amount of perturbations in the line corresponds to the level of uncertainty in the data. Our results show that sketchiness is a viable alternative for the visualization of uncertainty in lines and is as intuitive as blur; although people subjectively prefer dashing style over blur, grayscale and sketchiness. We discuss advantages and limitations of each technique and conclude with design considerations on how to deploy these visual variables to effectively depict various levels of uncertainty for line marks.},
}
@article{p414,
journal = {IEEE TVCG},
year = 2012,
title = {Evaluating the Effect of Style in Information Visualization},
doi = {10.1109/TVCG.2012.221},
url = {http://dx.doi.org/10.1109/TVCG.2012.221},
author = {Vande Moere, A. and Tomitsch, M. and Wimmer, C. and Christoph, B. and Grechenig, T.},
pages = {2739--2748},
keywords = {Visualization, design, style, aesthetics, evaluation, online study, user experience},
abstract = {This paper reports on a between-subject, comparative online study of three information visualization demonstrators that each displayed the same dataset by way of an identical scatterplot technique, yet were different in style in terms of visual and interactive embellishment. We validated stylistic adherence and integrity through a separate experiment in which a small cohort of participants assigned our three demonstrators to predefined groups of stylistic examples, after which they described the styles with their own words. From the online study, we discovered significant differences in how participants execute specific interaction operations, and the types of insights that followed from them. However, in spite of significant differences in apparent usability, enjoyability and usefulness between the style demonstrators, no variation was found on the self-reported depth, expert-rated depth, confidence or difficulty of the resulting insights. Three different methods of insight analysis have been applied, revealing how style impacts the creation of insights, ranging from higher-level pattern seeking to a more reflective and interpretative engagement with content, which is what underlies the patterns. As this study only forms the first step in determining how the impact of style in information visualization could be best evaluated, we propose several guidelines and tips on how to gather, compare and categorize insights through an online evaluation study, particularly in terms of analyzing the concise, yet wide variety of insights and observations in a trustworthy and reproducible manner.},
}
@article{p415,
journal = {IEEE TVCG},
year = 2012,
title = {Exploring Flow, Factors, and Outcomes of Temporal Event Sequences with the Outflow Visualization},
doi = {10.1109/TVCG.2012.225},
url = {http://dx.doi.org/10.1109/TVCG.2012.225},
author = {Wongsuphasawat, K. and Gotz, D.},
pages = {2659--2668},
keywords = {Outflow, information visualization, temporal event sequences, state diagram, state transition},
abstract = {Event sequence data is common in many domains, ranging from electronic medical records (EMRs) to sports events. Moreover, such sequences often result in measurable outcomes (e.g., life or death, win or loss). Collections of event sequences can be aggregated together to form event progression pathways. These pathways can then be connected with outcomes to model how alternative chains of events may lead to different results. This paper describes the Outflow visualization technique, designed to (1) aggregate multiple event sequences, (2) display the aggregate pathways through different event states with timing and cardinality, (3) summarize the pathways' corresponding outcomes, and (4) allow users to explore external factors that correlate with specific pathway state transitions. Results from a user study with twelve participants show that users were able to learn how to use Outflow easily with limited training and perform a range of tasks both accurately and rapidly.},
}
@article{p416,
journal = {IEEE TVCG},
year = 2012,
title = {Facilitating Discourse Analysis with Interactive Visualization},
doi = {10.1109/TVCG.2012.226},
url = {http://dx.doi.org/10.1109/TVCG.2012.226},
author = {Jian Zhao and Chevalier, F. and Collins, C. and Balakrishnan, R.},
pages = {2639--2648},
keywords = {Discourse structure, tree comparison, computational linguistics, visual analytics, interaction techniques},
abstract = {A discourse parser is a natural language processing system which can represent the organization of a document based on a rhetorical structure tree-one of the key data structures enabling applications such as text summarization, question answering and dialogue generation. Computational linguistics researchers currently rely on manually exploring and comparing the discourse structures to get intuitions for improving parsing algorithms. In this paper, we present DAViewer, an interactive visualization system for assisting computational linguistics researchers to explore, compare, evaluate and annotate the results of discourse parsers. An iterative user-centered design process with domain experts was conducted in the development of DAViewer. We report the results of an informal formative study of the system to better understand how the proposed visualization and interaction techniques are used in the real research environment.},
}
@article{p417,
journal = {IEEE TVCG},
year = 2012,
title = {Graphical Overlays: Using Layered Elements to Aid Chart Reading},
doi = {10.1109/TVCG.2012.229},
url = {http://dx.doi.org/10.1109/TVCG.2012.229},
author = {Kong, N. and Agrawala, M.},
pages = {2631--2638},
keywords = {Visualization, overlays, graphical perception, graph comprehension},
abstract = {Reading a visualization can involve a number of tasks such as extracting, comparing or aggregating numerical values. Yet, most of the charts that are published in newspapers, reports, books, and on the Web only support a subset of these tasks. In this paper we introduce graphical overlays-visual elements that are layered onto charts to facilitate a larger set of chart reading tasks. These overlays directly support the lower-level perceptual and cognitive processes that viewers must perform to read a chart. We identify five main types of overlays that support these processes; the overlays can provide (1) reference structures such as gridlines, (2) highlights such as outlines around important marks, (3) redundant encodings such as numerical data labels, (4) summary statistics such as the mean or max and (5) annotations such as descriptive text for context. We then present an automated system that applies user-chosen graphical overlays to existing chart bitmaps. Our approach is based on the insight that generating most of these graphical overlays only requires knowing the properties of the visual marks and axes that encode the data, but does not require access to the underlying data values. Thus, our system analyzes the chart bitmap to extract only the properties necessary to generate the desired overlay. We also discuss techniques for generating interactive overlays that provide additional controls to viewers. We demonstrate several examples of each overlay type for bar, pie and line charts.},
}
@article{p418,
journal = {IEEE TVCG},
year = 2012,
title = {Graphical Tests for Power Comparison of Competing Designs},
doi = {10.1109/TVCG.2012.230},
url = {http://dx.doi.org/10.1109/TVCG.2012.230},
author = {Hofmann, H. and Follett, L. and Majumder, M. and Cook, D.},
pages = {2441--2448},
keywords = {Lineups, Visual inference, Power comparison, Efficiency of displays},
abstract = {Lineups [4, 28] have been established as tools for visual testing similar to standard statistical inference tests, allowing us to evaluate the validity of graphical findings in an objective manner. In simulation studies [12] lineups have been shown as being efficient: the power of visual tests is comparable to classical tests while being much less stringent in terms of distributional assumptions made. This makes lineups versatile, yet powerful, tools in situations where conditions for regular statistical tests are not or cannot be met. In this paper we introduce lineups as a tool for evaluating the power of competing graphical designs. We highlight some of the theoretical properties and then show results from two studies evaluating competing designs: both studies are designed to go to the limits of our perceptual abilities to highlight differences between designs. We use both accuracy and speed of evaluation as measures of a successful design. The first study compares the choice of coordinate system: polar versus cartesian coordinates. The results show strong support in favor of cartesian coordinates in finding fast and accurate answers to spotting patterns. The second study is aimed at finding shift differences between distributions. Both studies are motivated by data problems that we have recently encountered, and explore using simulated data to evaluate the plot designs under controlled conditions. Amazon Mechanical Turk (MTurk) is used to conduct the studies. The lineups provide an effective mechanism for objectively evaluating plot designs.},
}
@article{p419,
journal = {IEEE TVCG},
year = 2012,
title = {How Capacity Limits of Attention Influence Information Visualization Effectiveness},
doi = {10.1109/TVCG.2012.233},
url = {http://dx.doi.org/10.1109/TVCG.2012.233},
author = {Haroz, S. and Whitney, D.},
pages = {2402--2410},
keywords = {Perception, attention, color, motion, user study, nominal axis, layout, goal-oriented design},
abstract = {In this paper, we explore how the capacity limits of attention influence the effectiveness of information visualizations. We conducted a series of experiments to test how visual feature type (color vs. motion), layout, and variety of visual elements impacted user performance. The experiments tested users' abilities to (1) determine if a specified target is on the screen, (2) detect an odd-ball, deviant target, different from the other visible objects, and (3) gain a qualitative overview by judging the number of unique categories on the screen. Our results show that the severe capacity limits of attention strongly modulate the effectiveness of information visualizations, particularly the ability to detect unexpected information. Keeping in mind these capacity limits, we conclude with a set of design guidelines which depend on a visualization's intended use.},
}
@article{p420,
journal = {IEEE TVCG},
year = 2012,
title = {Intelligent Graph Layout Using Many Users' Input},
doi = {10.1109/TVCG.2012.236},
url = {http://dx.doi.org/10.1109/TVCG.2012.236},
author = {Xiaoru Yuan and Limei Che and Yifan Hu and Xin Zhang},
pages = {2699--2708},
keywords = {Graph layout, Laplacian matrix, force directed layout, stress model, merging, editing, crowd sourcing},
abstract = {In this paper, we propose a new strategy for graph drawing utilizing layouts of many sub-graphs supplied by a large group of people in a crowd sourcing manner. We developed an algorithm based on Laplacian constrained distance embedding to merge subgraphs submitted by different users, while attempting to maintain the topological information of the individual input layouts. To facilitate collection of layouts from many people, a light-weight interactive system has been designed to enable convenient dynamic viewing, modification and traversing between layouts. Compared with other existing graph layout algorithms, our approach can achieve more aesthetic and meaningful layouts with high user preference.},
}
@article{p421,
journal = {IEEE TVCG},
year = 2012,
title = {Interaction Support for Visual Comparison Inspired by Natural Behavior},
doi = {10.1109/TVCG.2012.237},
url = {http://dx.doi.org/10.1109/TVCG.2012.237},
author = {Tominski, C. and Forsell, C. and Johansson, J.},
pages = {2719--2728},
keywords = {Interaction techniques, visual comparison, visualization, human-computer interaction, natural interaction},
abstract = {Visual comparison is an intrinsic part of interactive data exploration and analysis. The literature provides a large body of existing solutions that help users accomplish comparison tasks. These solutions are mostly of visual nature and custom-made for specific data. We ask the question if a more general support is possible by focusing on the interaction aspect of comparison tasks. As an answer to this question, we propose a novel interaction concept that is inspired by real-world behavior of people comparing information printed on paper. In line with real-world interaction, our approach supports users (1) in interactively specifying pieces of graphical information to be compared, (2) in flexibly arranging these pieces on the screen, and (3) in performing the actual comparison of side-by-side and overlapping arrangements of the graphical information. Complementary visual cues and add-ons further assist users in carrying out comparison tasks. Our concept and the integrated interaction techniques are generally applicable and can be coupled with different visualization techniques. We implemented an interactive prototype and conducted a qualitative user study to assess the concept's usefulness in the context of three different visualization techniques. The obtained feedback indicates that our interaction techniques mimic the natural behavior quite well, can be learned quickly, and are easy to apply to visual comparison tasks.},
}
@article{p422,
journal = {IEEE TVCG},
year = 2012,
title = {Interactive Level-of-Detail Rendering of Large Graphs},
doi = {10.1109/TVCG.2012.238},
url = {http://dx.doi.org/10.1109/TVCG.2012.238},
author = {Zinsmaier, M. and Brandes, U. and Deussen, O. and Strobelt, H.},
pages = {2486--2495},
keywords = {Graph visualization, OpenGL, edge aggregation},
abstract = {We propose a technique that allows straight-line graph drawings to be rendered interactively with adjustable level of detail. The approach consists of a novel combination of edge cumulation with density-based node aggregation and is designed to exploit common graphics hardware for speed. It operates directly on graph data and does not require precomputed hierarchies or meshes. As proof of concept, we present an implementation that scales to graphs with millions of nodes and edges, and discuss several example applications.},
}
@article{p423,
journal = {IEEE TVCG},
year = 2012,
title = {Living Liquid: Design and Evaluation of an Exploratory Visualization Tool for Museum Visitors},
doi = {10.1109/TVCG.2012.244},
url = {http://dx.doi.org/10.1109/TVCG.2012.244},
author = {Ma, J. and Liao, I. and Kwan-Liu Ma and Frazier, J.},
pages = {2799--2808},
keywords = {Information visualization, user interaction, evaluation, user studies, science museums, informal learning environments},
abstract = {Interactive visualizations can allow science museum visitors to explore new worlds by seeing and interacting with scientific data. However, designing interactive visualizations for informal learning environments, such as museums, presents several challenges. First, visualizations must engage visitors on a personal level. Second, visitors often lack the background to interpret visualizations of scientific data. Third, visitors have very limited time at individual exhibits in museums. This paper examines these design considerations through the iterative development and evaluation of an interactive exhibit as a visualization tool that gives museumgoers access to scientific data generated and used by researchers. The exhibit prototype, Living Liquid, encourages visitors to ask and answer their own questions while exploring the time-varying global distribution of simulated marine microbes using a touchscreen interface. Iterative development proceeded through three rounds of formative evaluations using think-aloud protocols and interviews, each round informing a key visualization design decision: (1) what to visualize to initiate inquiry, (2) how to link data at the microscopic scale to global patterns, and (3) how to include additional data that allows visitors to pursue their own questions. Data from visitor evaluations suggests that, when designing visualizations for public audiences, one should (1) avoid distracting visitors from data that they should explore, (2) incorporate background information into the visualization, (3) favor understandability over scientific accuracy, and (4) layer data accessibility to structure inquiry. Lessons learned from this case study add to our growing understanding of how to use visualizations to actively engage learners with scientific data.},
}
@article{p424,
journal = {IEEE TVCG},
year = 2012,
title = {Memorability of Visual Features in Network Diagrams},
doi = {10.1109/TVCG.2012.245},
url = {http://dx.doi.org/10.1109/TVCG.2012.245},
author = {Marriott, K. and Purchase, H. and Wybrow, M. and Goncu, C.},
pages = {2477--2485},
keywords = {Network diagrams, graph layout, perceptual theories, visual features, diagram recall, experiment},
abstract = {We investigate the cognitive impact of various layout features-symmetry, alignment, collinearity, axis alignment and orthogonality - on the recall of network diagrams (graphs). This provides insight into how people internalize these diagrams and what features should or shouldn't be utilised when designing static and interactive network-based visualisations. Participants were asked to study, remember, and draw a series of small network diagrams, each drawn to emphasise a particular visual feature. The visual features were based on existing theories of perception, and the task enabled visual processing at the visceral level only. Our results strongly support the importance of visual features such as symmetry, collinearity and orthogonality, while not showing any significant impact for node-alignment or parallel edges.},
}
@article{p425,
journal = {IEEE TVCG},
year = 2012,
title = {Organizing Search Results with a Reference Map},
doi = {10.1109/TVCG.2012.250},
url = {http://dx.doi.org/10.1109/TVCG.2012.250},
author = {Nocaj, A. and Brandes, U.},
pages = {2546--2555},
keywords = {Search results, mental map, voronoi treemaps, dynamic graph layout, multidimensional scaling, edge bundling},
abstract = {We propose a method to highlight query hits in hierarchically clustered collections of interrelated items such as digital libraries or knowledge bases. The method is based on the idea that organizing search results similarly to their arrangement on a fixed reference map facilitates orientation and assessment by preserving a user's mental map. Here, the reference map is built from an MDS layout of the items in a Voronoi treemap representing their hierarchical clustering, and we use techniques from dynamic graph layout to align query results with the map. The approach is illustrated on an archive of newspaper articles.},
}
@article{p426,
journal = {IEEE TVCG},
year = 2012,
title = {Perception of Visual Variables on Tiled Wall-Sized Displays for Information Visualization Applications},
doi = {10.1109/TVCG.2012.251},
url = {http://dx.doi.org/10.1109/TVCG.2012.251},
author = {Bezerianos, A. and Isenberg, P.},
pages = {2516--2525},
keywords = {Information visualization, perception, wall-displays},
abstract = {We present the results of two user studies on the perception of visual variables on tiled high-resolution wall-sized displays. We contribute an understanding of, and indicators predicting how, large variations in viewing distances and viewing angles affect the accurate perception of angles, areas, and lengths. Our work, thus, helps visualization researchers with design considerations on how to create effective visualizations for these spaces. The first study showed that perception accuracy was impacted most when viewers were close to the wall but differently for each variable (Angle, Area, Length). Our second study examined the effect of perception when participants could move freely compared to when they had a static viewpoint. We found that a far but static viewpoint was as accurate but less time consuming than one that included free motion. Based on our findings, we recommend encouraging viewers to stand further back from the display when conducting perception estimation tasks. If tasks need to be conducted close to the wall display, important information should be placed directly in front of the viewer or above, and viewers should be provided with an estimation of the distortion effects predicted by our work-or encouraged to physically navigate the wall in specific ways to reduce judgement error.},
}
@article{p427,
journal = {IEEE TVCG},
year = 2012,
title = {PivotPaths: Strolling through Faceted Information Spaces},
doi = {10.1109/TVCG.2012.252},
url = {http://dx.doi.org/10.1109/TVCG.2012.252},
author = {Dork, M. and Riche, N.H. and Ramos, G. and Dumais, S.},
pages = {2709--2718},
keywords = {Information visualization, interactivity, node-link diagrams, animation, information seeking, exploratory search},
abstract = {We present PivotPaths, an interactive visualization for exploring faceted information resources. During both work and leisure, we increasingly interact with information spaces that contain multiple facets and relations, such as authors, keywords, and citations of academic publications, or actors and genres of movies. To navigate these interlinked resources today, one typically selects items from facet lists resulting in abrupt changes from one subset of data to another. While filtering is useful to retrieve results matching specific criteria, it can be difficult to see how facets and items relate and to comprehend the effect of filter operations. In contrast, the PivotPaths interface exposes faceted relations as visual paths in arrangements that invite the viewer to `take a stroll' through an information space. PivotPaths supports pivot operations as lightweight interaction techniques that trigger gradual transitions between views. We designed the interface to allow for casual traversal of large collections in an aesthetically pleasing manner that encourages exploration and serendipitous discoveries. This paper shares the findings from our iterative design-and-evaluation process that included semi-structured interviews and a two-week deployment of PivotPaths applied to a large database of academic publications.},
}
@article{p428,
journal = {IEEE TVCG},
year = 2012,
title = {RankExplorer: Visualization of Ranking Changes in Large Time Series Data},
doi = {10.1109/TVCG.2012.253},
url = {http://dx.doi.org/10.1109/TVCG.2012.253},
author = {Conglei Shi and Weiwei Cui and Shixia Liu and Panpan Xu and Wei Chen and Huamin Qu},
pages = {2669--2678},
keywords = {Time-series data, ranking change, ThemeRiver, interaction techniques},
abstract = {For many applications involving time series data, people are often interested in the changes of item values over time as well as their ranking changes. For example, people search many words via search engines like Google and Bing every day. Analysts are interested in both the absolute searching number for each word as well as their relative rankings. Both sets of statistics may change over time. For very large time series data with thousands of items, how to visually present ranking changes is an interesting challenge. In this paper, we propose RankExplorer, a novel visualization method based on ThemeRiver to reveal the ranking changes. Our method consists of four major components: 1) a segmentation method which partitions a large set of time series curves into a manageable number of ranking categories; 2) an extended ThemeRiver view with embedded color bars and changing glyphs to show the evolution of aggregation values related to each ranking category over time as well as the content changes in each ranking category; 3) a trend curve to show the degree of ranking changes over time; 4) rich user interactions to support interactive exploration of ranking changes. We have applied our method to some real time series data and the case studies demonstrate that our method can reveal the underlying patterns related to ranking changes which might otherwise be obscured in traditional visualizations.},
}
@article{p429,
journal = {IEEE TVCG},
year = 2012,
title = {RelEx: Visualization for Actively Changing Overlay Network Specifications},
doi = {10.1109/TVCG.2012.255},
url = {http://dx.doi.org/10.1109/TVCG.2012.255},
author = {Sedlmair, M. and Frank, A. and Munzner, T. and Butz, A.},
pages = {2729--2738},
keywords = {Network visualization, change management, traffic routing, traffic optimization, automotive, design study},
abstract = {We present a network visualization design study focused on supporting automotive engineers who need to specify and optimize traffic patterns for in-car communication networks. The task and data abstractions that we derived support actively making changes to an overlay network, where logical communication specifications must be mapped to an underlying physical network. These abstractions are very different from the dominant use case in visual network analysis, namely identifying clusters and central nodes, that stems from the domain of social network analysis. Our visualization tool RelEx was created and iteratively refined through a full user-centered design process that included a full problem characterization phase before tool design began, paper prototyping, iterative refinement in close collaboration with expert users for formative evaluation, deployment in the field with real analysts using their own data, usability testing with non-expert users, and summative evaluation at the end of the deployment. In the summative post-deployment study, which entailed domain experts using the tool over several weeks in their daily practice, we documented many examples where the use of RelEx simplified or sped up their work compared to previous practices.},
}
@article{p430,
journal = {IEEE TVCG},
year = 2012,
title = {Representative Factor Generation for the Interactive Visual Analysis of High-Dimensional Data},
doi = {10.1109/TVCG.2012.256},
url = {http://dx.doi.org/10.1109/TVCG.2012.256},
author = {Turkay, C. and Lundervold, A. and Lundervold, A. and Hauser, H.},
pages = {2621--2630},
keywords = {Interactive visual analysis, high-dimensional data analysis},
abstract = {Datasets with a large number of dimensions per data item (hundreds or more) are challenging both for computational and visual analysis. Moreover, these dimensions have different characteristics and relations that result in sub-groups and/or hierarchies over the set of dimensions. Such structures lead to heterogeneity within the dimensions. Although the consideration of these structures is crucial for the analysis, most of the available analysis methods discard the heterogeneous relations among the dimensions. In this paper, we introduce the construction and utilization of representative factors for the interactive visual analysis of structures in high-dimensional datasets. First, we present a selection of methods to investigate the sub-groups in the dimension set and associate representative factors with those groups of dimensions. Second, we introduce how these factors are included in the interactive visual analysis cycle together with the original dimensions. We then provide the steps of an analytical procedure that iteratively analyzes the datasets through the use of representative factors. We discuss how our methods improve the reliability and interpretability of the analysis process by enabling more informed selections of computational tools. Finally, we demonstrate our techniques on the analysis of brain imaging study results that are performed over a large group of subjects.},
}
@article{p431,
journal = {IEEE TVCG},
year = 2012,
title = {Sketchy Rendering for Information Visualization},
doi = {10.1109/TVCG.2012.262},
url = {http://dx.doi.org/10.1109/TVCG.2012.262},
author = {Wood, J. and Isenberg, P. and Isenberg, T. and Dykes, J. and Boukhelifa, N. and Slingsby, A.},
pages = {2749--2758},
keywords = {NPR, non-photorealistic rendering, sketch, hand-drawn, uncertainty, visualization},
abstract = {We present and evaluate a framework for constructing sketchy style information visualizations that mimic data graphics drawn by hand. We provide an alternative renderer for the Processing graphics environment that redefines core drawing primitives including line, polygon and ellipse rendering. These primitives allow higher-level graphical features such as bar charts, line charts, treemaps and node-link diagrams to be drawn in a sketchy style with a specified degree of sketchiness. The framework is designed to be easily integrated into existing visualization implementations with minimal programming modification or design effort. We show examples of use for statistical graphics, conveying spatial imprecision and for enhancing aesthetic and narrative qualities of visualization. We evaluate user perception of sketchiness of areal features through a series of stimulus-response tests in order to assess users' ability to place sketchiness on a ratio scale, and to estimate area. Results suggest relative area judgment is compromised by sketchy rendering and that its influence is dependent on the shape being rendered. They show that degree of sketchiness may be judged on an ordinal scale but that its judgement varies strongly between individuals. We evaluate higher-level impacts of sketchiness through user testing of scenarios that encourage user engagement with data visualization and willingness to critique visualization design. Results suggest that where a visualization is clearly sketchy, engagement may be increased and that attitudes to participating in visualization annotation are more positive. The results of our work have implications for effective information visualization design that go beyond the traditional role of sketching as a tool for prototyping or its use for an indication of general uncertainty.},
}
@article{p432,
journal = {IEEE TVCG},
year = 2012,
title = {SnapShot: Visualization to Propel Ice Hockey Analytics},
doi = {10.1109/TVCG.2012.263},
url = {http://dx.doi.org/10.1109/TVCG.2012.263},
author = {Pileggi, H. and Stolper, C.D. and Boyle, J.M. and Stasko, J.},
pages = {2819--2828},
keywords = {Visual knowledge discovery, visual knowledge representation, hypothesis testing, visual evidence, human computer interaction},
abstract = {Sports analysts live in a world of dynamic games flattened into tables of numbers, divorced from the rinks, pitches, and courts where they were generated. Currently, these professional analysts use R, Stata, SAS, and other statistical software packages for uncovering insights from game data. Quantitative sports consultants seek a competitive advantage both for their clients and for themselves as analytics becomes increasingly valued by teams, clubs, and squads. In order for the information visualization community to support the members of this blossoming industry, it must recognize where and how visualization can enhance the existing analytical workflow. In this paper, we identify three primary stages of today's sports analyst's routine where visualization can be beneficially integrated: 1) exploring a dataspace; 2) sharing hypotheses with internal colleagues; and 3) communicating findings to stakeholders. Working closely with professional ice hockey analysts, we designed and built SnapShot, a system to integrate visualization into the hockey intelligence gathering process. SnapShot employs a variety of information visualization techniques to display shot data, yet given the importance of a specific hockey statistic, shot length, we introduce a technique, the radial heat map. Through a user study, we received encouraging feedback from several professional analysts, both independent consultants and professional team personnel.},
}
@article{p433,
journal = {IEEE TVCG},
year = 2012,
title = {Spatial Text Visualization Using Automatic Typographic Maps},
doi = {10.1109/TVCG.2012.264},
url = {http://dx.doi.org/10.1109/TVCG.2012.264},
author = {Afzal, S. and Maciejewski, R. and Yun Jang and Elmqvist, N. and Ebert, D.S.},
pages = {2556--2564},
keywords = {Geovisualization, spatial data, text visualization, label placement},
abstract = {We present a method for automatically building typographic maps that merge text and spatial data into a visual representation where text alone forms the graphical features. We further show how to use this approach to visualize spatial data such as traffic density, crime rate, or demographic data. The technique accepts a vector representation of a geographic map and spatializes the textual labels in the space onto polylines and polygons based on user-defined visual attributes and constraints. Our sample implementation runs as a Web service, spatializing shape files from the OpenStreetMap project into typographic maps for any region.},
}
@article{p434,
journal = {IEEE TVCG},
year = 2012,
title = {Stacking-Based Visualization of Trajectory Attribute Data},
doi = {10.1109/TVCG.2012.265},
url = {http://dx.doi.org/10.1109/TVCG.2012.265},
author = {Tominski, C. and Schumann, H. and Andrienko, G. and Andrienko, N.},
pages = {2565--2574},
keywords = {Visualization, interaction, exploratory analysis, trajectory attribute data, spatio-temporal data},
abstract = {Visualizing trajectory attribute data is challenging because it involves showing the trajectories in their spatio-temporal context as well as the attribute values associated with the individual points of trajectories. Previous work on trajectory visualization addresses selected aspects of this problem, but not all of them. We present a novel approach to visualizing trajectory attribute data. Our solution covers space, time, and attribute values. Based on an analysis of relevant visualization tasks, we designed the visualization solution around the principle of stacking trajectory bands. The core of our approach is a hybrid 2D/3D display. A 2D map serves as a reference for the spatial context, and the trajectories are visualized as stacked 3D trajectory bands along which attribute values are encoded by color. Time is integrated through appropriate ordering of bands and through a dynamic query mechanism that feeds temporally aggregated information to a circular time display. An additional 2D time graph shows temporal information in full detail by stacking 2D trajectory bands. Our solution is equipped with analytical and interactive mechanisms for selecting and ordering of trajectories, and adjusting the color mapping, as well as coordinated highlighting and dedicated 3D navigation. We demonstrate the usefulness of our novel visualization by three examples related to radiation surveillance, traffic analysis, and maritime navigation. User feedback obtained in a small experiment indicates that our hybrid 2D/3D solution can be operated quite well.},
}
@article{p435,
journal = {IEEE TVCG},
year = 2012,
title = {Taxonomy-Based Glyph Design---with a Case Study on Visualizing Workflows of Biological Experiments},
doi = {10.1109/TVCG.2012.271},
url = {http://dx.doi.org/10.1109/TVCG.2012.271},
author = {Maguire, E. and Rocca-Serra, P. and Sansone, S.-A. and Davies, J. and Chen, M.},
pages = {2603--2612},
keywords = {Glyph-based techniques, taxonomies, design methodologies, bioinformatics visualization},
abstract = {Glyph-based visualization can offer elegant and concise presentation of multivariate information while enhancing speed and ease in visual search experienced by users. As with icon designs, glyphs are usually created based on the designers' experience and intuition, often in a spontaneous manner. Such a process does not scale well with the requirements of applications where a large number of concepts are to be encoded using glyphs. To alleviate such limitations, we propose a new systematic process for glyph design by exploring the parallel between the hierarchy of concept categorization and the ordering of discriminative capacity of visual channels. We examine the feasibility of this approach in an application where there is a pressing need for an efficient and effective means to visualize workflows of biological experiments. By processing thousands of workflow records in a public archive of biological experiments, we demonstrate that a cost-effective glyph design can be obtained by following a process of formulating a taxonomy with the aid of computation, identifying visual channels hierarchically, and defining application-specific abstraction and metaphors.},
}
@article{p436,
journal = {IEEE TVCG},
year = 2012,
title = {The DeepTree Exhibit: Visualizing the Tree of Life to Facilitate Informal Learning},
doi = {10.1109/TVCG.2012.272},
url = {http://dx.doi.org/10.1109/TVCG.2012.272},
author = {Block, F. and Horn, M.S. and Phillips, B.C. and Diamond, J. and Evans, E.M. and Chia Shen},
pages = {2789--2798},
keywords = {Informal science education, collaborative learning, large tree visualizations, multi-touch interaction},
abstract = {In this paper, we present the DeepTree exhibit, a multi-user, multi-touch interactive visualization of the Tree of Life. We developed DeepTree to facilitate collaborative learning of evolutionary concepts. We describe an iterative process in which a team of computer scientists, learning scientists, biologists, and museum curators worked together throughout design, development, and evaluation. We present the importance of designing the interactions and the visualization hand-in-hand in order to facilitate active learning. The outcome of this process is a fractal-based tree layout that reduces visual complexity while being able to capture all life on earth; a custom rendering and navigation engine that prioritizes visual appeal and smooth fly-through; and a multi-user interface that encourages collaborative exploration while offering guided discovery. We present an evaluation showing that the large dataset encouraged free exploration, triggered emotional responses, and facilitated visitor engagement and informal learning.},
}
@article{p437,
journal = {IEEE TVCG},
year = 2012,
title = {Understanding Pen and Touch Interaction for Data Exploration on Interactive Whiteboards},
doi = {10.1109/TVCG.2012.275},
url = {http://dx.doi.org/10.1109/TVCG.2012.275},
author = {Walny, J. and Bongshin Lee and Johns, P. and Riche, N.H. and Carpendale, S.},
pages = {2779--2788},
keywords = {Pen and touch, interaction, Wizard of Oz, whiteboard, data exploration},
abstract = {Current interfaces for common information visualizations such as bar graphs, line graphs, and scatterplots usually make use of the WIMP (Windows, Icons, Menus and a Pointer) interface paradigm with its frequently discussed problems of multiple levels of indirection via cascading menus, dialog boxes, and control panels. Recent advances in interface capabilities such as the availability of pen and touch interaction challenge us to re-think this and investigate more direct access to both the visualizations and the data they portray. We conducted a Wizard of Oz study to explore applying pen and touch interaction to the creation of information visualization interfaces on interactive whiteboards without implementing a plethora of recognizers. Our wizard acted as a robust and flexible pen and touch recognizer, giving participants maximum freedom in how they interacted with the system. Based on our qualitative analysis of the interactions our participants used, we discuss our insights about pen and touch interactions in the context of learnability and the interplay between pen and touch gestures. We conclude with suggestions for designing pen and touch enabled interactive visualization interfaces.},
}
@article{p438,
journal = {IEEE TVCG},
year = 2012,
title = {Visual Semiotics & Uncertainty Visualization: An Empirical Study},
doi = {10.1109/TVCG.2012.279},
url = {http://dx.doi.org/10.1109/TVCG.2012.279},
author = {MacEachren, A.M. and Roth, R.E. and O'Brien, J. and Li, B. and Swingley, D. and Gahegan, M.},
pages = {2496--2505},
keywords = {Uncertainty visualization, uncertainty categories, visual variables, semiotics},
abstract = {This paper presents two linked empirical studies focused on uncertainty visualization. The experiments are framed from two conceptual perspectives. First, a typology of uncertainty is used to delineate kinds of uncertainty matched with space, time, and attribute components of data. Second, concepts from visual semiotics are applied to characterize the kind of visual signification that is appropriate for representing those different categories of uncertainty. This framework guided the two experiments reported here. The first addresses representation intuitiveness, considering both visual variables and iconicity of representation. The second addresses relative performance of the most intuitive abstract and iconic representations of uncertainty on a map reading task. Combined results suggest initial guidelines for representing uncertainty and discussion focuses on practical applicability of results.},
}
@article{p439,
journal = {IEEE TVCG},
year = 2012,
title = {Visualizing Flow of Uncertainty through Analytical Processes},
doi = {10.1109/TVCG.2012.285},
url = {http://dx.doi.org/10.1109/TVCG.2012.285},
author = {Yingcai Wu and Guo-Xun Yuan and Kwan-Liu Ma},
pages = {2526--2535},
keywords = {Uncertainty visualization, uncertainty quantification, uncertainty propagation, error ellipsoids, uncertainty fusion},
abstract = {Uncertainty can arise in any stage of a visual analytics process, especially in data-intensive applications with a sequence of data transformations. Additionally, throughout the process of multidimensional, multivariate data analysis, uncertainty due to data transformation and integration may split, merge, increase, or decrease. This dynamic characteristic along with other features of uncertainty pose a great challenge to effective uncertainty-aware visualization. This paper presents a new framework for modeling uncertainty and characterizing the evolution of the uncertainty information through analytical processes. Based on the framework, we have designed a visual metaphor called uncertainty flow to visually and intuitively summarize how uncertainty information propagates over the whole analysis pipeline. Our system allows analysts to interact with and analyze the uncertainty information at different levels of detail. Three experiments were conducted to demonstrate the effectiveness and intuitiveness of our design.},
}
@article{p440,
journal = {IEEE TVCG},
year = 2012,
title = {Visualizing Network Traffic to Understand the Performance of Massively Parallel Simulations},
doi = {10.1109/TVCG.2012.286},
url = {http://dx.doi.org/10.1109/TVCG.2012.286},
author = {Landge, A.G. and Levine, J.A. and Bhatele, A. and Isaacs, K.E. and Gamblin, T. and Schulz, M. and Langer, S. and Bremer, P.-T. and Pascucci, V.},
pages = {2467--2476},
keywords = {Performance analysis, network traffic visualization, projected graph layouts},
abstract = {The performance of massively parallel applications is often heavily impacted by the cost of communication among compute nodes. However, determining how to best use the network is a formidable task, made challenging by the ever increasing size and complexity of modern supercomputers. This paper applies visualization techniques to aid parallel application developers in understanding the network activity by enabling a detailed exploration of the flow of packets through the hardware interconnect. In order to visualize this large and complex data, we employ two linked views of the hardware network. The first is a 2D view, that represents the network structure as one of several simplified planar projections. This view is designed to allow a user to easily identify trends and patterns in the network traffic. The second is a 3D view that augments the 2D view by preserving the physical network topology and providing a context that is familiar to the application developers. Using the massively parallel multi-physics code pF3D as a case study, we demonstrate that our tool provides valuable insight that we use to explain and optimize pF3D's performance on an IBM Blue Gene/P system.},
}
@article{p441,
journal = {IEEE TVCG},
year = 2012,
title = {Visualizing Student Histories Using Clustering and Composition},
doi = {10.1109/TVCG.2012.288},
url = {http://dx.doi.org/10.1109/TVCG.2012.288},
author = {Trimm, D. and Rheingans, P. and desJardins, M.},
pages = {2809--2818},
keywords = {Clustering, aggregate visualization, student performance analysis, visualization composition},
abstract = {While intuitive time-series visualizations exist for common datasets, student course history data is difficult to represent using traditional visualization techniques due to its concurrent nature. A visual composition process is developed and applied to reveal trends across various groupings. By working closely with educators, analytic strategies and techniques are developed to leverage the visualization composition to reveal unknown trends in the data. Furthermore, clustering algorithms are developed to group common course-grade histories for further analysis. Lastly, variations of the composition process are implemented to reveal subtle differences in the underlying data. These analytic tools and techniques enabled educators to confirm expected trends and to discover new ones.},
}
@article{p442,
journal = {IEEE TVCG},
year = 2012,
title = {Whisper: Tracing the Spatiotemporal Process of Information Diffusion in Real Time},
doi = {10.1109/TVCG.2012.291},
url = {http://dx.doi.org/10.1109/TVCG.2012.291},
author = {Nan Cao and Yu-Ru Lin and Xiaohua Sun and Lazer, D. and Shixia Liu and Huamin Qu},
pages = {2649--2658},
keywords = {Information visualization, Information diffusion, Contagion, Social media, Microblogging, Spatiotemporal patterns},
abstract = {When and where is an idea dispersed? Social media, like Twitter, has been increasingly used for exchanging information, opinions and emotions about events that are happening across the world. Here we propose a novel visualization design, “Whisper”, for tracing the process of information diffusion in social media in real time. Our design highlights three major characteristics of diffusion processes in social media: the temporal trend, social-spatial extent, and community response of a topic of interest. Such social, spatiotemporal processes are conveyed based on a sunflower metaphor whose seeds are often dispersed far away. In Whisper, we summarize the collective responses of communities on a given topic based on how tweets were retweeted by groups of users, through representing the sentiments extracted from the tweets, and tracing the pathways of retweets on a spatial hierarchical layout. We use an efficient flux line-drawing algorithm to trace multiple pathways so the temporal and spatial patterns can be identified even for a bursty event. A focused diffusion series highlights key roles such as opinion leaders in the diffusion process. We demonstrate how our design facilitates the understanding of when and where a piece of information is dispersed and what are the social responses of the crowd, for large-scale events including political campaigns and natural disasters. Initial feedback from domain experts suggests promising use for today's information consumption and dispersion in the wild.},
}
@article{p537,
journal = {IEEE TVCG},
year = 2011,
title = {A Study on Dual-Scale Data Charts},
doi = {10.1109/TVCG.2011.160},
url = {http://dx.doi.org/10.1109/TVCG.2011.160},
author = {Isenberg, P. and Bezerianos, A. and Dragicevic, P. and Fekete, J.},
pages = {2469--2478},
keywords = {Focus+Context, Quantitative Experiment, Dual-Scale Charts},
abstract = {We present the results of a user study that compares different ways of representing Dual-Scale data charts. Dual-Scale charts incorporate two different data resolutions into one chart in order to emphasize data in regions of interest or to enable the comparison of data from distant regions. While some design guidelines exist for these types of charts, there is currently little empirical evidence on which to base their design. We fill this gap by discussing the design space of Dual-Scale cartesian-coordinate charts and by experimentally comparing the performance of different chart types with respect to elementary graphical perception tasks such as comparing lengths and distances. Our study suggests that cut-out charts which include collocated full context and focus are the best alternative, and that superimposed charts in which focus and context overlap on top of each other should be avoided.},
}
@article{p538,
journal = {IEEE TVCG},
year = 2011,
title = {Adaptive Privacy-Preserving Visualization Using Parallel Coordinates},
doi = {10.1109/TVCG.2011.163},
url = {http://dx.doi.org/10.1109/TVCG.2011.163},
author = {Dasgupta, A. and Kosara, R.},
pages = {2241--2248},
keywords = {Parallel coordinates, privacy, clustering},
abstract = {Current information visualization techniques assume unrestricted access to data. However, privacy protection is a key issue for a lot of real-world data analyses. Corporate data, medical records, etc. are rich in analytical value but cannot be shared without first going through a transformation step where explicit identifiers are removed and the data is sanitized. Researchers in the field of data mining have proposed different techniques over the years for privacy-preserving data publishing and subsequent mining techniques on such sanitized data. A well-known drawback in these methods is that for even a small guarantee of privacy, the utility of the datasets is greatly reduced. In this paper, we propose an adaptive technique for privacy preservation in parallel coordinates. Based on knowledge about the sensitivity of the data, we compute a clustered representation on the fly, which allows the user to explore the data without breaching privacy. Through the use of screen-space privacy metrics, the technique adapts to the user's screen parameters and interaction. We demonstrate our method in a case study and discuss potential attack scenarios.},
}
@article{p539,
journal = {IEEE TVCG},
year = 2011,
title = {Angular Histograms: Frequency-Based Visualizations for Large, High Dimensional Data},
doi = {10.1109/TVCG.2011.166},
url = {http://dx.doi.org/10.1109/TVCG.2011.166},
author = {Zhao Geng and ZhenMin Peng and Laramee, R.S. and Roberts, J.C. and Walker, R.},
pages = {2572--2580},
keywords = {Parallel Coordinates, Angular Histogram, Attribute Curves},
abstract = {Parallel coordinates is a popular and well-known multivariate data visualization technique. However, one of their inherent limitations has to do with the rendering of very large data sets. This often causes an overplotting problem and the goal of the visual information seeking mantra is hampered because of a cluttered overview and non-interactive update rates. In this paper, we propose two novel solutions, namely, angular histograms and attribute curves. These techniques are frequency-based approaches to large, high-dimensional data visualization. They are able to convey both the density of underlying polylines and their slopes. Angular histogram and attribute curves offer an intuitive way for the user to explore the clustering, linear correlations and outliers in large data sets without the over-plotting and clutter problems associated with traditional parallel coordinates. We demonstrate the results on a wide variety of data sets including real-world, high-dimensional biological data. Finally, we compare our methods with the other popular frequency-based algorithms.},
}
@article{p540,
journal = {IEEE TVCG},
year = 2011,
title = {Arc Length-Based Aspect Ratio Selection},
doi = {10.1109/TVCG.2011.167},
url = {http://dx.doi.org/10.1109/TVCG.2011.167},
author = {Talbot, J. and Gerth, J. and Hanrahan, P.},
pages = {2276--2282},
keywords = {Aspect ratio selection, Banking to 45 degrees, Orientation resolution},
abstract = {The aspect ratio of a plot has a dramatic impact on our ability to perceive trends and patterns in the data. Previous approaches for automatically selecting the aspect ratio have been based on adjusting the orientations or angles of the line segments in the plot. In contrast, we recommend a simple, effective method for selecting the aspect ratio: minimize the arc length of the data curve while keeping the area of the plot constant. The approach is parameterization invariant, robust to a wide range of inputs, preserves visual symmetries in the data, and is a compromise between previously proposed techniques. Further, we demonstrate that it can be effectively used to select the aspect ratio of contour plots. We believe arc length should become the default aspect ratio selection method.},
}
@article{p541,
journal = {IEEE TVCG},
year = 2011,
title = {Asymmetric Relations in Longitudinal Social Networks},
doi = {10.1109/TVCG.2011.169},
url = {http://dx.doi.org/10.1109/TVCG.2011.169},
author = {Brandes, U. and Nick, B.},
pages = {2283--2290},
keywords = {Network visualization, Social networks, Time series data, visual knowledge discovery and representation, glyph-based techniques},
abstract = {In modeling and analysis of longitudinal social networks, visual exploration is used in particular to complement and inform other methods. The most common graphical representations for this purpose appear to be animations and small multiples of intermediate states, depending on the type of media available. We present an alternative approach based on matrix representation of gestaltlines (a combination of Tufte's sparklines with glyphs based on gestalt theory). As a result, we obtain static, compact, yet data-rich diagrams that support specifically the exploration of evolving dyadic relations and persistent group structure, although at the expense of cross-sectional network views and indirect linkages.},
}
@article{p542,
journal = {IEEE TVCG},
year = 2011,
title = {BallotMaps: Detecting Name Bias in Alphabetically Ordered Ballot Papers},
doi = {10.1109/TVCG.2011.174},
url = {http://dx.doi.org/10.1109/TVCG.2011.174},
author = {Wood, J. and Badawood, D. and Dykes, J. and Slingsby, A.},
pages = {2384--2391},
keywords = {Voting, election, bias, democracy, governance, treemaps, geovisualization, hierarchy},
abstract = {The relationship between candidates' position on a ballot paper and vote rank is explored in the case of 5000 candidates for the UK 2010 local government elections in the Greater London area. This design study uses hierarchical spatially arranged graphics to represent two locations that affect candidates at very different scales: the geographical areas for which they seek election and the spatial location of their names on the ballot paper. This approach allows the effect of position bias to be assessed; that is, the degree to which the position of a candidate's name on the ballot paper influences the number of votes received by the candidate, and whether this varies geographically. Results show that position bias was significant enough to influence rank order of candidates, and in the case of many marginal electoral wards, to influence who was elected to government. Position bias was observed most strongly for Liberal Democrat candidates but present for all major political parties. Visual analysis of classification of candidate names by ethnicity suggests that this too had an effect on votes received by candidates, in some cases overcoming alphabetic name bias. The results found contradict some earlier research suggesting that alphabetic name bias was not sufficiently significant to affect electoral outcome and add new evidence for the geographic and ethnicity influences on voting behaviour. The visual approach proposed here can be applied to a wider range of electoral data and the patterns identified and hypotheses derived from them could have significant implications for the design of ballot papers and the conduct of fair elections.},
}
@article{p543,
journal = {IEEE TVCG},
year = 2011,
title = {Benefitting InfoVis with Visual Difficulties},
doi = {10.1109/TVCG.2011.175},
url = {http://dx.doi.org/10.1109/TVCG.2011.175},
author = {Hullman, J. and Adar, E. and Shah, P.},
pages = {2213--2222},
keywords = {Desirable difficulties, cognitive efficiency, active processing, engagement, individual differences},
abstract = {Many well-cited theories for visualization design state that a visual representation should be optimized for quick and immediate interpretation by a user. Distracting elements like decorative "chartjunk" or extraneous information are avoided so as not to slow comprehension. Yet several recent studies in visualization research provide evidence that non-efficient visual elements may benefit comprehension and recall on the part of users. Similarly, findings from studies related to learning from visual displays in various subfields of psychology suggest that introducing cognitive difficulties to visualization interaction can improve a user's understanding of important information. In this paper, we synthesize empirical results from cross-disciplinary research on visual information representations, providing a counterpoint to efficiency-based design theory with guidelines that describe how visual difficulties can be introduced to benefit comprehension and recall. We identify conditions under which the application of visual difficulties is appropriate based on underlying factors in visualization interaction like active processing and engagement. We characterize effective graph design as a trade-off between efficiency and learning difficulties in order to provide Information Visualization (InfoVis) researchers and practitioners with a framework for organizing explorations of graphs for which comprehension and recall are crucial. We identify implications of this view for the design and evaluation of information visualizations.},
}
@article{p544,
journal = {IEEE TVCG},
year = 2011,
title = {BirdVis: Visualizing and Understanding Bird Populations},
doi = {10.1109/TVCG.2011.176},
url = {http://dx.doi.org/10.1109/TVCG.2011.176},
author = {Ferreira, N. and Lins, L. and Fink, D. and Kelling, S. and Wood, C. and Freire, J. and Silva, C.T.},
pages = {2374--2383},
keywords = {Ornithology, species distribution models, multiscale analysis, spatial data, temporal data},
abstract = {Birds are unrivaled windows into biotic processes at all levels and are proven indicators of ecological well-being. Understanding the determinants of species distributions and their dynamics is an important aspect of ecology and is critical for conservation and management. Through crowdsourcing, since 2002, the eBird project has been collecting bird observation records. These observations, together with local-scale environmental covariates such as climate, habitat, and vegetation phenology have been a valuable resource for a global community of educators, land managers, ornithologists, and conservation biologists. By associating environmental inputs with observed patterns of bird occurrence, predictive models have been developed that provide a statistical framework to harness available data for predicting species distributions and making inferences about species-habitat associations. Understanding these models, however, is challenging because they require scientists to quantify and compare multiscale spatial-temporal patterns. A large series of coordinated or sequential plots must be generated, individually programmed, and manually composed for analysis. This hampers the exploration and is a barrier to making the cross-species comparisons that are essential for coordinating conservation and extracting important ecological information. To address these limitations, as part of a collaboration among computer scientists, statisticians, biologists and ornithologists, we have developed BirdVis, an interactive visualization system that supports the analysis of spatio-temporal bird distribution models. BirdVis leverages visualization techniques and uses them in a novel way to better assist users in the exploration of interdependencies among model parameters. Furthermore, the system allows for comparative visualization through coordinated views, providing an intuitive interface to identify relevant correlations and patterns. We justify our design decisions and present case studies that show how BirdVis has helped scientists obtain new evidence for existing hypotheses, as well as formulate new hypotheses in their domain.},
}
@article{p545,
journal = {IEEE TVCG},
year = 2011,
title = {Brushing Dimensions - A Dual Visual Analysis Model for High-Dimensional Data},
doi = {10.1109/TVCG.2011.178},
url = {http://dx.doi.org/10.1109/TVCG.2011.178},
author = {Turkay, C. and Filzmoser, P. and Hauser, H.},
pages = {2591--2599},
keywords = {Interactive visual analysis, High-dimensional data analysis},
abstract = {In many application fields, data analysts have to deal with datasets that contain many expressions per item. The effective analysis of such multivariate datasets is dependent on the user's ability to understand both the intrinsic dimensionality of the dataset as well as the distribution of the dependent values with respect to the dimensions. In this paper, we propose a visualization model that enables the joint interactive visual analysis of multivariate datasets with respect to their dimensions as well as with respect to the actual data values. We describe a dual setting of visualization and interaction in items space and in dimensions space. The visualization of items is linked to the visualization of dimensions with brushing and focus+context visualization. With this approach, the user is able to jointly study the structure of the dimensions space as well as the distribution of data items with respect to the dimensions. Even though the proposed visualization model is general, we demonstrate its application in the context of a DNA microarray data analysis.},
}
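One plausible way to build the "dimensions space" described above is to summarize every dimension by a few statistics and plot those summaries as points that can be brushed like items. The choice of median and skewness below is an assumption for illustration, not the paper's exact construction.

# Hypothetical dimensions view: one 2D summary point (median, skewness) per
# dimension of an items x dimensions matrix (illustrative assumption only).
import numpy as np

def dimensions_view(data):
    med = np.median(data, axis=0)
    mean, std = data.mean(axis=0), data.std(axis=0)
    skewness = ((data - mean) ** 3).mean(axis=0) / std ** 3
    return np.column_stack([med, skewness])

rng = np.random.default_rng(0)
items = rng.normal(size=(100, 8))      # 100 items, 8 dimensions
print(dimensions_view(items))          # 8 rows, one point per dimension
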
@article{p546,
journal = {IEEE TVCG},
year = 2011,
title = {CloudLines: Compact Display of Event Episodes in Multiple Time-Series},
doi = {10.1109/TVCG.2011.179},
url = {http://dx.doi.org/10.1109/TVCG.2011.179},
author = {Krstajic, M. and Bertini, E. and Keim, D.A.},
pages = {2432--2439},
keywords = {Incremental Visualization, Event-based Data, Lens Distortion},
abstract = {We propose an incremental logarithmic time-series technique as a way to deal with time-based representations of large and dynamic event data sets in limited space. Modern data visualization problems in the domains of news analysis, network security and financial applications require visual analysis of incremental data, which poses specific challenges that are normally not solved by static visualizations. The incremental nature of the data implies that visualizations have to necessarily change their content and still provide comprehensible representations. In particular, in this paper we deal with the need to keep an eye on recent events together with providing a context on the past and to make relevant patterns accessible at any scale. Our technique adapts to the incoming data by taking care of the rate at which data items occur and by using a decay function to let the items fade away according to their relevance. Since access to details is also important, we provide a novel distortion magnifying lens technique which takes into account the distortions introduced by the logarithmic time scale to augment readability in selected areas of interest. We demonstrate the validity of our techniques by applying them on incremental data coming from online news streams in different time frames.},
}
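The decay idea mentioned in the abstract can be illustrated with a simple exponential importance weight that halves after a fixed half-life; the exponential form and the half-life value are assumptions, not the paper's actual function.

# Illustrative decay function for event importance (assumed form and
# parameters; the paper's decay function may differ).
def importance(event_time, now, half_life=3600.0):
    age = max(0.0, now - event_time)
    return 0.5 ** (age / half_life)    # weight in (0, 1], halves per half_life

now = 10_000.0
for t in (10_000.0, 6_400.0, 2_800.0):
    print(t, round(importance(t, now), 3))
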
@article{p547,
journal = {IEEE TVCG},
year = 2011,
title = {Composite Density Maps for Multivariate Trajectories},
doi = {10.1109/TVCG.2011.181},
url = {http://dx.doi.org/10.1109/TVCG.2011.181},
author = {Scheepens, R. and Willems, N. and van de Wetering, H. and Andrienko, G. and Andrienko, N. and van Wijk, J.J.},
pages = {2518--2527},
keywords = {Trajectories, Kernel Density Estimation, Multivariate Data, Geographical Information Systems, Raster Maps},
abstract = {We consider moving objects as multivariate time-series. By visually analyzing the attributes, patterns may appear that explain why certain movements have occurred. Density maps as proposed by Scheepens et al. [25] are a way to reveal these patterns by means of aggregations of filtered subsets of trajectories. Since filtering is often not sufficient for analysts to express their domain knowledge, we propose to use expressions instead. We present a flexible architecture for density maps to enable custom, versatile exploration using multiple density fields. The flexibility comes from a script, depicted in this paper as a block diagram, which defines an advanced computation of a density field. We define six different types of blocks to create, compose, and enhance trajectories or density fields. Blocks are customized by means of expressions that allow the analyst to model domain knowledge. The versatility of our architecture is demonstrated with several maritime use cases developed with domain experts. Our approach is expected to be useful for the analysis of objects in other domains.},
}
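A density field of the kind these composite maps are built from can be sketched by splatting a kernel at every trajectory sample into a raster. The Gaussian kernel, bandwidth, and grid size below are illustrative assumptions, not the paper's block-based architecture.

# Minimal kernel density raster for trajectory samples (illustrative sketch).
import numpy as np

def density_map(points, size=128, bandwidth=0.05):
    # points: N x 2 array with coordinates normalized to the unit square.
    ys, xs = np.mgrid[0:size, 0:size] / (size - 1)
    field = np.zeros((size, size))
    for px, py in points:
        field += np.exp(-((xs - px) ** 2 + (ys - py) ** 2) / (2 * bandwidth ** 2))
    return field

rng = np.random.default_rng(1)
traj = np.cumsum(rng.normal(scale=0.01, size=(500, 2)), axis=0) % 1.0  # toy trajectory
print(density_map(traj).shape)     # (128, 128)
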
@article{p548,
journal = {IEEE TVCG},
year = 2011,
title = {Context-Preserving Visual Links},
doi = {10.1109/TVCG.2011.183},
url = {http://dx.doi.org/10.1109/TVCG.2011.183},
author = {Steinberger, M. and Waldner, M. and Streit, M. and Lex, A. and Schmalstieg, D.},
pages = {2249--2258},
keywords = {Visual links, highlighting, connectedness, routing, image-based, saliency},
abstract = {Evaluating, comparing, and interpreting related pieces of information are tasks that are commonly performed during visual data analysis and in many kinds of information-intensive work. Synchronized visual highlighting of related elements is a well-known technique used to assist this task. An alternative approach, which is more invasive but also more expressive, is visual linking, in which line connections are rendered between related elements. In this work, we present context-preserving visual links as a new method for generating visual links. The method specifically aims to fulfill the following two goals: first, visual links should minimize the occlusion of important information; second, links should visually stand out from surrounding information by minimizing visual interference. We employ an image-based analysis of visual saliency to determine the important regions in the original representation. A consequence of the image-based approach is that our technique is application-independent and can be employed in a large number of visual data analysis scenarios in which the underlying content cannot or should not be altered. We conducted a controlled experiment that indicates that users can find linked elements in complex visualizations more quickly and with greater subjective satisfaction than in complex visualizations in which plain highlighting is used. Context-preserving visual links were perceived as visually more attractive than traditional visual links that do not account for the context information.},
}
@article{p549,
journal = {IEEE TVCG},
year = 2011,
title = {D³ Data-Driven Documents},
doi = {10.1109/TVCG.2011.185},
url = {http://dx.doi.org/10.1109/TVCG.2011.185},
author = {Bostock, M. and Ogievetsky, V. and Heer, J.},
pages = {2301--2309},
keywords = {Information visualization, user interfaces, toolkits, 2D graphics},
abstract = {Data-Driven Documents (D3) is a novel representation-transparent approach to visualization for the web. Rather than hide the underlying scenegraph within a toolkit-specific abstraction, D3 enables direct inspection and manipulation of a native representation: the standard document object model (DOM). With D3, designers selectively bind input data to arbitrary document elements, applying dynamic transforms to both generate and modify content. We show how representational transparency improves expressiveness and better integrates with developer tools than prior approaches, while offering comparable notational efficiency and retaining powerful declarative components. Immediate evaluation of operators further simplifies debugging and allows iterative development. Additionally, we demonstrate how D3 transforms naturally enable animation and interaction with dramatic performance improvements over intermediate representations.},
}
@article{p552,
journal = {IEEE TVCG},
year = 2011,
title = {Design Study of LineSets, a Novel Set Visualization Technique},
doi = {10.1109/TVCG.2011.186},
url = {http://dx.doi.org/10.1109/TVCG.2011.186},
author = {Alper, B. and Riche, N.H. and Ramos, G. and Czerwinski, M.},
pages = {2259--2267},
keywords = {Set visualization, clustering, faceted data visualization, graph visualization},
abstract = {Computing and visualizing sets of elements and their relationships is one of the most common tasks one performs when analyzing and organizing large amounts of data. Common representations of sets such as convex or concave geometries can become cluttered and difficult to parse when these sets overlap in multiple or complex ways, e.g., when multiple elements belong to multiple sets. In this paper, we present a design study of a novel set visual representation, LineSets, consisting of a curve connecting all of the set's elements. Our approach to design the visualization differs from traditional methodology used by the InfoVis community. We first explored the potential of the visualization concept by running a controlled experiment comparing our design sketches to results from the state-of-the-art technique. Our results demonstrated that LineSets are advantageous for certain tasks when compared to concave shapes. We discuss an implementation of LineSets based on simple heuristics and present a study demonstrating that our generated curves do as well as human-drawn ones. Finally, we present two applications of our technique in the context of search tasks on a map and community analysis tasks in social networks.},
}
@article{p553,
journal = {IEEE TVCG},
year = 2011,
title = {Developing and Evaluating Quilts for the Depiction of Large Layered Graphs},
doi = {10.1109/TVCG.2011.187},
url = {http://dx.doi.org/10.1109/TVCG.2011.187},
author = {Juhee Bae and Watson, B.},
pages = {2268--2275},
keywords = {Graph drawing, layered graphs, matrix based depiction, node-link diagram},
abstract = {Traditional layered graph depictions such as flow charts are in wide use. Yet as graphs grow more complex, these depictions can become difficult to understand. Quilts are matrix-based depictions for layered graphs designed to address this problem. In this research, we first improve Quilts by developing three design alternatives, and then compare the best of these alternatives to better-known node-link and matrix depictions. A primary weakness in Quilts is their depiction of skip links, links that do not simply connect to a succeeding layer. Therefore in our first study, we compare Quilts using color-only, text-only, and mixed (color and text) skip link depictions, finding that path finding with the color-only depiction is significantly slower and less accurate, and that in certain cases, the mixed depiction offers an advantage over the text-only depiction. In our second study, we compare Quilts using the mixed depiction to node-link diagrams and centered matrices. Overall results show that users can find paths through graphs significantly faster with Quilts (46.6 secs) than with node-link (58.3 secs) or matrix (71.2 secs) diagrams. This speed advantage is still greater in large graphs (e.g. in 200 node graphs, 55.4 secs vs. 71.1 secs for node-link and 84.2 secs for matrix depictions).},
}
@article{p554,
journal = {IEEE TVCG},
year = 2011,
title = {DICON: Interactive Visual Analysis of Multidimensional Clusters},
doi = {10.1109/TVCG.2011.188},
url = {http://dx.doi.org/10.1109/TVCG.2011.188},
author = {Nan Cao and Gotz, D. and Jimeng Sun and Huamin Qu},
pages = {2581--2590},
keywords = {Visual Analysis, Clustering, Information Visualization},
abstract = {Clustering as a fundamental data analysis technique has been widely used in many analytic applications. However, it is often difficult for users to understand and evaluate multidimensional clustering results, especially the quality of clusters and their semantics. For large and complex data, high-level statistical information about the clusters is often needed for users to evaluate cluster quality while a detailed display of multidimensional attributes of the data is necessary to understand the meaning of clusters. In this paper, we introduce DICON, an icon-based cluster visualization that embeds statistical information into a multi-attribute display to facilitate cluster interpretation, evaluation, and comparison. We design a treemap-like icon to represent a multidimensional cluster, and the quality of the cluster can be conveniently evaluated with the embedded statistical information. We further develop a novel layout algorithm which can generate similar icons for similar clusters, making comparisons of clusters easier. User interaction and clutter reduction are integrated into the system to help users more effectively analyze and refine clustering results for large datasets. We demonstrate the power of DICON through a user study and a case study in the healthcare domain. Our evaluation shows the benefits of the technique, especially in support of complex multidimensional cluster analysis.},
}
@article{p555,
journal = {IEEE TVCG},
year = 2011,
title = {Divided Edge Bundling for Directional Network Data},
doi = {10.1109/TVCG.2011.190},
url = {http://dx.doi.org/10.1109/TVCG.2011.190},
author = {Selassie, D. and Heller, B. and Heer, J.},
pages = {2354--2363},
keywords = {Graph visualization, aggregation, node-link diagrams, edge bundling, physical simulation},
abstract = {The node-link diagram is an intuitive and venerable way to depict a graph. To reduce clutter and improve the readability of node-link views, Holten & van Wijk's force-directed edge bundling employs a physical simulation to spatially group graph edges. While both useful and aesthetic, this technique has shortcomings: it bundles spatially proximal edges regardless of direction, weight, or graph connectivity. As a result, high-level directional edge patterns are obscured. We present divided edge bundling to tackle these shortcomings. By modifying the forces in the physical simulation, directional lanes appear as an emergent property of edge direction. By considering graph topology, we only bundle edges related by graph structure. Finally, we aggregate edge weights in bundles to enable more accurate visualization of total bundle weights. We compare visualizations created using our technique to standard force-directed edge bundling, matrix diagrams, and clustered graphs; we find that divided edge bundling leads to visualizations that are easier to interpret and reveal both familiar and previously obscured patterns.},
}
@article{p556,
journal = {IEEE TVCG},
year = 2011,
title = {Drawing Road Networks with Focus Regions},
doi = {10.1109/TVCG.2011.191},
url = {http://dx.doi.org/10.1109/TVCG.2011.191},
author = {Haunert, J.-H. and Sering, L.},
pages = {2555--2562},
keywords = {cartography, schematic maps, fish-eye view, graph drawing, optimization, quadratic programming},
abstract = {Mobile users of maps typically need detailed information about their surroundings plus some context information about remote places. In order to avoid that the map partly gets too dense, cartographers have designed mapping functions that enlarge a user-defined focus region - such functions are sometimes called fish-eye projections. The extra map space occupied by the enlarged focus region is compensated by distorting other parts of the map. We argue that, in a map showing a network of roads relevant to the user, distortion should preferably take place in those areas where the network is sparse. Therefore, we do not apply a predefined mapping function. Instead, we consider the road network as a graph whose edges are the road segments. We compute a new spatial mapping with a graph-based optimization approach, minimizing the square sum of distortions at edges. Our optimization method is based on a convex quadratic program (CQP); CQPs can be solved in polynomial time. Important requirements on the output map are expressed as linear inequalities. In particular, we show how to forbid edge crossings. We have implemented our method in a prototype tool. For instances of different sizes, our method generated output maps that were far less distorted than those generated with a predefined fish-eye projection. Future work is needed to automate the selection of roads relevant to the user. Furthermore, we aim at fast heuristics for application in real-time systems.},
}
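The least-squares formulation sketched in the abstract can be written schematically as a convex quadratic program of the following kind, where q_v and p_v are the original and new positions of road-network vertex v, s_uv is a per-segment target scale factor (large inside the focus region), w_uv a segment weight, and A p <= b collects the linear side constraints such as crossing prevention. The notation is an assumption, not the paper's exact model.

% Schematic quadratic program (assumed notation, not the paper's exact model):
\min_{\{p_v\}} \; \sum_{(u,v) \in E} w_{uv}\, \bigl\| (p_u - p_v) - s_{uv}\,(q_u - q_v) \bigr\|^2
\quad \text{subject to} \quad A\,p \le b
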
@article{p557,
journal = {IEEE TVCG},
year = 2011,
title = {Evaluation of Artery Visualizations for Heart Disease Diagnosis},
doi = {10.1109/TVCG.2011.192},
url = {http://dx.doi.org/10.1109/TVCG.2011.192},
author = {Borkin, M. and Gajos, K. and Peters, A. and Mitsouras, D. and Melchionna, S. and Rybicki, F. and Feldman, C. and Pfister, H.},
pages = {2479--2488},
keywords = {Quantitative evaluation, qualitative evaluation, biomedical and medical visualization},
abstract = {Heart disease is the number one killer in the United States, and finding indicators of the disease at an early stage is critical for treatment and prevention. In this paper we evaluate visualization techniques that enable the diagnosis of coronary artery disease. A key physical quantity of medical interest is endothelial shear stress (ESS). Low ESS has been associated with sites of lesion formation and rapid progression of disease in the coronary arteries. Having effective visualizations of a patient's ESS data is vital for the quick and thorough non-invasive evaluation by a cardiologist. We present a task taxonomy for hemodynamics based on a formative user study with domain experts. Based on the results of this study we developed HemoVis, an interactive visualization application for heart disease diagnosis that uses a novel 2D tree diagram representation of coronary artery trees. We present the results of a formal quantitative user study with domain experts that evaluates the effect of 2D versus 3D artery representations and of color maps on identifying regions of low ESS. We show statistically significant results demonstrating that our 2D visualizations are more accurate and efficient than 3D representations, and that a perceptually appropriate color map leads to fewer diagnostic mistakes than a rainbow color map.},
}
@article{p558,
journal = {IEEE TVCG},
year = 2011,
title = {Evaluation of Traditional, Orthogonal, and Radial Tree Diagrams by an Eye Tracking Study},
doi = {10.1109/TVCG.2011.193},
url = {http://dx.doi.org/10.1109/TVCG.2011.193},
author = {Burch, M. and Konevtsova, N. and Heinrich, J. and Hoeferlin, M. and Weiskopf, D.},
pages = {2440--2448},
keywords = {Hierarchy visualization, node-link layout, eye tracking, user study},
abstract = {Node-link diagrams are an effective and popular visualization approach for depicting hierarchical structures and for showing parent-child relationships. In this paper, we present the results of an eye tracking experiment investigating traditional, orthogonal, and radial node-link tree layouts as a piece of empirical basis for choosing between those layouts. Eye tracking was used to identify visual exploration behaviors of participants that were asked to solve a typical hierarchy exploration task by inspecting a static tree diagram: finding the least common ancestor of a given set of marked leaf nodes. To uncover exploration strategies, we examined fixation points, duration, and saccades of participants' gaze trajectories. For the non-radial diagrams, we additionally investigated the effect of diagram orientation by switching the position of the root node to each of the four main orientations. We also recorded and analyzed correctness of answers as well as completion times in addition to the eye movement data. We found out that traditional and orthogonal tree layouts significantly outperform radial tree layouts for the given task. Furthermore, by applying trajectory analysis techniques we uncovered that participants cross-checked their task solution more often in the radial than in the non-radial layouts.},
}
@article{p559,
journal = {IEEE TVCG},
year = 2011,
title = {Exploratory Analysis of Time-Series with ChronoLenses},
doi = {10.1109/TVCG.2011.195},
url = {http://dx.doi.org/10.1109/TVCG.2011.195},
author = {Jian Zhao and Chevalier, F. and Pietriga, E. and Balakrishnan, R.},
pages = {2422--2431},
keywords = {Time-series Data, Exploratory Visualization, Focus+Context, Lens, Interaction Techniques},
abstract = {Visual representations of time-series are useful for tasks such as identifying trends, patterns and anomalies in the data. Many techniques have been devised to make these visual representations more scalable, enabling the simultaneous display of multiple variables, as well as the multi-scale display of time-series of very high resolution or that span long time periods. There has been comparatively little research on how to support the more elaborate tasks associated with the exploratory visual analysis of timeseries, e.g., visualizing derived values, identifying correlations, or discovering anomalies beyond obvious outliers. Such tasks typically require deriving new time-series from the original data, trying different functions and parameters in an iterative manner. We introduce a novel visualization technique called ChronoLenses, aimed at supporting users in such exploratory tasks. ChronoLenses perform on-the-fly transformation of the data points in their focus area, tightly integrating visual analysis with user actions, and enabling the progressive construction of advanced visual analysis pipelines.},
}
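An on-the-fly lens transformation of the kind described above can be illustrated by replacing the samples inside a focus interval with a derived series, here a finite-difference derivative. The specific transform and all names are assumptions for illustration only.

# Hypothetical lens transform: inside [t0, t1] show the first derivative of
# the series instead of the raw values (illustrative, not the paper's code).
import numpy as np

def lens_derivative(t, y, t0, t1):
    inside = (t >= t0) & (t <= t1)
    out = y.copy()
    out[inside] = np.gradient(y[inside], t[inside])
    return out

t = np.linspace(0, 10, 200)
print(lens_derivative(t, np.sin(t), 4.0, 6.0)[:5])
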
@article{p560,
journal = {IEEE TVCG},
year = 2011,
title = {Exploring Ambient and Artistic Visualization for Residential Energy Use Feedback},
doi = {10.1109/TVCG.2011.196},
url = {http://dx.doi.org/10.1109/TVCG.2011.196},
author = {Rodgers, J. and Bartram, L.},
pages = {2489--2497},
keywords = {Ambient visualization, informative art, casual infovis, sustainability, distributed visualization},
abstract = {Providing effective feedback on resource consumption in the home is a key challenge of environmental conservation efforts. One promising approach for providing feedback about residential energy consumption is the use of ambient and artistic visualizations. Pervasive computing technologies enable the integration of such feedback into the home in the form of distributed point-of-consumption feedback devices to support decision-making in everyday activities. However, introducing these devices into the home requires sensitivity to the domestic context. In this paper we describe three abstract visualizations and suggest four design requirements that this type of device must meet to be effective: pragmatic, aesthetic, ambient, and ecological. We report on the findings from a mixed methods user study that explores the viability of using ambient and artistic feedback in the home based on these requirements. Our findings suggest that this approach is a viable way to provide resource use feedback and that both the aesthetics of the representation and the context of use are important elements that must be considered in this design space.},
}
@article{p561,
journal = {IEEE TVCG},
year = 2011,
title = {Exploring Uncertainty in Geodemographics with Interactive Graphics},
doi = {10.1109/TVCG.2011.197},
url = {http://dx.doi.org/10.1109/TVCG.2011.197},
author = {Slingsby, A. and Dykes, J. and Wood, J.},
pages = {2545--2554},
keywords = {Geodemographics, OAC, classification, cartography, uncertainty},
abstract = {Geodemographic classifiers characterise populations by categorising geographical areas according to the demographic and lifestyle characteristics of those who live within them. The dimension-reducing quality of such classifiers provides a simple and effective means of characterising population through a manageable set of categories, but inevitably hides heterogeneity, which varies within and between the demographic categories and geographical areas, sometimes systematically. This may have implications for their use, which is widespread in government and commerce for planning, marketing and related activities. We use novel interactive graphics to delve into OAC - a free and open geodemographic classifier that classifies the UK population in over 200,000 small geographical areas into 7 super-groups, 21 groups and 52 sub-groups. Our graphics provide access to the original 41 demographic variables used in the classification and the uncertainty associated with the classification of each geographical area on-demand. It also supports comparison geographically and by category. This serves the dual purpose of helping understand the classifier itself leading to its more informed use and providing a more comprehensive view of population in a comprehensible manner. We assess the impact of these interactive graphics on experienced OAC users who explored the details of the classification, its uncertainty and the nature of between - and within - class variation and then reflect on their experiences. Visualization of the complexities and subtleties of the classification proved to be a thought-provoking exercise both confirming and challenging users' understanding of population, the OAC classifier and the way it is used in their organisations. Users identified three contexts for which the techniques were deemed useful in the context of local government, confirming the validity of the proposed methods.},
}
@article{p562,
journal = {IEEE TVCG},
year = 2011,
title = {Flexible Linked Axes for Multivariate Data Visualization},
doi = {10.1109/TVCG.2011.201},
url = {http://dx.doi.org/10.1109/TVCG.2011.201},
author = {Claessen, J.H.T. and van Wijk, J.J.},
pages = {2310--2316},
keywords = {Multivariate data, visualization, scatterplot, Parallel Coordinates Plot},
abstract = {Multivariate data visualization is a classic topic, for which many solutions have been proposed, each with its own strengths and weaknesses. In standard solutions the structure of the visualization is fixed; we explore how to give the user more freedom to define visualizations. Our new approach is based on the usage of Flexible Linked Axes: The user is enabled to define a visualization by drawing and linking axes on a canvas. Each axis has an associated attribute and range, which can be adapted. Links between pairs of axes are used to show data in either scatter plot- or Parallel Coordinates Plot-style. Flexible Linked Axes enable users to define a wide variety of different visualizations. These include standard methods, such as scatter plot matrices, radar charts, and PCPs [11]; less well known approaches, such as Hyperboxes [1], TimeWheels [17], and many-to-many relational parallel coordinate displays [14]; and also custom visualizations, consisting of combinations of scatter plots and PCPs. Furthermore, our method allows users to define composite visualizations that automatically support brushing and linking. We have discussed our approach with ten prospective users, who found the concept easy to understand and highly promising.},
}
@article{p563,
journal = {IEEE TVCG},
year = 2011,
title = {Flow Map Layout via Spiral Trees},
doi = {10.1109/TVCG.2011.202},
url = {http://dx.doi.org/10.1109/TVCG.2011.202},
author = {Buchin, K. and Speckmann, B. and Verbeek, K.},
pages = {2536--2544},
keywords = {Flow maps, Automated Cartography, Spiral Trees},
abstract = {Flow maps are thematic maps that visualize the movement of objects, such as people or goods, between geographic regions. One or more sources are connected to several targets by lines whose thickness corresponds to the amount of flow between a source and a target. Good flow maps reduce visual clutter by merging (bundling) lines smoothly and by avoiding self-intersections. Most flow maps are still drawn by hand and only few automated methods exist. Some of the known algorithms do not support edge-bundling and those that do, cannot guarantee crossing-free flows. We present a new algorithmic method that uses edge-bundling and computes crossing-free flows of high visual quality. Our method is based on so-called spiral trees, a novel type of Steiner tree which uses logarithmic spirals. Spiral trees naturally induce a clustering on the targets and smoothly bundle lines. Our flows can also avoid obstacles, such as map features, region outlines, or even the targets. We demonstrate our approach with extensive experiments.},
}
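The logarithmic spirals underlying spiral trees can be parameterized as below: points wind toward the source at the origin while the angle grows at a rate set by a restricting angle alpha. This shows only the curve family; the Steiner-tree construction in the paper is more involved, and the parameter values are assumptions.

# Points on a logarithmic spiral from a start point toward the source at the
# origin (illustrative parameterization with an assumed restricting angle).
import numpy as np

def spiral(r0, phi0, alpha=np.radians(25), steps=100):
    t = np.linspace(0.0, 5.0, steps)
    r = r0 * np.exp(-t)
    phi = phi0 + np.tan(alpha) * t
    return np.column_stack([r * np.cos(phi), r * np.sin(phi)])

print(spiral(1.0, 0.0)[:3])
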
@article{p564,
journal = {IEEE TVCG},
year = 2011,
title = {Focus+Context Metro Maps},
doi = {10.1109/TVCG.2011.205},
url = {http://dx.doi.org/10.1109/TVCG.2011.205},
author = {Yu-Shuen Wang and Ming-Te Chi},
pages = {2528--2535},
keywords = {Focus+context visualization, metro map, octilinear layout, graph labeling, optimization},
abstract = {We introduce a focus+context method to visualize a complicated metro map of a modern city on a small display area. The context of our work is with regard to the popularity of mobile devices. The best route to the destination, which can be obtained from the arrival time of trains, is highlighted. The stations on the route enjoy larger spaces, whereas the other stations are rendered smaller and closer to fit the whole map into a screen. To simplify the navigation and route planning for visitors, we formulate various map characteristics such as octilinear transportation lines and regular station distances into energy terms. We then solve for the optimal layout in a least squares sense. In addition, we label the names of stations that are on the route of a passenger according to human preferences, occlusions, and consistencies of label positions using the graph cuts method. Our system achieves real-time performance by being able to report instant information because of the carefully designed energy terms. We apply our method to lay out a number of metro maps and show the results and timing statistics to demonstrate the feasibility of our technique.},
}
@article{p565,
journal = {IEEE TVCG},
year = 2011,
title = {Human-Centered Approaches in Geovisualization Design: Investigating Multiple Methods Through a Long-Term Case Study},
doi = {10.1109/TVCG.2011.209},
url = {http://dx.doi.org/10.1109/TVCG.2011.209},
author = {Lloyd, D. and Dykes, J.},
pages = {2498--2507},
keywords = {Evaluation, geovisualization, context of use, requirements, field study, prototypes, sketching, design},
abstract = {Working with three domain specialists we investigate human-centered approaches to geovisualization following an ISO13407 taxonomy covering context of use, requirements and early stages of design. Our case study, undertaken over three years, draws attention to repeating trends: that generic approaches fail to elicit adequate requirements for geovis application design; that the use of real data is key to understanding needs and possibilities; that trust and knowledge must be built and developed with collaborators. These processes take time but modified human-centred approaches can be effective. A scenario developed through contextual inquiry but supplemented with domain data and graphics is useful to geovis designers. Wireframe, paper and digital prototypes enable successful communication between specialist and geovis domains when incorporating real and interesting data, prompting exploratory behaviour and eliciting previously unconsidered requirements. Paper prototypes are particularly successful at eliciting suggestions, especially for novel visualization. Enabling specialists to explore their data freely with a digital prototype is as effective as using a structured task protocol and is easier to administer. Autoethnography has potential for framing the design process. We conclude that a common understanding of context of use, domain data and visualization possibilities are essential to successful geovis design and develop as this progresses. HC approaches can make a significant contribution here. However, modified approaches, applied with flexibility, are most promising. We advise early, collaborative engagement with data - through simple, transient visual artefacts supported by data sketches and existing designs - before moving to successively more sophisticated data wireframes and data prototypes.},
}
@article{p566,
journal = {IEEE TVCG},
year = 2011,
title = {Improved Similarity Trees and their Application to Visual Data Classification},
doi = {10.1109/TVCG.2011.212},
url = {http://dx.doi.org/10.1109/TVCG.2011.212},
author = {Paiva, J.G. and Florian, L. and Pedrini, H. and Telles, G.P. and Minghim, R.},
pages = {2459--2468},
keywords = {Similarity Trees, Multidimensional Projections, Image Classification},
abstract = {An alternative form to multidimensional projections for the visual analysis of data represented in multidimensional spaces is the deployment of similarity trees, such as Neighbor Joining trees. They organize data objects on the visual plane emphasizing their levels of similarity with high capability of detecting and separating groups and subgroups of objects. Besides this similarity-based hierarchical data organization, some of their advantages include the ability to decrease point clutter; high precision; and a consistent view of the data set during focusing, offering a very intuitive way to view the general structure of the data set as well as to drill down to groups and subgroups of interest. Disadvantages of similarity trees based on neighbor joining strategies include their computational cost and the presence of virtual nodes that utilize too much of the visual space. This paper presents a highly improved version of the similarity tree technique. The improvements in the technique are given by two procedures. The first is a strategy that replaces virtual nodes by promoting real leaf nodes to their place, saving large portions of space in the display and maintaining the expressiveness and precision of the technique. The second improvement is an implementation that significantly accelerates the algorithm, impacting its use for larger data sets. We also illustrate the applicability of the technique in visual data mining, showing its advantages to support visual classification of data sets, with special attention to the case of image classification. We demonstrate the capabilities of the tree for analysis and iterative manipulation and employ those capabilities to support evolving to a satisfactory data organization and classification.},
}
@article{p567,
journal = {IEEE TVCG},
year = 2011,
title = {In Situ Exploration of Large Dynamic Networks},
doi = {10.1109/TVCG.2011.213},
url = {http://dx.doi.org/10.1109/TVCG.2011.213},
author = {Hadlak, S. and Schulz, H. and Schumann, H.},
pages = {2334--2343},
keywords = {Dynamic graph data, multiform visualization, multi-focus+context},
abstract = {The analysis of large dynamic networks poses a challenge in many fields, ranging from large bot-nets to social networks. As dynamic networks exhibit different characteristics, e.g., being of sparse or dense structure, or having a continuous or discrete time line, a variety of visualization techniques have been specifically designed to handle these different aspects of network structure and time. This wide range of existing techniques is well justified, as rarely a single visualization is suitable to cover the entire visual analysis. Instead, visual representations are often switched in the course of the exploration of dynamic graphs as the focus of analysis shifts between the temporal and the structural aspects of the data. To support such a switching in a seamless and intuitive manner, we introduce the concept of in situ visualization- a novel strategy that tightly integrates existing visualization techniques for dynamic networks. It does so by allowing the user to interactively select in a base visualization a region for which a different visualization technique is then applied and embedded in the selection made. This permits to change the way a locally selected group of data items, such as nodes or time points, are shown - right in the place where they are positioned, thus supporting the user's overall mental map. Using this approach, a user can switch seamlessly between different visual representations to adapt a region of a base visualization to the specifics of the data within it or to the current analysis focus. This paper presents and discusses the in situ visualization strategy and its implications for dynamic graph visualization. Furthermore, it illustrates its usefulness by employing it for the visual exploration of dynamic networks from two different fields: model versioning and wireless mesh networks.},
}
@article{p568,
journal = {IEEE TVCG},
year = 2011,
title = {Local Affine Multidimensional Projection},
doi = {10.1109/TVCG.2011.220},
url = {http://dx.doi.org/10.1109/TVCG.2011.220},
author = {Joia, P. and Paulovich, F.V. and Coimbra, D. and Cuminato, J.A. and Nonato, L.G.},
pages = {2563--2571},
keywords = {Multidimensional Projection, High Dimensional Data, Visual Data Mining},
abstract = {Multidimensional projection techniques have experienced many improvements lately, mainly regarding computational times and accuracy. However, existing methods do not yet provide flexible enough mechanisms for visualization-oriented fully interactive applications. This work presents a new multidimensional projection technique designed to be more flexible and versatile than other methods. This novel approach, called Local Affine Multidimensional Projection (LAMP), relies on orthogonal mapping theory to build accurate local transformations that can be dynamically modified according to user knowledge. The accuracy, flexibility and computational efficiency of LAMP is confirmed by a comprehensive set of comparisons. LAMP's versatility is exploited in an application which seeks to correlate data that, in principle, has no connection as well as in visual exploration of textual documents.},
}
@article{p569,
journal = {IEEE TVCG},
year = 2011,
title = {MoleView: An Attribute and Structure-Based Semantic Lens for Large Element-Based Plots},
doi = {10.1109/TVCG.2011.223},
url = {http://dx.doi.org/10.1109/TVCG.2011.223},
author = {Hurter, C. and Telea, A. and Ersoy, O.},
pages = {2600--2609},
keywords = {Semantic lenses, magic lenses, graph bundling, attribute filtering},
abstract = {We present MoleView, a novel technique for interactive exploration of multivariate relational data. Given a spatial embedding of the data, in terms of a scatter plot or graph layout, we propose a semantic lens which selects a specific spatial and attribute-related data range. The lens keeps the selected data in focus unchanged and continuously deforms the data out of the selection range in order to maintain the context around the focus. Specific deformations include distance-based repulsion of scatter plot points, deforming straight-line node-link graph drawings, and varying the simplification degree of bundled edge graph layouts. Using a brushing-based technique, we further show the applicability of our semantic lens for scenarios requiring a complex selection of the zones of interest. Our technique is simple to implement and provides real-time performance on large datasets. We demonstrate our technique with actual data from air and road traffic control, medical imaging, and software comprehension applications.},
}
@article{p570,
journal = {IEEE TVCG},
year = 2011,
title = {Parallel Edge Splatting for Scalable Dynamic Graph Visualization},
doi = {10.1109/TVCG.2011.226},
url = {http://dx.doi.org/10.1109/TVCG.2011.226},
author = {Burch, M. and Vehlow, C. and Beck, F. and Diehl, S. and Weiskopf, D.},
pages = {2344--2353},
keywords = {Dynamic graph visualization, graph splatting, software visualization, software evolution},
abstract = {We present a novel dynamic graph visualization technique based on node-link diagrams. The graphs are drawn side-by-side from left to right as a sequence of narrow stripes that are placed perpendicular to the horizontal time line. The hierarchically organized vertices of the graphs are arranged on vertical, parallel lines that bound the stripes; directed edges connect these vertices from left to right. To address massive overplotting of edges in huge graphs, we employ a splatting approach that transforms the edges to a pixel-based scalar field. This field represents the edge densities in a scalable way and is depicted by non-linear color mapping. The visualization method is complemented by interaction techniques that support data exploration by aggregation, filtering, brushing, and selective data zooming. Furthermore, we formalize graph patterns so that they can be interactively highlighted on demand. A case study on software releases explores the evolution of call graphs extracted from the JUnit open source software project. In a second application, we demonstrate the scalability of our approach by applying it to a bibliography dataset containing more than 1.5 million paper titles from 60 years of research history, producing a vast amount of relations between title words.},
}
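The splatting step mentioned above can be sketched by rasterizing every left-to-right edge of one stripe into a pixel grid, accumulating counts, and applying a non-linear mapping. The point-sampled rasterization and log mapping below are deliberate simplifications, not the paper's GPU implementation.

# Illustrative edge splatting into a pixel-based density field (assumption:
# simple point-sampled rasterization of straight left-to-right edges).
import numpy as np

def splat_edges(edges, width=64, height=64, samples=64):
    # edges: iterable of (y_left, y_right) pairs with y in [0, 1].
    field = np.zeros((height, width))
    xs = np.linspace(0.0, 1.0, samples)
    for y0, y1 in edges:
        ys = y0 + (y1 - y0) * xs                              # straight edge
        cols = (xs * (width - 1)).astype(int)
        rows = np.clip((ys * (height - 1)).astype(int), 0, height - 1)
        np.add.at(field, (rows, cols), 1.0)                   # accumulate density
    return np.log1p(field)                                    # non-linear mapping

print(splat_edges([(0.1, 0.9), (0.2, 0.8), (0.2, 0.85), (0.7, 0.1)]).max())
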
@article{p571,
journal = {IEEE TVCG},
year = 2011,
title = {Product Plots},
doi = {10.1109/TVCG.2011.227},
url = {http://dx.doi.org/10.1109/TVCG.2011.227},
author = {Wickham, H. and Hofmann, H.},
pages = {2223--2230},
keywords = {Statistics, joint distribution, conditional distribution, treemap, bar chart, mosaic plot},
abstract = {We propose a new framework for visualising tables of counts, proportions and probabilities. We call our framework product plots, alluding to the computation of area as a product of height and width, and the statistical concept of generating a joint distribution from the product of conditional and marginal distributions. The framework, with extensions, is sufficient to encompass over 20 visualisations previously described in fields of statistical graphics and infovis, including bar charts, mosaic plots, treemaps, equal area plots and fluctuation diagrams.},
}
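The product construction named above corresponds to P(x, y) = P(x) * P(y | x): column widths encode the marginal, heights within a column the conditional, so each rectangle's area encodes a joint proportion. The layout routine below is an illustrative sketch of that decomposition, not the authors' framework.

# Illustrative product-plot (mosaic-style) layout from a table of counts:
# rectangle area = joint proportion = marginal (width) * conditional (height).
import numpy as np

def product_layout(counts):
    counts = np.asarray(counts, dtype=float)
    marginal = counts.sum(axis=1) / counts.sum()                   # column widths
    conditional = counts / counts.sum(axis=1, keepdims=True)       # heights per column
    rects, x = [], 0.0
    for i, w in enumerate(marginal):
        y = 0.0
        for h in conditional[i]:
            rects.append((x, y, w, h))                             # (x, y, width, height)
            y += h
        x += w
    return rects

print(product_layout([[10, 30], [40, 20]]))
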
@article{p572,
journal = {IEEE TVCG},
year = 2011,
title = {Quality Metrics in High-Dimensional Data Visualization: An Overview and Systematization},
doi = {10.1109/TVCG.2011.229},
url = {http://dx.doi.org/10.1109/TVCG.2011.229},
author = {Bertini, E. and Tatu, A. and Keim, D.A.},
pages = {2203--2212},
keywords = {Quality Metrics, High-Dimensional Data Visualization},
abstract = {In this paper, we present a systematization of techniques that use quality metrics to help in the visual exploration of meaningful patterns in high-dimensional data. In a number of recent papers, different quality metrics are proposed to automate the demanding search through large spaces of alternative visualizations (e.g., alternative projections or ordering), allowing the user to concentrate on the most promising visualizations suggested by the quality metrics. Over the last decade, this approach has witnessed a remarkable development but few reflections exist on how these methods are related to each other and how the approach can be developed further. For this purpose, we provide an overview of approaches that use quality metrics in high-dimensional data visualization and propose a systematization based on a thorough literature review. We carefully analyze the papers and derive a set of factors for discriminating the quality metrics, visualization techniques, and the process itself. The process is described through a reworked version of the well-known information visualization pipeline. We demonstrate the usefulness of our model by applying it to several existing approaches that use quality metrics, and we provide reflections on implications of our model for future research.},
}
@article{p573,
journal = {IEEE TVCG},
year = 2011,
title = {Sequence Surveyor: Leveraging Overview for Scalable Genomic Alignment Visualization},
doi = {10.1109/TVCG.2011.232},
url = {http://dx.doi.org/10.1109/TVCG.2011.232},
author = {Albers, D. and Dewey, C. and Gleicher, M.},
pages = {2392--2401},
keywords = {Bioinformatics Visualization, Perception Theory, Scalability Issues, Visual Design},
abstract = {In this paper, we introduce overview visualization tools for large-scale multiple genome alignment data. Genome alignment visualization and, more generally, sequence alignment visualization are an important tool for understanding genomic sequence data. As sequencing techniques improve and more data become available, greater demand is being placed on visualization tools to scale to the size of these new datasets. When viewing such large data, we necessarily cannot convey details, rather we specifically design overview tools to help elucidate large-scale patterns. Perceptual science, signal processing theory, and generality provide a framework for the design of such visualizations that can scale well beyond current approaches. We present Sequence Surveyor, a prototype that embodies these ideas for scalable multiple whole-genome alignment overview visualization. Sequence Surveyor visualizes sequences in parallel, displaying data using variable color, position, and aggregation encodings. We demonstrate how perceptual science can inform the design of visualization techniques that remain visually manageable at scale and how signal processing concepts can inform aggregation schemes that highlight global trends, outliers, and overall data distributions as the problem scales. These techniques allow us to visualize alignments with over 100 whole bacterial-sized genomes.},
}
@article{p574,
journal = {IEEE TVCG},
year = 2011,
title = {Skeleton-Based Edge Bundling for Graph Visualization},
doi = {10.1109/TVCG.2011.233},
url = {http://dx.doi.org/10.1109/TVCG.2011.233},
author = {Ersoy, O. and Hurter, C. and Paulovich, F.V. and Cantareiro, G. and Telea, A.},
pages = {2364--2373},
keywords = {Graph layouts, edge bundles, image-based information visualization},
abstract = {In this paper, we present a novel approach for constructing bundled layouts of general graphs. As layout cues for bundles, we use medial axes, or skeletons, of edges which are similar in terms of position information. We combine edge clustering, distance fields, and 2D skeletonization to construct progressively bundled layouts for general graphs by iteratively attracting edges towards the centerlines of level sets of their distance fields. Apart from clustering, our entire pipeline is image-based with an efficient implementation in graphics hardware. Besides speed and implementation simplicity, our method allows explicit control of the emphasis on structure of the bundled layout, i.e. the creation of strongly branching (organic-like) or smooth bundles. We demonstrate our method on several large real-world graphs.},
}
@article{p575,
journal = {IEEE TVCG},
year = 2011,
title = {Stereoscopic Highlighting: 2D Graph Visualization on Stereo Displays},
doi = {10.1109/TVCG.2011.234},
url = {http://dx.doi.org/10.1109/TVCG.2011.234},
author = {Alper, B. and Hollerer, T. and Kuchera-Morin, J. and Forbes, A.},
pages = {2325--2333},
keywords = {Graph visualization, stereo displays, virtual reality},
abstract = {In this paper we present a new technique and prototype graph visualization system, stereoscopic highlighting, to help answer accessibility and adjacency queries when interacting with a node-link diagram. Our technique utilizes stereoscopic depth to highlight regions of interest in a 2D graph by projecting these parts onto a plane closer to the viewpoint of the user. This technique aims to isolate and magnify specific portions of the graph that need to be explored in detail without resorting to other highlighting techniques like color or motion, which can then be reserved to encode other data attributes. This mechanism of stereoscopic highlighting also enables focus+context views by juxtaposing a detailed image of a region of interest with the overall graph, which is visualized at a further depth with correspondingly less detail. In order to validate our technique, we ran a controlled experiment with 16 subjects comparing static visual highlighting to stereoscopic highlighting on 2D and 3D graph layouts for a range of tasks. Our results show that while for most tasks the difference in performance between stereoscopic highlighting alone and static visual highlighting is not statistically significant, users performed better when both highlighting methods were used concurrently. In more complicated tasks, 3D layout with static visual highlighting outperformed 2D layouts with a single highlighting method. However, it did not outperform the 2D layout utilizing both highlighting techniques simultaneously. Based on these results, we conclude that stereoscopic highlighting is a promising technique that can significantly enhance graph visualizations for certain use cases.},
}
@article{p576,
journal = {IEEE TVCG},
year = 2011,
title = {Synthetic Generation of High-Dimensional Datasets},
doi = {10.1109/TVCG.2011.237},
url = {http://dx.doi.org/10.1109/TVCG.2011.237},
author = {Albuquerque, G. and Lowe, T. and Magnor, M.},
pages = {2317--2324},
keywords = {Synthetic data generation, multivariate data, high-dimensional data, interaction},
abstract = {Generation of synthetic datasets is a common practice in many research areas. Such data is often generated to meet specific needs or certain conditions that may not be easily found in the original, real data. The nature of the data varies according to the application area and includes text, graphs, social or weather data, among many others. The common process to create such synthetic datasets is to implement small scripts or programs, restricted to small problems or to a specific application. In this paper we propose a framework designed to generate high dimensional datasets. Users can interactively create and navigate through multi dimensional datasets using a suitable graphical user-interface. The data creation is driven by statistical distributions based on a few user-defined parameters. First, a grounding dataset is created according to given inputs, and then structures and trends are included in selected dimensions and orthogonal projection planes. Furthermore, our framework supports the creation of complex non-orthogonal trends and classified datasets. It can successfully be used to create synthetic datasets simulating important trends as multidimensional clusters, correlations and outliers.},
}
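A much-reduced sketch of the kind of generator the abstract describes: a grounding dataset drawn from simple distributions, plus a cluster, a correlated dimension pair, and a few outliers injected into selected dimensions. All parameters and the choice of structures are illustrative assumptions.

# Illustrative synthetic high-dimensional data generator (a tiny stand-in for
# the interactive framework described above; parameters are assumptions).
import numpy as np

def synthetic(n=1000, dims=10, seed=0):
    rng = np.random.default_rng(seed)
    data = rng.uniform(size=(n, dims))                                 # grounding dataset
    data[:200, 2:4] = rng.normal([0.7, 0.3], 0.03, size=(200, 2))      # cluster in dims 2-3
    data[:, 5] = 0.8 * data[:, 4] + 0.2 * rng.uniform(size=n)          # correlate dims 4-5
    data[rng.choice(n, 5, replace=False), 7] = 3.0                     # outliers in dim 7
    return data

print(synthetic().shape)    # (1000, 10)
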
@article{p577,
journal = {IEEE TVCG},
year = 2011,
title = {TextFlow: Towards Better Understanding of Evolving Topics in Text},
doi = {10.1109/TVCG.2011.239},
url = {http://dx.doi.org/10.1109/TVCG.2011.239},
author = {Weiwei Cui and Shixia Liu and Li Tan and Conglei Shi and Yangqiu Song and Zekai Gao and Huamin Qu and Xin Tong},
pages = {2412--2421},
keywords = {Text visualization, Topic evolution, Hierarchical Dirichlet process, Critical event},
abstract = {Understanding how topics evolve in text data is an important and challenging task. Although much work has been devoted to topic analysis, the study of topic evolution has largely been limited to individual topics. In this paper, we introduce TextFlow, a seamless integration of visualization and topic mining techniques, for analyzing various evolution patterns that emerge from multiple topics. We first extend an existing analysis technique to extract three-level features: the topic evolution trend, the critical event, and the keyword correlation. Then a coherent visualization that consists of three new visual components is designed to convey complex relationships between them. Through interaction, the topic mining model and visualization can communicate with each other to help users refine the analysis result and gain insights into the data progressively. Finally, two case studies are conducted to demonstrate the effectiveness and usefulness of TextFlow in helping users understand the major topic evolution patterns in time-varying text data.},
}
@article{p578,
journal = {IEEE TVCG},
year = 2011,
title = {TreeNetViz: Revealing Patterns of Networks over Tree Structures},
doi = {10.1109/TVCG.2011.247},
url = {http://dx.doi.org/10.1109/TVCG.2011.247},
author = {Liang Gou and Xiaolong Zhang},
pages = {2449--2458},
keywords = {Compound graph, network and tree, TreeNetViz, visualization, multiscale and cross-scale},
abstract = {Network data often contain important attributes from various dimensions such as social affiliations and areas of expertise in a social network. If such attributes exhibit a tree structure, visualizing a compound graph consisting of tree and network structures becomes complicated. How to visually reveal patterns of a network over a tree has not been fully studied. In this paper, we propose a compound graph model, TreeNet, to support visualization and analysis of a network at multiple levels of aggregation over a tree. We also present a visualization design, TreeNetViz, to offer the multiscale and cross-scale exploration and interaction of a TreeNet graph. TreeNetViz uses a Radial, Space-Filling (RSF) visualization to represent the tree structure, a circle layout with novel optimization to show aggregated networks derived from TreeNet, and an edge bundling technique to reduce visual complexity. Our circular layout algorithm reduces both total edge-crossings and edge length and also considers hierarchical structure constraints and edge weight in a TreeNet graph. These experiments illustrate that the algorithm can reduce visual cluttering in TreeNet graphs. Our case study also shows that TreeNetViz has the potential to support the analysis of a compound graph by revealing multiscale and cross-scale network patterns.},
}
@article{p579,
journal = {IEEE TVCG},
year = 2011,
title = {VisBricks: Multiform Visualization of Large, Inhomogeneous Data},
doi = {10.1109/TVCG.2011.250},
url = {http://dx.doi.org/10.1109/TVCG.2011.250},
author = {Lex, A. and Schulz, H. and Streit, M. and Partl, C. and Schmalstieg, D.},
pages = {2291--2300},
keywords = {Inhomogeneous data, multiple coordinated views, multiform visualization},
abstract = {Large volumes of real-world data often exhibit inhomogeneities: vertically in the form of correlated or independent dimensions and horizontally in the form of clustered or scattered data items. In essence, these inhomogeneities form the patterns in the data that researchers are trying to find and understand. Sophisticated statistical methods are available to reveal these patterns; however, the visualization of their outcomes is mostly still performed in a one-view-fits-all manner. In contrast, our novel visualization approach, VisBricks, acknowledges the inhomogeneity of the data and the need for different visualizations that suit the individual characteristics of the different data subsets. The overall visualization of the entire data set is patched together from smaller visualizations; there is one VisBrick for each cluster in each group of interdependent dimensions. Whereas the total impression of all VisBricks together gives a comprehensive high-level overview of the different groups of data, each VisBrick independently shows the details of the group of data it represents. State-of-the-art brushing and visual linking between all VisBricks furthermore allows the comparison of the groupings and the distribution of data items among them. In this paper, we introduce the VisBricks visualization concept, discuss its design rationale and implementation, and demonstrate its usefulness by applying it to a use case from the field of biomedicine.},
}
@article{p580,
journal = {IEEE TVCG},
year = 2011,
title = {Visual Thinking In Action: Visualizations As Used On Whiteboards},
doi = {10.1109/TVCG.2011.251},
url = {http://dx.doi.org/10.1109/TVCG.2011.251},
author = {Walny, J. and Carpendale, S. and Riche, N.H. and Venolia, G. and Fawcett, P.},
pages = {2508--2517},
keywords = {Visualization, diagrams, whiteboards, observational study},
abstract = {While it is still most common for information visualization researchers to develop new visualizations from a data- or task-driven perspective, there is growing interest in understanding the types of visualizations people create by themselves for personal use. As part of this recent direction, we have studied a large collection of whiteboards in a research institution, where people make active use of combinations of words, diagrams and various types of visuals to help them further their thought processes. Our goal is to arrive at a better understanding of the nature of visuals that are created spontaneously during brainstorming, thinking, communicating, and general problem solving on whiteboards. We use the qualitative approaches of open coding, interviewing, and affinity diagramming to explore the use of recognizable and novel visuals, and the interplay between visualization and diagrammatic elements with words, numbers and labels. We discuss the potential implications of our findings on information visualization design.},
}
@article{p581,
journal = {IEEE TVCG},
year = 2011,
title = {Visualization of Parameter Space for Image Analysis},
doi = {10.1109/TVCG.2011.253},
url = {http://dx.doi.org/10.1109/TVCG.2011.253},
author = {Pretorius, A.J. and Bray, M.-A.P. and Carpenter, A.E. and Ruddle, R.A.},
pages = {2402--2411},
keywords = {Information visualization, visual analytics, parameter space, image analysis, sampling},
abstract = {Image analysis algorithms are often highly parameterized and much human input is needed to optimize parameter settings. This incurs a time cost of up to several days. We analyze and characterize the conventional parameter optimization process for image analysis and formulate user requirements. With this as input, we propose a change in paradigm by optimizing parameters based on parameter sampling and interactive visual exploration. To save time and reduce memory load, users are only involved in the first step - initialization of sampling - and the last step - visual analysis of output. This helps users to more thoroughly explore the parameter space and produce higher quality results. We describe a custom sampling plug-in we developed for CellProfiler - a popular biomedical image analysis framework. Our main focus is the development of an interactive visualization technique that enables users to analyze the relationships between sampled input parameters and corresponding output. We implemented this in a prototype called Paramorama. It provides users with a visual overview of parameters and their sampled values. User-defined areas of interest are presented in a structured way that includes image-based output and a novel layout algorithm. To find optimal parameter settings, users can tag high- and low-quality results to refine their search. We include two case studies to illustrate the utility of this approach.},
}
@article{p582,
journal = {IEEE TVCG},
year = 2011,
title = {Visualization Rhetoric: Framing Effects in Narrative Visualization},
doi = {10.1109/TVCG.2011.255},
url = {http://dx.doi.org/10.1109/TVCG.2011.255},
author = {Hullman, J. and Diakopoulos, N.},
pages = {2231--2240},
keywords = {Rhetoric, narrative visualization, framing effects, semiotics, denotation, connotation },
abstract = {Narrative visualizations combine conventions of communicative and exploratory information visualization to convey an intended story. We demonstrate visualization rhetoric as an analytical framework for understanding how design techniques that prioritize particular interpretations in visualizations that "tell a story" can significantly affect end-user interpretation. We draw a parallel between narrative visualization interpretation and evidence from framing studies in political messaging, decision-making, and literary studies. Devices for understanding the rhetorical nature of narrative information visualizations are presented, informed by the rigorous application of concepts from critical theory, semiotics, journalism, and political theory. We draw attention to how design tactics represent additions or omissions of information at various levels-the data, visual representation, textual annotations, and interactivity-and how visualizations denote and connote phenomena with reference to unstated viewing conventions and codes. Classes of rhetorical techniques identified via a systematic analysis of recent narrative visualizations are presented, and characterized according to their rhetorical contribution to the visualization. We describe how designers and researchers can benefit from the potentially positive aspects of visualization rhetoric in designing engaging, layered narrative visualizations and how our framework can shed light on how a visualization design prioritizes specific interpretations. We identify areas where future inquiry into visualization rhetoric can improve understanding of visualization interpretation.},
}
@article{p696,
journal = {IEEE TVCG},
year = 2010,
title = {A Visual Backchannel for Large-Scale Events},
doi = {10.1109/TVCG.2010.129},
url = {http://dx.doi.org/10.1109/TVCG.2010.129},
author = {Dork, M. and Gruen, D. and Williamson, C. and Carpendale, S.},
pages = {1129--1138},
keywords = {Backchannel, information visualization, events, multiple views, microblogging, information retrieval, World Wide Web},
abstract = {We introduce the concept of a Visual Backchannel as a novel way of following and exploring online conversations about large-scale events. Microblogging communities, such as Twitter, are increasingly used as digital backchannels for timely exchange of brief comments and impressions during political speeches, sport competitions, natural disasters, and other large events. Currently, shared updates are typically displayed in the form of a simple list, making it difficult to get an overview of the fast-paced discussions as it happens in the moment and how it evolves over time. In contrast, our Visual Backchannel design provides an evolving, interactive, and multi-faceted visual overview of large-scale ongoing conversations on Twitter. To visualize a continuously updating information stream, we include visual saliency for what is happening now and what has just happened, set in the context of the evolving conversation. As part of a fully web-based coordinated-view system we introduce Topic Streams, a temporally adjustable stacked graph visualizing topics over time, a People Spiral representing participants and their activity, and an Image Cloud encoding the popularity of event photos by size. Together with a post listing, these mutually linked views support cross-filtering along topics, participants, and time ranges. We discuss our design considerations, in particular with respect to evolving visualizations of dynamically changing data. Initial feedback indicates significant interest and suggests several unanticipated uses.},
}
@article{p697,
journal = {IEEE TVCG},
year = 2010,
title = {An Extension of Wilkinson's Algorithm for Positioning Tick Labels on Axes},
doi = {10.1109/TVCG.2010.130},
url = {http://dx.doi.org/10.1109/TVCG.2010.130},
author = {Talbot, J. and Lin, S. and Hanrahan, P.},
pages = {1036--1043},
keywords = {Axis labeling, nice numbers},
abstract = {The non-data components of a visualization, such as axes and legends, can often be just as important as the data itself. They provide contextual information essential to interpreting the data. In this paper, we describe an automated system for choosing positions and labels for axis tick marks. Our system extends Wilkinson's optimization-based labeling approach to create a more robust, full-featured axis labeler. We define an expanded space of axis labelings by automatically generating additional nice numbers as needed and by permitting the extreme labels to occur inside the data range. These changes provide flexibility in problematic cases, without degrading quality elsewhere. We also propose an additional optimization criterion, legibility, which allows us to simultaneously optimize over label formatting, font size, and orientation. To solve this revised optimization problem, we describe the optimization function and an efficient search algorithm. Finally, we compare our method to previous work using both quantitative and qualitative metrics. This paper is a good example of how ideas from automated graphic design can be applied to information visualization.},
}
@article{p698,
journal = {IEEE TVCG},
year = 2010,
title = {behaviorism: a framework for dynamic data visualization},
doi = {10.1109/TVCG.2010.126},
url = {http://dx.doi.org/10.1109/TVCG.2010.126},
author = {Forbes, A. and Hollerer, T. and Legrady, G.},
pages = {1164--1171},
keywords = {Frameworks, information visualization, information art, dynamic data},
abstract = {While a number of information visualization software frameworks exist, creating new visualizations, especially those that involve novel visualization metaphors, interaction techniques, data analysis strategies, and specialized rendering algorithms, is still often a difficult process. To facilitate the creation of novel visualizations we present a new software framework, behaviorism, which provides a wide range of flexibility when working with dynamic information on visual, temporal, and ontological levels, but at the same time providing appropriate abstractions which allow developers to create prototypes quickly which can then easily be turned into robust systems. The core of the framework is a set of three interconnected graphs, each with associated operators: a scene graph for high-performance 3D rendering, a data graph for different layers of semantically-linked heterogeneous data, and a timing graph for sophisticated control of scheduling, interaction, and animation. In particular, the timing graph provides a unified system to add behaviors to both data and visual elements, as well as to the behaviors themselves. To evaluate the framework we look briefly at three different projects all of which required novel visualizations in different domains, and all of which worked with dynamic data in different ways: an interactive ecological simulation, an information art installation, and an information visualization technique.},
}
@article{p699,
journal = {IEEE TVCG},
year = 2010,
title = {Comparative Analysis of Multidimensional, Quantitative Data},
doi = {10.1109/TVCG.2010.138},
url = {http://dx.doi.org/10.1109/TVCG.2010.138},
author = {Lex, A. and Streit, M. and Partl, C. and Kashofer, K. and Schmalstieg, D.},
pages = {1027--1035},
keywords = {Multidimensional data, cluster comparison, bioinformatics visualization},
abstract = {When analyzing multidimensional, quantitative data, the comparison of two or more groups of dimensions is a common task. Typical sources of such data are experiments in biology, physics or engineering, which are conducted in different configurations and use replicates to ensure statistically significant results. One common way to analyze this data is to filter it using statistical methods and then run clustering algorithms to group similar values. The clustering results can be visualized using heat maps, which show differences between groups as changes in color. However, in cases where groups of dimensions have an a priori meaning, it is not desirable to cluster all dimensions combined, since a clustering algorithm can fragment continuous blocks of records. Furthermore, identifying relevant elements in heat maps becomes more difficult as the number of dimensions increases. To aid in such situations, we have developed Matchmaker, a visualization technique that allows researchers to arbitrarily arrange and compare multiple groups of dimensions at the same time. We create separate groups of dimensions which can be clustered individually, and place them in an arrangement of heat maps reminiscent of parallel coordinates. To identify relations, we render bundled curves and ribbons between related records in different groups. We then allow interactive drill-downs using enlarged detail views of the data, which enable in-depth comparisons of clusters between groups. To reduce visual clutter, we minimize crossings between the views. This paper concludes with two case studies. The first demonstrates the value of our technique for the comparison of clustering algorithms. In the second, biologists use our system to investigate why certain strains of mice develop liver disease while others remain healthy, informally showing the efficacy of our system when analyzing multidimensional data containing distinct groups of dimensions.},
}
@article{p700,
journal = {IEEE TVCG},
year = 2010,
title = {Declarative Language Design for Interactive Visualization},
doi = {10.1109/TVCG.2010.144},
url = {http://dx.doi.org/10.1109/TVCG.2010.144},
author = {Heer, J. and Bostock, M.},
pages = {1149--1156},
keywords = {Information visualization, user interfaces, toolkits, domain specific languages, declarative languages, optimization},
abstract = {We investigate the design of declarative, domain-specific languages for constructing interactive visualizations. By separating specification from execution, declarative languages can simplify development, enable unobtrusive optimization, and support retargeting across platforms. We describe the design of the Protovis specification language and its implementation within an object-oriented, statically-typed programming language (Java). We demonstrate how to support rich visualizations without requiring a toolkit-specific data model and extend Protovis to enable declarative specification of animated transitions. To support cross-platform deployment, we introduce rendering and event-handling infrastructures decoupled from the runtime platform, letting designers retarget visualization specifications (e.g., from desktop to mobile phone) with reduced effort. We also explore optimizations such as runtime compilation of visualization specifications, parallelized execution, and hardware-accelerated rendering. We present benchmark studies measuring the performance gains provided by these optimizations and compare performance to existing Java-based visualization tools, demonstrating scalability improvements exceeding an order of magnitude.},
}
@article{p701,
journal = {IEEE TVCG},
year = 2010,
title = {eSeeTrack—Visualizing Sequential Fixation Patterns},
doi = {10.1109/TVCG.2010.149},
url = {http://dx.doi.org/10.1109/TVCG.2010.149},
author = {Hoi Ying Tsang and Tory, M. and Swindells, C.},
pages = {953--962},
keywords = {},
abstract = {We introduce eSeeTrack, an eye-tracking visualization prototype that facilitates exploration and comparison of sequential gaze orderings in a static or a dynamic scene. It extends current eye-tracking data visualizations by extracting patterns of sequential gaze orderings, displaying these patterns in a way that does not depend on the number of fixations on a scene, and enabling users to compare patterns from two or more sets of eye-gaze data. Extracting such patterns was very difficult with previous visualization techniques. eSeeTrack combines a timeline and a tree-structured visual representation to embody three aspects of eye-tracking data that users are interested in: duration, frequency and orderings of fixations. We demonstrate the usefulness of eSeeTrack via two case studies on surgical simulation and retail store chain data. We found that eSeeTrack allows ordering of fixations to be rapidly queried, explored and compared. Furthermore, our tool provides an effective and efficient mechanism to determine pattern outliers. This approach can be effective for behavior analysis in a variety of domains that are described at the end of this paper.},
}
@article{p702,
journal = {IEEE TVCG},
year = 2010,
title = {Evaluating the impact of task demands and block resolution on the effectiveness of pixel-based visualization},
doi = {10.1109/TVCG.2010.150},
url = {http://dx.doi.org/10.1109/TVCG.2010.150},
author = {Borgo, R. and Proctor, K. and Chen, M. and Jänicke, H. and Murray, T. and Thornton, I.M.},
pages = {963--972},
keywords = {Pixel-based visualization, evaluation, user study, visual search, change detection},
abstract = {Pixel-based visualization is a popular method of conveying large amounts of numerical data graphically. Application scenarios include business and finance, bioinformatics and remote sensing. In this work, we examined how the usability of such visual representations varied across different tasks and block resolutions. The main stimuli consisted of temporal pixel-based visualization with a white-red color map, simulating monthly temperature variation over a six-year period. In the first study, we included 5 separate tasks to exert different perceptual loads. We found that performance varied considerably as a function of task, ranging from 75% correct in low-load tasks to below 40% in high-load tasks. There was a small but consistent effect of resolution, with the uniform patch improving performance by around 6% relative to higher block resolution. In the second user study, we focused on a high-load task for evaluating month-to-month changes across different regions of the temperature range. We tested both CIE L*u*v* and RGB color spaces. We found that the nature of the change-evaluation errors related directly to the distance between the compared regions in the mapped color space. We were able to reduce such errors by using multiple color bands for the same data range. In a final study, we examined more fully the influence of block resolution on performance, and found block resolution had a limited impact on the effectiveness of pixel-based visualization.},
}
@article{p703,
journal = {IEEE TVCG},
year = 2010,
title = {FacetAtlas: Multifaceted Visualization for Rich Text Corpora},
doi = {10.1109/TVCG.2010.154},
url = {http://dx.doi.org/10.1109/TVCG.2010.154},
author = {Nan Cao and Jimeng Sun and Yu-Ru Lin and Gotz, D. and Shixia Liu and Huamin Qu},
pages = {1172--1181},
keywords = {Multi-facet visualization, Text visualization, Multi-relational Graph, Search UI},
abstract = {Documents in rich text corpora usually contain multiple facets of information. For example, an article about a specific disease often consists of different facets such as symptom, treatment, cause, diagnosis, prognosis, and prevention. Thus, documents may have different relations based on different facets. Powerful search tools have been developed to help users locate lists of individual documents that are most related to specific keywords. However, there is a lack of effective analysis tools that reveal the multifaceted relations of documents within or cross the document clusters. In this paper, we present FacetAtlas, a multifaceted visualization technique for visually analyzing rich text corpora. FacetAtlas combines search technology with advanced visual analytical tools to convey both global and local patterns simultaneously. We describe several unique aspects of FacetAtlas, including (1) node cliques and multifaceted edges, (2) an optimized density map, and (3) automated opacity pattern enhancement for highlighting visual patterns, (4) interactive context switch between facets. In addition, we demonstrate the power of FacetAtlas through a case study that targets patient education in the health care domain. Our evaluation shows the benefits of this work, especially in support of complex multifaceted data analysis.},
}
@article{p704,
journal = {IEEE TVCG},
year = 2010,
title = {GeneaQuilts: A System for Exploring Large Genealogies},
doi = {10.1109/TVCG.2010.159},
url = {http://dx.doi.org/10.1109/TVCG.2010.159},
author = {Bezerianos, A. and Dragicevic, P. and Fekete, J. and Juhee Bae and Watson, B.},
pages = {1073--1081},
keywords = {Genealogy visualization, interaction},
abstract = {GeneaQuilts is a new visualization technique for representing large genealogies of up to several thousand individuals. The visualization takes the form of a diagonally-filled matrix, where rows are individuals and columns are nuclear families. After identifying the major tasks performed in genealogical research and the limits of current software, we present an interactive genealogy exploration system based on GeneaQuilts. The system includes an overview, a timeline, search and filtering components, and a new interaction technique called Bring & Slide that allows fluid navigation in very large genealogies. We report on preliminary feedback from domain experts and show how our system supports a number of their tasks.},
}
@article{p705,
journal = {IEEE TVCG},
year = 2010,
title = {Graphical inference for infovis},
doi = {10.1109/TVCG.2010.161},
url = {http://dx.doi.org/10.1109/TVCG.2010.161},
author = {Wickham, H. and Cook, D. and Hofmann, H. and Buja, A.},
pages = {973--979},
keywords = {Statistics, visual testing, permutation tests, null hypotheses, data plots},
abstract = {How do we know if what we see is really there? When visualizing data, how do we avoid falling into the trap of apophenia where we see patterns in random noise? Traditionally, infovis has been concerned with discovering new relationships, and statistics with preventing spurious relationships from being reported. We pull these opposing poles closer with two new techniques for rigorous statistical inference of visual discoveries. The "Rorschach" helps the analyst calibrate their understanding of uncertainty and "line-up" provides a protocol for assessing the significance of visual discoveries, protecting against the discovery of spurious structure.},
}
@article{p706,
journal = {IEEE TVCG},
year = 2010,
title = {Graphical Perception of Multiple Time Series},
doi = {10.1109/TVCG.2010.162},
url = {http://dx.doi.org/10.1109/TVCG.2010.162},
author = {Javed, W. and McDonnel, B. and Elmqvist, N.},
pages = {927--934},
keywords = {Line graphs, braided graphs, horizon graphs, small multiples, stacked graphs, evaluation, design guidelines},
abstract = {Line graphs have been the visualization of choice for temporal data ever since the days of William Playfair (1759-1823), but realistic temporal analysis tasks often include multiple simultaneous time series. In this work, we explore user performance for comparison, slope, and discrimination tasks for different line graph techniques involving multiple time series. Our results show that techniques that create separate charts for each time series--such as small multiples and horizon graphs--are generally more efficient for comparisons across time series with a large visual span. On the other hand, shared-space techniques--like standard line graphs--are typically more efficient for comparisons over smaller visual spans where the impact of overlap and clutter is reduced.},
}
@article{p707,
journal = {IEEE TVCG},
year = 2010,
title = {Gremlin: An Interactive Visualization Model for Analyzing Genomic Rearrangements},
doi = {10.1109/TVCG.2010.163},
url = {http://dx.doi.org/10.1109/TVCG.2010.163},
author = {O'Brien, T.M. and Ritz, A.M. and Raphael, B.J. and Laidlaw, D.H.},
pages = {918--926},
keywords = {Information visualization, bioinformatics, insight-based evaluation},
abstract = {In this work we present, apply, and evaluate a novel, interactive visualization model for comparative analysis of structural variants and rearrangements in human and cancer genomes, with emphasis on data integration and uncertainty visualization. To support both global trend analysis and local feature detection, this model enables explorations continuously scaled from the high-level, complete genome perspective, down to the low-level, structural rearrangement view, while preserving global context at all times. We have implemented these techniques in Gremlin, a genomic rearrangement explorer with multi-scale, linked interactions, which we apply to four human cancer genome data sets for evaluation. Using an insight-based evaluation methodology, we compare Gremlin to Circos, the state-of-the-art in genomic rearrangement visualization, through a small user study with computational biologists working in rearrangement analysis. Results from user study evaluations demonstrate that this visualization model enables more total insights, more insights per minute, and more complex insights than the current state-of-the-art for visual analysis and exploration of genome rearrangements.},
}
@article{p708,
journal = {IEEE TVCG},
year = 2010,
title = {How Information Visualization Novices Construct Visualizations},
doi = {10.1109/TVCG.2010.164},
url = {http://dx.doi.org/10.1109/TVCG.2010.164},
author = {Grammel, L. and Tory, M. and Storey, M.},
pages = {943--952},
keywords = {Empirical study, visualization, visualization construction, visual analytics, visual mapping, novices},
abstract = {It remains challenging for information visualization novices to rapidly construct visualizations during exploratory data analysis. We conducted an exploratory laboratory study in which information visualization novices explored fictitious sales data by communicating visualization specifications to a human mediator, who rapidly constructed the visualizations using commercial visualization software. We found that three activities were central to the iterative visualization construction process: data attribute selection, visual template selection, and visual mapping specification. The major barriers faced by the participants were translating questions into data attributes, designing visual mappings, and interpreting the visualizations. Partial specification was common, and the participants used simple heuristics and preferred visualizations they were already familiar with, such as bar, line and pie charts. We derived abstract models from our observations that describe barriers in the data exploration process and uncovered how information visualization novices think about visualization specifications. Our findings support the need for tools that suggest potential visualizations and support iterative refinement, that provide explanations and help with learning, and that are tightly integrated into tool support for the overall visual analytics process.},
}
@article{p709,
journal = {IEEE TVCG},
year = 2010,
title = {Laws of Attraction: From Perceptual Forces to Conceptual Similarity},
doi = {10.1109/TVCG.2010.174},
url = {http://dx.doi.org/10.1109/TVCG.2010.174},
author = {Ziemkiewicz, C. and Kosara, R.},
pages = {1009--1016},
keywords = {Perceptual cognition, visualization models, laboratory studies, cognition theory},
abstract = {Many of the pressing questions in information visualization deal with how exactly a user reads a collection of visual marks as information about relationships between entities. Previous research has suggested that people see parts of a visualization as objects, and may metaphorically interpret apparent physical relationships between these objects as suggestive of data relationships. We explored this hypothesis in detail in a series of user experiments. Inspired by the concept of implied dynamics in psychology, we first studied whether perceived gravity acting on a mark in a scatterplot can lead to errors in a participant's recall of the mark's position. The results of this study suggested that such position errors exist, but may be more strongly influenced by attraction between marks. We hypothesized that such apparent attraction may be influenced by elements used to suggest relationship between objects, such as connecting lines, grouping elements, and visual similarity. We further studied what visual elements are most likely to cause this attraction effect, and whether the elements that best predicted attraction errors were also those which suggested conceptual relationships most strongly. Our findings show a correlation between attraction errors and intuitions about relatedness, pointing towards a possible mechanism by which the perception of visual marks becomes an interpretation of data relationships.},
}
@article{p710,
journal = {IEEE TVCG},
year = 2010,
title = {ManiWordle: Providing Flexible Control over Wordle},
doi = {10.1109/TVCG.2010.175},
url = {http://dx.doi.org/10.1109/TVCG.2010.175},
author = {Koh, K. and Bongshin Lee and Bohyoung Kim and Jinwook Seo},
pages = {1190--1197},
keywords = {Interaction design, direct manipulation, flexibility-usability tradeoff, tag-cloud, participatory visualization, user study},
abstract = {Among the multifarious tag-clouding techniques, Wordle stands out to the community by providing an aesthetic layout, eliciting the emergence of the participatory culture and usage of tag-clouding in the artistic creations. In this paper, we introduce ManiWordle, a Wordle-based visualization tool that revamps interactions with the layout by supporting custom manipulations. ManiWordle allows people to manipulate typography, color, and composition not only for the layout as a whole, but also for the individual words, enabling them to have better control over the layout result. We first describe our design rationale along with the interaction techniques for tweaking the layout. We then present the results both from the preliminary usability study and from the comparative study between ManiWordle and Wordle. The results suggest that ManiWordle provides higher user satisfaction and an efficient method of creating the desired "art work," harnessing the power behind the ever-increasing popularity of Wordle.},
}
@article{p711,
journal = {IEEE TVCG},
year = 2010,
title = {Matching Visual Saliency to Confidence in Plots of Uncertain Data},
doi = {10.1109/TVCG.2010.176},
url = {http://dx.doi.org/10.1109/TVCG.2010.176},
author = {Feng, D. and Kwock, L. and Yueh Lee and Taylor, R.M.},
pages = {980--989},
keywords = {Uncertainty visualization, brushing, scatter plots, parallel coordinates, multivariate data},
abstract = {Conveying data uncertainty in visualizations is crucial for preventing viewers from drawing conclusions based on untrustworthy data points. This paper proposes a methodology for efficiently generating density plots of uncertain multivariate data sets that draws viewers to preattentively identify values of high certainty while not calling attention to uncertain values. We demonstrate how to augment scatter plots and parallel coordinates plots to incorporate statistically modeled uncertainty and show how to integrate them with existing multivariate analysis techniques, including outlier detection and interactive brushing. Computing high quality density plots can be expensive for large data sets, so we also describe a probabilistic plotting technique that summarizes the data without requiring explicit density plot computation. These techniques have been useful for identifying brain tumors in multivariate magnetic resonance spectroscopy data and we describe how to extend them to visualize ensemble data sets.},
}
@article{p712,
journal = {IEEE TVCG},
year = 2010,
title = {Mental Models, Visual Reasoning and Interaction in Information Visualization: A Top-down Perspective},
doi = {10.1109/TVCG.2010.177},
url = {http://dx.doi.org/10.1109/TVCG.2010.177},
author = {Zhicheng Liu and Stasko, J.},
pages = {999--1008},
keywords = {Mental model, model-based reasoning, distributed cognition, interaction, theory, information visualization},
abstract = {Although previous research has suggested that examining the interplay between internal and external representations can benefit our understanding of the role of information visualization (InfoVis) in human cognitive activities, there has been little work detailing the nature of internal representations, the relationship between internal and external representations and how interaction is related to these representations. In this paper, we identify and illustrate a specific kind of internal representation, mental models, and outline the high-level relationships between mental models and external visualizations. We present a top-down perspective of reasoning as model construction and simulation, and discuss the role of visualization in model based reasoning. From this perspective, interaction can be understood as active modeling for three primary purposes: external anchoring, information foraging, and cognitive offloading. Finally we discuss the implications of our approach for design, evaluation and theory development.},
}
@article{p713,
journal = {IEEE TVCG},
year = 2010,
title = {MulteeSum: A Tool for Comparative Spatial and Temporal Gene Expression Data},
doi = {10.1109/TVCG.2010.137},
url = {http://dx.doi.org/10.1109/TVCG.2010.137},
author = {Meyer, M. and Munzner, T. and DePace, A. and Pfister, H.},
pages = {908--917},
keywords = {Spatial data, temporal data, gene expression},
abstract = {Cells in an organism share the same genetic information in their DNA, but have very different forms and behavior because of the selective expression of subsets of their genes. The widely used approach of measuring gene expression over time from a tissue sample using techniques such as microarrays or sequencing does not provide information about the spatial position within the tissue where these genes are expressed. In contrast, we are working with biologists who use techniques that measure gene expression in every individual cell of entire fruitfly embryos over an hour of their development, and do so for multiple closely-related subspecies of Drosophila. These scientists are faced with the challenge of integrating temporal gene expression data with the spatial location of cells and, moreover, comparing this data across multiple related species. We have worked with these biologists over the past two years to develop MulteeSum, a visualization system that supports inspection and curation of data sets showing gene expression over time, in conjunction with the spatial location of the cells where the genes are expressed - it is the first tool to support comparisons across multiple such data sets. MulteeSum is part of a general and flexible framework we developed with our collaborators that is built around multiple summaries for each cell, allowing the biologists to explore the results of computations that mix spatial information, gene expression measurements over time, and data from multiple related species or organisms. We justify our design decisions based on specific descriptions of the analysis needs of our collaborators, and provide anecdotal evidence of the efficacy of MulteeSum through a series of case studies.},
}
@article{p714,
journal = {IEEE TVCG},
year = 2010,
title = {Narrative Visualization: Telling Stories with Data},
doi = {10.1109/TVCG.2010.179},
url = {http://dx.doi.org/10.1109/TVCG.2010.179},
author = {Segel, E. and Heer, J.},
pages = {1139--1148},
keywords = {Narrative visualization, storytelling, design methods, case study, journalism, social data analysis},
abstract = {Data visualization is regularly promoted for its ability to reveal stories within data, yet these "data stories" differ in important ways from traditional forms of storytelling. Storytellers, especially online journalists, have increasingly been integrating visualizations into their narratives, in some cases allowing the visualization to function in place of a written story. In this paper, we systematically review the design space of this emerging class of visualizations. Drawing on case studies from news media to visualization research, we identify distinct genres of narrative visualization. We characterize these design differences, together with interactivity and messaging, in terms of the balance between the narrative flow intended by the author (imposed by graphical elements and the interface) and story discovery on the part of the reader (often through interactive exploration). Our framework suggests design strategies for narrative visualization, including promising under-explored approaches to journalistic storytelling and educational media.},
}
@article{p715,
journal = {IEEE TVCG},
year = 2010,
title = {Necklace Maps},
doi = {10.1109/TVCG.2010.180},
url = {http://dx.doi.org/10.1109/TVCG.2010.180},
author = {Speckmann, B. and Verbeek, K.},
pages = {881--889},
keywords = {Geographic Visualization, Automated Cartography, Proportional Symbol Maps, Necklace Maps},
abstract = {Statistical data associated with geographic regions is nowadays globally available in large amounts and hence automated methods to visually display these data are in high demand. There are several well-established thematic map types for quantitative data on the ratio-scale associated with regions: choropleth maps, cartograms, and proportional symbol maps. However, all these maps suffer from limitations, especially if large data values are associated with small regions. To overcome these limitations, we propose a novel type of quantitative thematic map, the necklace map. In a necklace map, the regions of the underlying two-dimensional map are projected onto intervals on a one-dimensional curve (the necklace) that surrounds the map regions. Symbols are scaled such that their area corresponds to the data of their region and placed without overlap inside the corresponding interval on the necklace. Necklace maps appear clear and uncluttered and allow for comparatively large symbol sizes. They visualize data sets well which are not proportional to region sizes. The linear ordering of the symbols along the necklace facilitates an easy comparison of symbol sizes. One map can contain several nested or disjoint necklaces to visualize clustered data. The advantages of necklace maps come at a price: the association between a symbol and its region is weaker than with other types of maps. Interactivity can help to strengthen this association if necessary. We present an automated approach to generate necklace maps which allows the user to interactively control the final symbol placement. We validate our approach with experiments using various data sets and maps.},
}
@article{p716,
journal = {IEEE TVCG},
year = 2010,
title = {OpinionSeer: Interactive Visualization of Hotel Customer Feedback},
doi = {10.1109/TVCG.2010.183},
url = {http://dx.doi.org/10.1109/TVCG.2010.183},
author = {Yingcai Wu and Furu Wei and Shixia Liu and Au, N. and Weiwei Cui and Hong Zhou and Huamin Qu},
pages = {1109--1118},
keywords = {opinion visualization, radial visualization, uncertainty visualization},
abstract = {The rapid development of Web technology has resulted in an increasing number of hotel customers sharing their opinions on the hotel services. Effective visual analysis of online customer opinions is needed, as it has a significant impact on building a successful business. In this paper, we present OpinionSeer, an interactive visualization system that could visually analyze a large collection of online hotel customer reviews. The system is built on a new visualization-centric opinion mining technique that considers uncertainty for faithfully modeling and analyzing customer opinions. A new visual representation is developed to convey customer opinions by augmenting well-established scatterplots and radial visualization. To provide multiple-level exploration, we introduce subjective logic to handle and organize subjective opinions with degrees of uncertainty. Several case studies illustrate the effectiveness and usefulness of OpinionSeer on analyzing relationships among multiple data dimensions and comparing opinions of different groups. Aside from data on hotel customer feedback, OpinionSeer could also be applied to visually analyze customer opinions on other products or services.},
}
@article{p717,
journal = {IEEE TVCG},
year = 2010,
title = {Pargnostics: Screen-Space Metrics for Parallel Coordinates},
doi = {10.1109/TVCG.2010.184},
url = {http://dx.doi.org/10.1109/TVCG.2010.184},
author = {Dasgupta, A. and Kosara, R.},
pages = {1017--1026},
keywords = {Parallel coordinates, metrics, display optimization, visualization models},
abstract = {Interactive visualization requires the translation of data into a screen space of limited resolution. While currently ignored by most visualization models, this translation entails a loss of information and the introduction of a number of artifacts that can be useful (e.g., aggregation, structures) or distracting (e.g., over-plotting, clutter) for the analysis. This phenomenon is observed in parallel coordinates, where overlapping lines between adjacent axes form distinct patterns, representing the relation between variables they connect. However, even for a small number of dimensions, the challenge is to effectively convey the relationships for all combinations of dimensions. The size of the dataset and a large number of dimensions only add to the complexity of this problem. To address these issues, we propose Pargnostics, parallel coordinates diagnostics, a model based on screen-space metrics that quantify the different visual structures. Pargnostics metrics are calculated for pairs of axes and take into account the resolution of the display as well as potential axis inversions. Metrics include the number of line crossings, crossing angles, convergence, overplotting, etc. To construct a visualization view, the user can pick from a ranked display showing pairs of coordinate axes and the structures between them, or examine all possible combinations of axes at once in a matrix display. Picking the best axes layout is an NP-complete problem in general, but we provide a way of automatically optimizing the display according to the user's preferences based on our metrics and model.},
}
@article{p718,
journal = {IEEE TVCG},
year = 2010,
title = {PedVis: A Structured, Space-Efficient Technique for Pedigree Visualization},
doi = {10.1109/TVCG.2010.185},
url = {http://dx.doi.org/10.1109/TVCG.2010.185},
author = {Tuttle, C. and Nonato, L.G. and Silva, C.T.},
pages = {1063--1072},
keywords = {Genealogy, Pedigree, H-tree},
abstract = {Public genealogical databases are becoming increasingly populated with historical data and records of the current population's ancestors. As this increasing amount of available information is used to link individuals to their ancestors, the resulting trees become deeper and more dense, which justifies the need for using organized, space-efficient layouts to display the data. Existing layouts are often only able to show a small subset of the data at a time. As a result, it is easy to become lost when navigating through the data or to lose sight of the overall tree structure. On the contrary, leaving space for unknown ancestors allows one to better understand the tree's structure, but leaving this space becomes expensive and allows fewer generations to be displayed at a time. In this work, we propose that the H-tree based layout be used in genealogical software to display ancestral trees. We will show that this layout presents an increase in the number of displayable generations, provides a nicely arranged, symmetrical, intuitive and organized fractal structure, increases the user's ability to understand and navigate through the data, and accounts for the visualization requirements necessary for displaying such trees. Finally, user-study results indicate potential for user acceptance of the new layout.},
}
@article{p719,
journal = {IEEE TVCG},
year = 2010,
title = {Perceptual Guidelines for Creating Rectangular Treemaps},
doi = {10.1109/TVCG.2010.186},
url = {http://dx.doi.org/10.1109/TVCG.2010.186},
author = {Kong, N. and Heer, J. and Agrawala, M.},
pages = {990--998},
keywords = {Graphical Perception, Visualization, Treemaps, Rectangular Area, Visual Encoding, Experiment, Mechanical Turk},
abstract = {Treemaps are space-filling visualizations that make efficient use of limited display space to depict large amounts of hierarchical data. Creating perceptually effective treemaps requires carefully managing a number of design parameters including the aspect ratio and luminance of rectangles. Moreover, treemaps encode values using area, which has been found to be less accurate than judgments of other visual encodings, such as length. We conduct a series of controlled experiments aimed at producing a set of design guidelines for creating effective rectangular treemaps. We find no evidence that luminance affects area judgments, but observe that aspect ratio does have an effect. Specifically, we find that the accuracy of area comparisons suffers when the compared rectangles have extreme aspect ratios or when both are squares. Contrary to common assumptions, the optimal distribution of rectangle aspect ratios within a treemap should include non-squares, but should avoid extremes. We then compare treemaps with hierarchical bar chart displays to identify the data densities at which length-encoded bar charts become less effective than area-encoded treemaps. We report the transition points at which treemaps exhibit judgment accuracy on par with bar charts for both leaf and non-leaf tree nodes. We also find that even at relatively low data densities treemaps result in faster comparisons than bar charts. Based on these results, we present a set of guidelines for the effective use of treemaps and suggest alternate approaches for treemap layout.},
}
@article{p720,
journal = {IEEE TVCG},
year = 2010,
title = {Rethinking Map Legends with Visualization},
doi = {10.1109/TVCG.2010.191},
url = {http://dx.doi.org/10.1109/TVCG.2010.191},
author = {Dykes, J. and Wood, J. and Slingsby, A.},
pages = {890--899},
keywords = {Cartography, design, Digimap service, legend, online web mapping, visualization},
abstract = {This design paper presents new guidance for creating map legends in a dynamic environment. Our contribution is a set of guidelines for legend design in a visualization context and a series of illustrative themes through which they may be expressed. These are demonstrated in an applications context through interactive software prototypes. The guidelines are derived from cartographic literature and in liaison with EDINA who provide digital mapping services for UK tertiary education. They enhance approaches to legend design that have evolved for static media with visualization by considering: selection, layout, symbols, position, dynamism and design and process. Broad visualization legend themes include: The Ground Truth Legend, The Legend as Statistical Graphic and The Map is the Legend. Together, these concepts enable us to augment legends with dynamic properties that address specific needs, rethink their nature and role and contribute to a wider re-evaluation of maps as artifacts of usage rather than statements of fact. EDINA has acquired funding to enhance their clients with visualization legends that use these concepts as a consequence of this work. The guidance applies to the design of a wide range of legends and keys used in cartography and information visualization.},
}
@article{p721,
journal = {IEEE TVCG},
year = 2010,
title = {SignalLens: Focus+Context Applied to Electronic Time Series},
doi = {10.1109/TVCG.2010.193},
url = {http://dx.doi.org/10.1109/TVCG.2010.193},
author = {Kincaid, R.},
pages = {900--907},
keywords = {Focus+Context, Lens, Test and Measurement, Electronic Signal, Signal Processing },
abstract = {Electronic test and measurement systems are becoming increasingly sophisticated in order to match the increased complexity and ultra-high speed of the devices under test. A key feature in many such instruments is a vastly increased capacity for storage of digital signals. Storage of 10^9 time points or more is now possible. At the same time, the typical screens on such measurement devices are relatively small. Therefore, these instruments can only render an extremely small fraction of the complete signal at any time. SignalLens uses a Focus+Context approach to provide a means of navigating to and inspecting low-level signal details in the context of the entire signal trace. This approach provides a compact visualization suitable for embedding into the small displays typically provided by electronic measurement instruments. We further augment this display with computed tracks which display time-aligned computed properties of the signal. By combining and filtering these computed tracks it is possible to easily and quickly find computationally detected features in the data which are often obscured by the visual compression required to render the large data sets on a small screen. Further, these tracks can be viewed in the context of the entire signal trace as well as visible high-level signal features. Several examples using real-world electronic measurement data are presented, which demonstrate typical use cases and the effectiveness of the design.},
}
@article{p722,
journal = {IEEE TVCG},
year = 2010,
title = {SparkClouds: Visualizing Trends in Tag Clouds},
doi = {10.1109/TVCG.2010.194},
url = {http://dx.doi.org/10.1109/TVCG.2010.194},
author = {Bongshin Lee and Riche, N.H. and Karlson, A. and Carpendale, S.},
pages = {1182--1189},
keywords = {Tag clouds, trend visualization, multiple line graphs, stacked bar charts, evaluation},
abstract = {Tag clouds have proliferated over the web over the last decade. They provide a visual summary of a collection of texts by visually depicting the tag frequency by font size. In use, tag clouds can evolve as the associated data source changes over time. Interesting discussions around tag clouds often include a series of tag clouds and consider how they evolve over time. However, since tag clouds do not explicitly represent trends or support comparisons, the cognitive demands placed on the person for perceiving trends in multiple tag clouds are high. In this paper, we introduce SparkClouds, which integrate sparklines into a tag cloud to convey trends between multiple tag clouds. We present results from a controlled study that compares SparkClouds with two traditional trend visualizations-multiple line graphs and stacked bar charts-as well as Parallel Tag Clouds. Results show that SparkClouds' ability to show trends compares favourably to the alternative visualizations.},
}
@article{p723,
journal = {IEEE TVCG},
year = 2010,
title = {Stacking Graphic Elements to Avoid Over-Plotting},
doi = {10.1109/TVCG.2010.197},
url = {http://dx.doi.org/10.1109/TVCG.2010.197},
author = {Tuan Nhon Dang and Wilkinson, L. and Anand, A.},
pages = {1044--1052},
keywords = {Dot plots, Parallel coordinate plots, Multidimensional data, Density-based visualization},
abstract = {An ongoing challenge for information visualization is how to deal with over-plotting forced by ties or the relatively limited visual field of display devices. A popular solution is to represent local data density with area (bubble plots, treemaps), color (heatmaps), or aggregation (histograms, kernel densities, pixel displays). All of these methods have at least one of three deficiencies: 1) magnitude judgments are biased because area and color have convex downward perceptual functions, 2) area, hue, and brightness have relatively restricted ranges of perceptual intensity compared to length representations, and/or 3) it is difficult to brush or link to individual cases when viewing aggregations. In this paper, we introduce a new technique for visualizing and interacting with datasets that preserves density information by stacking overlapping cases. The overlapping data can be points or lines or other geometric elements, depending on the type of plot. We show real-dataset applications of this stacking paradigm and compare them to other techniques that deal with over-plotting in high-dimensional displays.},
}
@article{p724,
journal = {IEEE TVCG},
year = 2010,
title = {The FlowVizMenu and Parallel Scatterplot Matrix: Hybrid Multidimensional Visualizations for Network Exploration},
doi = {10.1109/TVCG.2010.205},
url = {http://dx.doi.org/10.1109/TVCG.2010.205},
author = {Viau, C. and McGuffin, M.J. and Chiricota, Y. and Jurisica, I.},
pages = {1100--1108},
keywords = {Interactive graph drawing, network layout, attribute-driven layout, parallel coordinates, scatterplot matrix, radial menu},
abstract = {A standard approach for visualizing multivariate networks is to use one or more multidimensional views (for example, scatterplots) for selecting nodes by various metrics, possibly coordinated with a node-link view of the network. In this paper, we present three novel approaches for achieving a tighter integration of these views through hybrid techniques for multidimensional visualization, graph selection and layout. First, we present the FlowVizMenu, a radial menu containing a scatterplot that can be popped up transiently and manipulated with rapid, fluid gestures to select and modify the axes of its scatterplot. Second, the FlowVizMenu can be used to steer an attribute-driven layout of the network, causing certain nodes of a node-link diagram to move toward their corresponding positions in a scatterplot while others can be positioned manually or by force-directed layout. Third, we describe a novel hybrid approach that combines a scatterplot matrix (SPLOM) and parallel coordinates called the Parallel Scatterplot Matrix (P-SPLOM), which can be used to visualize and select features within the network. We also describe a novel arrangement of scatterplots called the Scatterplot Staircase (SPLOS) that requires less space than a traditional scatterplot matrix. Initial user feedback is reported.},
}
@article{p725,
journal = {IEEE TVCG},
year = 2010,
title = {The Streams of Our Lives: Visualizing Listening Histories in Context},
doi = {10.1109/TVCG.2010.206},
url = {http://dx.doi.org/10.1109/TVCG.2010.206},
author = {Baur, D. and Seiffert, F. and Sedlmair, M. and Boring, S.},
pages = {1119--1128},
keywords = {Information visualization, lifelogging, design study, music, listening history, timelines, photos, calendars},
abstract = {The choices we take when listening to music are expressions of our personal taste and character. Storing and accessing our listening histories is trivial due to services like Last.fm, but learning from them and understanding them is not. Existing solutions operate at a very abstract level and only produce statistics. By applying techniques from information visualization to this problem, we were able to provide average people with a detailed and powerful tool for accessing their own musical past. LastHistory is an interactive visualization for displaying music listening histories, along with contextual information from personal photos and calendar entries. Its two main user tasks are (1) analysis, with an emphasis on temporal patterns and hypotheses related to musical genre and sequences, and (2) reminiscing, where listening histories and context represent part of one's past. In this design study paper we give an overview of the field of music listening histories and explain their unique characteristics as a type of personal data. We then describe the design rationale, data and view transformations of LastHistory and present the results from both a lab and a large-scale online study. We also put listening histories in contrast to other lifelogging data. The resonant and enthusiastic feedback that we received from average users shows a need for making their personal data accessible. We hope to stimulate such developments through this research.},
}
@article{p726,
journal = {IEEE TVCG},
year = 2010,
title = {Uncovering Strengths and Weaknesses of Radial Visualizations---an Empirical Approach},
doi = {10.1109/TVCG.2010.209},
url = {http://dx.doi.org/10.1109/TVCG.2010.209},
author = {Diehl, S. and Beck, F. and Burch, M.},
pages = {935--942},
keywords = {Radial visualization, user study, visual memory},
abstract = {Radial visualizations play an important role in the information visualization community. But the decision to choose a radial coordinate system is based on intuition rather than on scientific foundations. The empirical approach presented in this paper aims at uncovering strengths and weaknesses of radial visualizations by comparing them to equivalent ones in Cartesian coordinate systems. We identified memorizing positions of visual elements as a generic task when working with visualizations. A first study with 674 participants provides a broad data spectrum for exploring differences between the two visualization types. A second, complementing study with fewer participants focuses on further questions raised by the first study. Our findings document that Cartesian visualizations tend to outperform their radial counterparts, especially with respect to answer times. Nonetheless, radial visualizations seem to be more appropriate for focusing on a particular data dimension.},
}
@article{p727,
journal = {IEEE TVCG},
year = 2010,
title = {Untangling Euler Diagrams},
doi = {10.1109/TVCG.2010.210},
url = {http://dx.doi.org/10.1109/TVCG.2010.210},
author = {Riche, N.H. and Dwyer, T.},
pages = {1090--1099},
keywords = {Information Visualization, Euler diagrams, Set Visualization, Graph Visualization},
abstract = {In many common data analysis scenarios the data elements are logically grouped into sets. Venn and Euler style diagrams are a common visual representation of such set membership where the data elements are represented by labels or glyphs and sets are indicated by boundaries surrounding their members. Generating such diagrams automatically such that set regions do not intersect unless the corresponding sets have a non-empty intersection is a difficult problem. Further, it may be impossible in some cases if regions are required to be continuous and convex. Several approaches exist to draw such set regions using more complex shapes; however, the resulting diagrams can be difficult to interpret. In this paper we present two novel approaches for simplifying a complex collection of intersecting sets into a strict hierarchy that can be more easily automatically arranged and drawn (Figure 1). In the first approach, we use compact rectangular shapes for drawing each set, attempting to improve the readability of the set intersections. In the second approach, we avoid drawing intersecting set regions by duplicating elements belonging to multiple sets. We compared both of our techniques to the traditional non-convex region technique using five readability tasks. Our results show that the compact rectangular shapes technique was often preferred by experimental subjects even though the use of duplications dramatically improves the accuracy and performance time for most of our tasks. In addition to general set representation our techniques are also applicable to visualization of networks with intersecting clusters of nodes.},
}
@article{p728,
journal = {IEEE TVCG},
year = 2010,
title = {Visualization of Diversity in Large Multivariate Data Sets},
doi = {10.1109/TVCG.2010.216},
url = {http://dx.doi.org/10.1109/TVCG.2010.216},
author = {Pham, T. and Hess, R. and Ju, C. and Zhang, E. and Metoyer, R.},
pages = {1053--1062},
keywords = {Information visualization, diversity, categorical data, multivariate data, evaluation},
abstract = {Understanding the diversity of a set of multivariate objects is an important problem in many domains, including ecology, college admissions, investing, machine learning, and others. However, to date, very little work has been done to help users achieve this kind of understanding. Visual representation is especially appealing for this task because it offers the potential to allow users to efficiently observe the objects of interest in a direct and holistic way. Thus, in this paper, we attempt to formalize the problem of visualizing the diversity of a large (more than 1000 objects), multivariate (more than 5 attributes) data set as one worth deeper investigation by the information visualization community. In doing so, we contribute a precise definition of diversity, a set of requirements for diversity visualizations based on this definition, and a formal user study design intended to evaluate the capacity of a visual representation for communicating diversity information. Our primary contribution, however, is a visual representation, called the Diversity Map, for visualizing diversity. An evaluation of the Diversity Map using our study design shows that users can judge elements of diversity consistently and as or more accurately than when using the only other representation specifically designed to visualize diversity.},
}
@article{p729,
journal = {IEEE TVCG},
year = 2010,
title = {Visualization of Graph Products},
doi = {10.1109/TVCG.2010.217},
url = {http://dx.doi.org/10.1109/TVCG.2010.217},
author = {Jänicke, S. and Heine, C. and Hellmuth, M. and Stadler, P.F. and Scheuermann, G.},
pages = {1082--1089},
keywords = {Graph drawing, graph products, TopoLayout},
abstract = {Graphs are a versatile structure and abstraction for binary relationships between objects. To gain insight into such relationships, their corresponding graph can be visualized. In the past, many classes of graphs have been defined, e.g. trees, planar graphs, directed acyclic graphs, and visualization algorithms were proposed for these classes. Although many graphs may only be classified as "general" graphs, they can contain substructures that belong to a certain class. Archambault proposed the TopoLayout framework: rather than draw any arbitrary graph using one method, split the graph into components that are homogeneous with respect to one graph class and then draw each component with an algorithm best suited for this class. Graph products constitute a class that arises frequently in graph theory, but for which no visualization algorithm has been proposed until now. In this paper, we present an algorithm for drawing graph products and the aesthetic criterion that graph product drawings are subject to. We show that the popular High-Dimensional Embedder approach applied to Cartesian products already respects this aesthetic criterion, but has disadvantages. We also present how our method is integrated as a new component into the TopoLayout framework. Our implementation is used for further research of graph products in a biological context.},
}
@article{p730,
journal = {IEEE TVCG},
year = 2010,
title = {Visualizations everywhere: A Multiplatform Infrastructure for Linked Visualizations},
doi = {10.1109/TVCG.2010.222},
url = {http://dx.doi.org/10.1109/TVCG.2010.222},
author = {Fisher, D. and Drucker, S. and Fernandez, R. and Ruble, S.},
pages = {1157--1163},
keywords = {Visualization systems, toolkit design, data transformation and representation},
abstract = {In order to use new visualizations, most toolkits require application developers to rebuild their applications and distribute new versions to users. The WebCharts Framework takes a different approach by hosting Javascript from within an application and providing a standard data and events interchange. In this way, applications can be extended dynamically with a wide variety of visualizations. We discuss the benefits of this architectural approach, contrast it to existing techniques, and give a variety of examples and extensions of the basic system.},
}
@article{p842,
journal = {IEEE TVCG},
year = 2009,
title = {"Search, Show Context, Expand on Demand": Supporting Large Graph Exploration with Degree-of-Interest},
doi = {10.1109/TVCG.2009.108},
url = {http://dx.doi.org/10.1109/TVCG.2009.108},
author = {van Ham, F. and Perer, A.},
pages = {953--960},
keywords = {Graph visualization, network visualization, degree of interest, legal citation networks, focus+context},
abstract = {A common goal in graph visualization research is the design of novel techniques for displaying an overview of an entire graph. However, there are many situations where such an overview is not relevant or practical for users, as analyzing the global structure may not be related to the main task of the users that have semi-specific information needs. Furthermore, users accessing large graph databases through an online connection or users running on less powerful (mobile) hardware simply do not have the resources needed to compute these overviews. In this paper, we advocate an interaction model that allows users to remotely browse the immediate context graph around a specific node of interest. We show how Furnas' original degree of interest function can be adapted from trees to graphs and how we can use this metric to extract useful contextual subgraphs, control the complexity of the generated visualization and direct users to interesting datapoints in the context. We demonstrate the effectiveness of our approach with an exploration of a dense online database containing over 3 million legal citations.},
}
@article{p843,
journal = {IEEE TVCG},
year = 2009,
title = {A Comparison of User-Generated and Automatic Graph Layouts},
doi = {10.1109/TVCG.2009.109},
url = {http://dx.doi.org/10.1109/TVCG.2009.109},
author = {Dwyer, T. and Bongshin Lee and Fisher, D. and Quinn, K.I. and Isenberg, P. and Robertson, G. and North, C.},
pages = {961--968},
keywords = {Graph layout, network layout, automatic layout algorithms, user-generated layout, graph-drawing aesthetics},
abstract = {The research presented in this paper compares user-generated and automatic graph layouts. Following the methods suggested by van Ham et al. (2008), a group of users generated graph layouts using both multi-touch interaction on a tabletop display and mouse interaction on a desktop computer. Users were asked to optimize their layout for aesthetics and analytical tasks with a social network. We discuss characteristics of the user-generated layouts and interaction methods employed by users in this process. We then report on a web-based study to compare these layouts with the output of popular automatic layout algorithms. Our results demonstrate that the best of the user-generated layouts performed as well as or better than the physics-based layout. Orthogonal and circular automatic layouts were found to be considerably less effective than either the physics-based layout or the best of the user-generated layouts. We highlight several attributes of the various layouts that led to high accuracy and improved task completion time, as well as aspects in which traditional automatic layout methods were unsuccessful for our tasks.},
}
@article{p844,
journal = {IEEE TVCG},
year = 2009,
title = {A Multi-Threading Architecture to Support Interactive Visual Exploration},
doi = {10.1109/TVCG.2009.110},
url = {http://dx.doi.org/10.1109/TVCG.2009.110},
author = {Piringer, H. and Tominski, C. and Muigg, P. and Berger, W.},
pages = {1113--1120},
keywords = {Information visualization architecture, continuous interaction, multi-threading, layer, preview},
abstract = {During continuous user interaction, it is hard to provide rich visual feedback at interactive rates for datasets containing millions of entries. The contribution of this paper is a generic architecture that ensures responsiveness of the application even when dealing with large data and that is applicable to most types of information visualizations. Our architecture builds on the separation of the main application thread and the visualization thread, which can be cancelled early due to user interaction. In combination with a layer mechanism, our architecture facilitates generating previews incrementally to provide rich visual feedback quickly. To help avoid common pitfalls of multi-threading, we discuss synchronization and communication in detail. We explicitly denote design choices to control trade-offs. A quantitative evaluation based on the system VISPLORE shows fast visual feedback during continuous interaction even for millions of entries. We describe instantiations of our architecture in additional tools.},
}
@article{p845,
journal = {IEEE TVCG},
year = 2009,
title = {A Nested Model for Visualization Design and Validation},
doi = {10.1109/TVCG.2009.111},
url = {http://dx.doi.org/10.1109/TVCG.2009.111},
author = {Munzner, T.},
pages = {921--928},
keywords = {Models, frameworks, design, evaluation},
abstract = {We present a nested model for the visualization design and validation with four layers: characterize the task and data in the vocabulary of the problem domain, abstract into operations and data types, design visual encoding and interaction techniques, and create algorithms to execute techniques efficiently. The output from a level above is input to the level below, bringing attention to the design challenge that an upstream error inevitably cascades to all downstream levels. This model provides prescriptive guidance for determining appropriate evaluation approaches by identifying threats to validity unique to each level. We also provide three recommendations motivated by this model: authors should distinguish between these levels when claiming contributions at more than one of them, authors should explicitly state upstream assumptions at levels above the focus of a paper, and visualization venues should accept more papers on domain characterization.},
}
@article{p846,
journal = {IEEE TVCG},
year = 2009,
title = {ABySS-Explorer: Visualizing Genome Sequence Assemblies},
doi = {10.1109/TVCG.2009.116},
url = {http://dx.doi.org/10.1109/TVCG.2009.116},
author = {Nielsen, C.B. and Jackman, S.D. and Birol, I. and Jones, S.J.M.},
pages = {881--888},
keywords = {Bioinformatics visualization, design study, DNA sequence, genome assembly},
abstract = {One bottleneck in large-scale genome sequencing projects is reconstructing the full genome sequence from the short subsequences produced by current technologies. The final stages of the genome assembly process inevitably require manual inspection of data inconsistencies and could be greatly aided by visualization. This paper presents our design decisions in translating key data features identified through discussions with analysts into a concise visual encoding. Current visualization tools in this domain focus on local sequence errors making high-level inspection of the assembly difficult if not impossible. We present a novel interactive graph display, ABySS-Explorer, that emphasizes the global assembly structure while also integrating salient data features such as sequence length. Our tool replaces manual and in some cases pen-and-paper based analysis tasks, and we discuss how user feedback was incorporated into iterative design refinements. Finally, we touch on applications of this representation not initially considered in our design phase, suggesting the generality of this encoding for DNA sequence data.},
}
@article{p847,
journal = {IEEE TVCG},
year = 2009,
title = {ActiviTree: Interactive Visual Exploration of Sequences in Event-Based Data Using Graph Similarity},
doi = {10.1109/TVCG.2009.117},
url = {http://dx.doi.org/10.1109/TVCG.2009.117},
author = {Vrotsou, K. and Johansson, J. and Cooper, M.},
pages = {945--952},
keywords = {Interactive visual exploration, event-based data, sequence identification, graph similarity, node similarity},
abstract = {The identification of significant sequences in large and complex event-based temporal data is a challenging problem with applications in many areas of today's information intensive society. Pure visual representations can be used for the analysis, but are constrained to small data sets. Algorithmic search mechanisms used for larger data sets become expensive as the data size increases and typically focus on frequency of occurrence to reduce the computational complexity, often overlooking important infrequent sequences and outliers. In this paper we introduce an interactive visual data mining approach based on an adaptation of techniques developed for Web searching, combined with an intuitive visual interface, to facilitate user-centred exploration of the data and identification of sequences significant to that user. The search algorithm used in the exploration executes in negligible time, even for large data, and so no pre-processing of the selected data is required, making this a completely interactive experience for the user. Our particular application area is social science diary data but the technique is applicable across many other disciplines.},
}
@article{p848,
journal = {IEEE TVCG},
year = 2009,
title = {Bubble Sets: Revealing Set Relations with Isocontours over Existing Visualizations},
doi = {10.1109/TVCG.2009.122},
url = {http://dx.doi.org/10.1109/TVCG.2009.122},
author = {Collins, C. and Penn, G. and Carpendale, S.},
pages = {1009--1016},
keywords = {clustering, spatial layout, graph visualization, tree visualization},
abstract = {While many data sets contain multiple relationships, depicting more than one data relationship within a single visualization is challenging. We introduce Bubble Sets as a visualization technique for data that has both a primary data relation with a semantically significant spatial organization and a significant set membership relation in which members of the same set are not necessarily adjacent in the primary layout. In order to maintain the spatial rights of the primary data relation, we avoid layout adjustment techniques that improve set cluster continuity and density. Instead, we use a continuous, possibly concave, isocontour to delineate set membership, without disrupting the primary layout. Optimizations minimize cluster overlap and provide for calculation of the isocontours at interactive speeds. Case studies show how this technique can be used to indicate multiple sets on a variety of common visualizations.},
}
@article{p849,
journal = {IEEE TVCG},
year = 2009,
title = {code_swarm: A Design Study in Organic Software Visualization},
doi = {10.1109/TVCG.2009.123},
url = {http://dx.doi.org/10.1109/TVCG.2009.123},
author = {Ogawa, M. and Kwan-Liu Ma},
pages = {1097--1104},
keywords = {Software visualization, organic information visualization, software development history and evolution},
abstract = {In May of 2008, we published online a series of software visualization videos using a method called code_swarm. Shortly thereafter, we made the code open source and its popularity took off. This paper is a study of our code_swarm application, comprising its design, results and public response. We share our design methodology, including why we chose the organic information visualization technique, how we designed for both developers and a casual audience, and what lessons we learned from our experiment. We validate the results produced by code_swarm through a qualitative analysis and by gathering online user comments. Furthermore, we successfully released the code as open source, and the software community used it to visualize their own projects and shared their results as well. In the end, we believe code_swarm has positive implications for the future of organic information design and open source information visualization practice.},
}
@article{p850,
journal = {IEEE TVCG},
year = 2009,
title = {Comparing Dot and Landscape Spatializations for Visual Memory Differences},
doi = {10.1109/TVCG.2009.127},
url = {http://dx.doi.org/10.1109/TVCG.2009.127},
author = {Tory, M. and Swindells, C. and Dreezer, R.},
pages = {1033--1040},
keywords = {Information interfaces and presentation, screen design, evaluation / methodology, user / machine systems, software psychology, landscape visualization},
abstract = {Spatialization displays use a geographic metaphor to arrange non-spatial data. For example, spatializations are commonly applied to document collections so that document themes appear as geographic features such as hills. Many common spatialization interfaces use a 3-D landscape metaphor to present data. However, it is not clear whether 3-D spatializations afford improved speed and accuracy for user tasks compared to similar 2-D spatializations. We describe a user study comparing users' ability to remember dot displays, 2-D landscapes, and 3-D landscapes for two different data densities (500 vs. 1000 points). Participants' visual memory was statistically more accurate when viewing dot displays and 3-D landscapes compared to 2-D landscapes. Furthermore, accuracy remembering a spatialization was significantly better overall for denser spatializations. These results are of benefit to visualization designers who are contemplating the best ways to present data using spatialization techniques.},
}
@article{p851,
journal = {IEEE TVCG},
year = 2009,
title = {Configuring Hierarchical Layouts to Address Research Questions},
doi = {10.1109/TVCG.2009.128},
url = {http://dx.doi.org/10.1109/TVCG.2009.128},
author = {Slingsby, A. and Dykes, J. and Wood, J.},
pages = {977--984},
keywords = {Geovisualization, hierarchical, layout, guidelines, exploratory, notation},
abstract = {We explore the effects of selecting alternative layouts in hierarchical displays that show multiple aspects of large multivariate datasets, including spatial and temporal characteristics. Hierarchical displays of this type condition a dataset by multiple discrete variable values, creating nested graphical summaries of the resulting subsets in which size, shape and colour can be used to show subset properties. These 'small multiples' are ordered by the conditioning variable values and are laid out hierarchically using dimensional stacking. Crucially, we consider the use of different layouts at different hierarchical levels, so that the coordinates of the plane can be used more effectively to draw attention to trends and anomalies in the data. We argue that these layouts should be informed by the type of conditioning variable and by the research question being explored. We focus on space-filling rectangular layouts that provide data-dense and rich overviews of data to address research questions posed in our exploratory analysis of spatial and temporal aspects of property sales in London. We develop a notation ('HiVE') that describes visualisation and layout states and provides reconfiguration operators, demonstrate its use for reconfiguring layouts to pursue research questions and provide guidelines for this process. We demonstrate how layouts can be related through animated transitions to reduce the cognitive load associated with their reconfiguration whilst supporting the exploratory process.},
}
@article{p852,
journal = {IEEE TVCG},
year = 2009,
title = {Conjunctive Visual Forms},
doi = {10.1109/TVCG.2009.129},
url = {http://dx.doi.org/10.1109/TVCG.2009.129},
author = {Weaver, C.},
pages = {929--936},
keywords = {Boolean query, brushing, conjunctive normal form, exploratory visualization, multiple views, visual abstraction},
abstract = {Visual exploration of multidimensional data is a process of isolating and extracting relationships within and between dimensions. Coordinated multiple view approaches are particularly effective for visual exploration because they support precise expression of heterogeneous multidimensional queries using simple interactions. Recent visual analytics research has made significant progress in identifying and understanding patterns of composed views and coordinations that support fast, flexible, and open-ended data exploration. What is missing is formalization of the space of expressible queries in terms of visual representation and interaction. This paper introduces the conjunctive visual form model in which visual exploration consists of interactively-driven sequences of transitions between visual states that correspond to conjunctive normal forms in boolean logic. The model predicts several new and useful ways to extend the space of rapidly expressible queries through addition of simple interactive capabilities to existing compositional patterns. Two recent related visual tools offer a subset of these capabilities, providing a basis for conjecturing about such extensions.},
}
@article{p853,
journal = {IEEE TVCG},
year = 2009,
title = {Constructing Overview + Detail Dendrogram-Matrix Views},
doi = {10.1109/TVCG.2009.130},
url = {http://dx.doi.org/10.1109/TVCG.2009.130},
author = {Jin Chen and MacEachren, A.M. and Peuquet, D.},
pages = {889--896},
keywords = {Dendrogram, reorderable matrix, compound graphs, data abstraction quality metrics, hierarchical clusters},
abstract = {A dendrogram that visualizes a clustering hierarchy is often integrated with a re-orderable matrix for pattern identification. The method is widely used in many research fields including biology, geography, statistics, and data mining. However, most dendrograms do not scale up well, particularly with respect to problems of graphical and cognitive information overload. This research proposes a strategy that links an overview dendrogram and a detail-view dendrogram, each integrated with a re-orderable matrix. The overview displays only a user-controlled, limited number of nodes that represent the "skeleton" of a hierarchy. The detail view displays the sub-tree represented by a selected meta-node in the overview. The research presented here focuses on constructing a concise overview dendrogram and its coordination with a detail view. The proposed method has the following benefits: dramatic alleviation of information overload, enhanced scalability and data abstraction quality on the dendrogram, and the support of data exploration at arbitrary levels of detail. The contribution of the paper includes a new metric to measure the "importance" of nodes in a dendrogram; the method to construct the concise overview dendrogram from the dynamically-identified, important nodes; and a measure for evaluating the data abstraction quality for dendrograms. We evaluate and compare the proposed method to some related existing methods, and demonstrate how the proposed method can help users find interesting patterns through a case study on county-level U.S. cervical cancer mortality and demographic data.},
}
@article{p854,
journal = {IEEE TVCG},
year = 2009,
title = {Document Cards: A Top Trumps Visualization for Documents},
doi = {10.1109/TVCG.2009.139},
url = {http://dx.doi.org/10.1109/TVCG.2009.139},
author = {Strobelt, H. and Oelke, D. and Rohrdantz, C. and Stoffel, A. and Keim, D.A. and Deussen, O.},
pages = {1145--1152},
keywords = {document visualization, visual summary, content extraction, document collection browsing},
abstract = {Finding suitable, less space consuming views for a document's main content is crucial to provide convenient access to large document collections on display devices of different size. We present a novel compact visualization which represents the document's key semantic as a mixture of images and important key terms, similar to cards in a top trumps game. The key terms are extracted using an advanced text mining approach based on a fully automatic document structure extraction. The images and their captions are extracted using a graphical heuristic and the captions are used for a semi-semantic image weighting. Furthermore, we use the image color histogram for classification and show at least one representative from each non-empty image class. The approach is demonstrated for the IEEE InfoVis publications of a complete year. The method can easily be applied to other publication collections and sets of documents which contain images.},
}
@article{p855,
journal = {IEEE TVCG},
year = 2009,
title = {Exemplar-based Visualization of Large Document Corpus},
doi = {10.1109/TVCG.2009.140},
url = {http://dx.doi.org/10.1109/TVCG.2009.140},
author = {Yanhua Chen and Lijun Wang and Ming Dong and Jing Hua},
pages = {1161--1168},
keywords = {Exemplar, large-scale document visualization, multidimensional projection},
abstract = {With the rapid growth of the World Wide Web and electronic information services, text corpus is becoming available online at an incredible rate. By displaying text data in a logical layout (e.g., color graphs), text visualization presents a direct way to observe the documents as well as understand the relationship between them. In this paper, we propose a novel technique, Exemplar-based visualization (EV), to visualize an extremely large text corpus. Capitalizing on recent advances in matrix approximation and decomposition, EV presents a probabilistic multidimensional projection model in the low-rank text subspace with a sound objective function. The probability of each document proportion to the topics is obtained through iterative optimization and embedded to a low dimensional space using parameter embedding. By selecting the representative exemplars, we obtain a compact approximation of the data. This makes the visualization highly efficient and flexible. In addition, the selected exemplars neatly summarize the entire data set and greatly reduce the cognitive overload in the visualization, leading to an easier interpretation of large text corpus. Empirically, we demonstrate the superior performance of EV through extensive experiments performed on the publicly available text data sets.},
}
@article{p856,
journal = {IEEE TVCG},
year = 2009,
title = {Flow Mapping and Multivariate Visualization of Large Spatial Interaction Data},
doi = {10.1109/TVCG.2009.143},
url = {http://dx.doi.org/10.1109/TVCG.2009.143},
author = {Diansheng Guo},
pages = {1041--1048},
keywords = {hierarchical clustering, graph partitioning, flow mapping, spatial interaction, contiguity constraints, multidimensional visualization, coordinated views, data mining},
abstract = {Spatial interactions (or flows), such as population migration and disease spread, naturally form a weighted location-to-location network (graph). Such geographically embedded networks (graphs) are usually very large. For example, the county-to-county migration data in the U.S. has thousands of counties and about a million migration paths. Moreover, many variables are associated with each flow, such as the number of migrants for different age groups, income levels, and occupations. It is a challenging task to visualize such data and discover network structures, multivariate relations, and their geographic patterns simultaneously. This paper addresses these challenges by developing an integrated interactive visualization framework that consists of three coupled components: (1) a spatially constrained graph partitioning method that can construct a hierarchy of geographical regions (communities), where there are more flows or connections within regions than across regions; (2) a multivariate clustering and visualization method to detect and present multivariate patterns in the aggregated region-to-region flows; and (3) a highly interactive flow mapping component to map both flow and multivariate patterns in the geographic space, at different hierarchical levels. The proposed approach can process relatively large data sets and effectively discover and visualize major flow structures and multivariate relations at the same time. User interactions are supported to facilitate the understanding of both an overview and detailed patterns.},
}
@article{p857,
journal = {IEEE TVCG},
year = 2009,
title = {FromDaDy: Spreading Aircraft Trajectories Across Views to Support Iterative Queries},
doi = {10.1109/TVCG.2009.145},
url = {http://dx.doi.org/10.1109/TVCG.2009.145},
author = {Hurter, C. and Tissoires, B. and Conversy, S.},
pages = {1017--1024},
keywords = {visualization, iterative exploration, direct manipulation, trajectories},
abstract = {When displaying thousands of aircraft trajectories on a screen, the visualization is spoiled by a tangle of trails. The visual analysis is therefore difficult, especially if a specific class of trajectories in an erroneous dataset has to be studied. We designed FromDaDy, a trajectory visualization tool that tackles the difficulties of exploring the visualization of multiple trails. This multidimensional data exploration is based on scatterplots, brushing, pick and drop, juxtaposed views and rapid visual design. Users can organize the workspace composed of multiple juxtaposed views. They can define the visual configuration of the views by connecting data dimensions from the dataset to Bertin's visual variables. They can then brush trajectories, and with a pick and drop operation they can spread the brushed information across views. They can then repeat these interactions, until they extract a set of relevant data, thus formulating complex queries. Through two real-world scenarios, we show how FromDaDy supports iterative queries and the extraction of trajectories in a dataset that contains up to 5 million data.},
}
@article{p858,
journal = {IEEE TVCG},
year = 2009,
title = {GeneShelf: A Web-based Visual Interface for Large Gene Expression Time-Series Data Repositories},
doi = {10.1109/TVCG.2009.146},
url = {http://dx.doi.org/10.1109/TVCG.2009.146},
author = {Bohyoung Kim and Bongshin Lee and Knoblach, S. and Hoffman, E. and Jinwook Seo},
pages = {905--912},
keywords = {bioinformatics visualization, augmented timeline, animation, zoomable grid, gene expression profiling},
abstract = {A widespread use of high-throughput gene expression analysis techniques enabled the biomedical research community to share a huge body of gene expression datasets in many public databases on the web. However, current gene expression data repositories provide static representations of the data and support limited interactions. This hinders biologists from effectively exploring shared gene expression datasets. Responding to the growing need for better interfaces to improve the utility of the public datasets, we have designed and developed a new web-based visual interface entitled GeneShelf (http://bioinformatics.cnmcresearch.org/GeneShelf). It builds upon a zoomable grid display to represent two categorical dimensions. It also incorporates an augmented timeline with expandable time points that better shows multiple data values for the focused time point by embedding bar charts. We applied GeneShelf to one of the largest microarray datasets generated to study the progression and recovery process of injuries at the spinal cord of mice and rats. We present a case study and a preliminary qualitative user study with biologists to show the utility and usability of GeneShelf.},
}
@article{p859,
journal = {IEEE TVCG},
year = 2009,
title = {Harnessing the Information Ecosystem with Wiki-based Visualization Dashboards},
doi = {10.1109/TVCG.2009.148},
url = {http://dx.doi.org/10.1109/TVCG.2009.148},
author = {McKeon, M.},
pages = {1081--1088},
keywords = {visualization, web, social software, wikis, social data analysis, collaboration, dashboards, visual analytics},
abstract = {We describe the design and deployment of Dashiki, a public Website where users may collaboratively build visualization dashboards through a combination of a wiki-like syntax and interactive editors. Our goals are to extend existing research on social data analysis into presentation and organization of data from multiple sources, explore new metaphors for these activities, and participate more fully in the Web's information ecology by providing tighter integration with real-time data. To support these goals, our design includes novel and low-barrier mechanisms for editing and layout of dashboard pages and visualizations, connection to data sources, and coordinating interaction between visualizations. In addition to describing these technologies, we provide a preliminary report on the public launch of a prototype based on this design, including a description of the activities of our users derived from observation and interviews.},
}
@article{p860,
journal = {IEEE TVCG},
year = 2009,
title = {Interaction Techniques for Selecting and Manipulating Subgraphs in Network Visualizations},
doi = {10.1109/TVCG.2009.151},
url = {http://dx.doi.org/10.1109/TVCG.2009.151},
author = {McGuffin, M.J. and Jurisica, I.},
pages = {937--944},
keywords = {Interactive graph drawing, network layout, radial menus, marking menus, hotbox, biological networks},
abstract = {We present a novel and extensible set of interaction techniques for manipulating visualizations of networks by selecting subgraphs and then applying various commands to modify their layout or graphical properties. Our techniques integrate traditional rectangle and lasso selection, and also support selecting a node's neighbourhood by dragging out its radius (in edges) using a novel kind of radial menu. Commands for translation, rotation, scaling, or modifying graphical properties (such as opacity) and layout patterns can be performed by using a hotbox (a transiently popped-up, semi-transparent set of widgets) that has been extended in novel ways to integrate specification of commands with 1D or 2D arguments. Our techniques require only one mouse button and one keyboard key, and are designed for fast, gestural, in-place interaction. We present the design and integration of these interaction techniques, and illustrate their use in interactive graph visualization. Our techniques are implemented in NAViGaTOR, a software package for visualizing and analyzing biological networks. An initial usability study is also reported.},
}
@article{p861,
journal = {IEEE TVCG},
year = 2009,
title = {Interactive Dimensionality Reduction Through User-defined Combinations of Quality Metrics},
doi = {10.1109/TVCG.2009.153},
url = {http://dx.doi.org/10.1109/TVCG.2009.153},
author = {Johansson, S. and Johansson, J.},
pages = {993--1000},
keywords = {dimensionality reduction, interactivity, quality metrics, variable ordering},
abstract = {Multivariate data sets including hundreds of variables are increasingly common in many application areas. Most multivariate visualization techniques are unable to display such data effectively, and a common approach is to employ dimensionality reduction prior to visualization. Most existing dimensionality reduction systems focus on preserving one or a few significant structures in data. For many analysis tasks, however, several types of structures can be of high significance and the importance of a certain structure compared to the importance of another is often task-dependent. This paper introduces a system for dimensionality reduction by combining user-defined quality metrics using weight functions to preserve as many important structures as possible. The system aims at effective visualization and exploration of structures within large multivariate data sets and provides enhancement of diverse structures by supplying a range of automatic variable orderings. Furthermore it enables a quality-guided reduction of variables through an interactive display facilitating investigation of trade-offs between loss of structure and the number of variables to keep. The generality and interactivity of the system is demonstrated through a case scenario.},
}
@article{p862,
journal = {IEEE TVCG},
year = 2009,
title = {Lark: Coordinating Co-located Collaboration with Information Visualization},
doi = {10.1109/TVCG.2009.162},
url = {http://dx.doi.org/10.1109/TVCG.2009.162},
author = {Tobiasz, M. and Isenberg, P. and Carpendale, S.},
pages = {1065--1072},
keywords = {Information visualization, Meta-visualization, Collaboration, Coordination, Co-located work, Workspace awareness},
abstract = {Large multi-touch displays are expanding the possibilities of multiple-coordinated views by allowing multiple people to interact with data in concert or independently. We present Lark, a system that facilitates the coordination of interactions with information visualizations on shared digital workspaces. We focus on supporting this coordination according to four main criteria: scoped interaction, temporal flexibility, spatial flexibility, and changing collaboration styles. These are achieved by integrating a representation of the information visualization pipeline into the shared workspace, thus explicitly indicating coordination points on data, representation, presentation, and view levels. This integrated meta-visualization supports both the awareness of how views are linked and the freedom to work in concert or independently. Lark incorporates these four main criteria into a coherent visualization collaboration interaction environment by providing direct visual and algorithmic support for the coordination of data analysis actions over shared large displays.},
}
@article{p863,
journal = {IEEE TVCG},
year = 2009,
title = {Mapping Text with Phrase Nets},
doi = {10.1109/TVCG.2009.165},
url = {http://dx.doi.org/10.1109/TVCG.2009.165},
author = {van Ham, F. and Wattenberg, M. and Viegas, F.B.},
pages = {1169--1176},
keywords = {Text visualization, tag cloud, natural language processing, semantic net},
abstract = {We present a new technique, the phrase net, for generating visual overviews of unstructured text. A phrase net displays a graph whose nodes are words and whose edges indicate that two words are linked by a user-specified relation. These relations may be defined either at the syntactic or lexical level; different relations often produce very different perspectives on the same text. Taken together, these perspectives often provide an illuminating visual overview of the key concepts and relations in a document or set of documents.},
}
@article{p864,
journal = {IEEE TVCG},
year = 2009,
title = {MizBee: A Multiscale Synteny Browser},
doi = {10.1109/TVCG.2009.167},
url = {http://dx.doi.org/10.1109/TVCG.2009.167},
author = {Meyer, M. and Munzner, T. and Pfister, H.},
pages = {897--904},
keywords = {Information visualization, design study, bioinformatics, synteny},
abstract = {In the field of comparative genomics, scientists seek to answer questions about evolution and genomic function by comparing the genomes of species to find regions of shared sequences. Conserved syntenic blocks are an important biological data abstraction for indicating regions of shared sequences. The goal of this work is to show multiple types of relationships at multiple scales in a way that is visually comprehensible in accordance with known perceptual principles. We present a task analysis for this domain where the fundamental questions asked by biologists can be understood by a characterization of relationships into the four types of proximity/location, size, orientation, and similarity/strength, and the four scales of genome, chromosome, block, and genomic feature. We also propose a new taxonomy of the design space for visually encoding conservation data. We present MizBee, a multiscale synteny browser with the unique property of providing interactive side-by-side views of the data across the range of scales supporting exploration of all of these relationship types. We conclude with case studies from two biologists who used MizBee to augment their previous automatic analysis work flow, providing anecdotal evidence about the efficacy of the system for the visualization of syntenic data, the analysis of conservation relationships, and the communication of scientific insights.},
}
@article{p865,
journal = {IEEE TVCG},
year = 2009,
title = {Participatory Visualization with Wordle},
doi = {10.1109/TVCG.2009.171},
url = {http://dx.doi.org/10.1109/TVCG.2009.171},
author = {Viegas, F.B. and Wattenberg, M. and Feinberg, J.},
pages = {1137--1144},
keywords = {Visualization, text, tag cloud, participatory culture, memory, educational visualization, social data analysis},
abstract = {We discuss the design and usage of "Wordle," a Web-based tool for visualizing text. Wordle creates tag-cloud-like displays that give careful attention to typography, color, and composition. We describe the algorithms used to balance various aesthetic criteria and create the distinctive Wordle layouts. We then present the results of a study of Wordle usage, based both on spontaneous behaviour observed in the wild, and on a large-scale survey of Wordle users. The results suggest that Wordles have become a kind of medium of expression, and that a "participatory culture" has arisen around them.},
}
@article{p866,
journal = {IEEE TVCG},
year = 2009,
title = {Protovis: A Graphical Toolkit for Visualization},
doi = {10.1109/TVCG.2009.174},
url = {http://dx.doi.org/10.1109/TVCG.2009.174},
author = {Bostock, M. and Heer, J.},
pages = {1121--1128},
keywords = {Information visualization, user interfaces, toolkits, 2D graphics},
abstract = {Despite myriad tools for visualizing data, there remains a gap between the notational efficiency of high-level visualization systems and the expressiveness and accessibility of low-level graphical systems. Powerful visualization systems may be inflexible or impose abstractions foreign to visual thinking, while graphical systems such as rendering APIs and vector-based drawing programs are tedious for complex work. We argue that an easy-to-use graphical system tailored for visualization is needed. In response, we contribute Protovis, an extensible toolkit for constructing visualizations by composing simple graphical primitives. In Protovis, designers specify visualizations as a hierarchy of marks with visual properties defined as functions of data. This representation achieves a level of expressiveness comparable to low-level graphics systems, while improving efficiency - the effort required to specify a visualization - and accessibility - the effort required to learn and modify the representation. We substantiate this claim through a diverse collection of examples and comparative analysis with popular visualization tools.},
}
@article{p867,
journal = {IEEE TVCG},
year = 2009,
title = {ResultMaps: Visualization for Search Interfaces},
doi = {10.1109/TVCG.2009.176},
url = {http://dx.doi.org/10.1109/TVCG.2009.176},
author = {Clarkson, E. and Desai, K. and Foley, J.D.},
pages = {1057--1064},
keywords = {Treemap, evaluation, user studies, digital library, digital repository, search engine, search visualization, infovis},
abstract = {Hierarchical representations are common in digital repositories, yet are not always fully leveraged in their online search interfaces. This work describes ResultMaps, which use hierarchical treemap representations with query string-driven digital library search engines. We describe two lab experiments, which find that ResultMap users yield significantly better results over a control condition on some subjective measures, and we find evidence that ResultMaps have ancillary benefits via increased understanding of some aspects of repository content. The ResultMap system and experiments contribute an understanding of the benefits, direct and indirect, of the ResultMap approach to repository search visualization.},
}
@article{p868,
journal = {IEEE TVCG},
year = 2009,
title = {Scattering Points in Parallel Coordinates},
doi = {10.1109/TVCG.2009.179},
url = {http://dx.doi.org/10.1109/TVCG.2009.179},
author = {Xiaoru Yuan and Peihong Guo and He Xiao and Hong Zhou and Huamin Qu},
pages = {1001--1008},
keywords = {Parallel Coordinates, Scatterplots, Information Visualization, Multidimensional Scaling},
abstract = {In this paper, we present a novel parallel coordinates design integrated with points (scattering points in parallel coordinates, SPPC), by taking advantage of both parallel coordinates and scatterplots. Different from most multiple views visualization frameworks involving parallel coordinates where each visualization type occupies an individual window, we convert two selected neighboring coordinate axes into a scatterplot directly. Multidimensional scaling is adopted to allow converting multiple axes into a single subplot. The transition between two visual types is designed in a seamless way. In our work, a series of interaction tools has been developed. Uniform brushing functionality is implemented to allow the user to perform data selection on both points and parallel coordinate polylines without explicitly switching tools. A GPU accelerated dimensional incremental multidimensional scaling (DIMDS) has been developed to significantly improve the system performance. Our case study shows that our scheme is more efficient than traditional multi-view methods in performing visual analysis tasks.},
}
@article{p869,
journal = {IEEE TVCG},
year = 2009,
title = {SellTrend: Inter-Attribute Visual Analysis of Temporal Transaction Data},
doi = {10.1109/TVCG.2009.180},
url = {http://dx.doi.org/10.1109/TVCG.2009.180},
author = {Zhicheng Liu and Stasko, J. and Sullivan, T.},
pages = {1025--1032},
keywords = {investigative analysis, transaction analysis, information visualization, multiple views, time series data, multiple attributes, categorical data},
abstract = {We present a case study of our experience designing SellTrend, a visualization system for analyzing airline travel purchase requests. The relevant transaction data can be characterized as multi-variate temporal and categorical event sequences, and the chief problem addressed is how to help company analysts identify complex combinations of transaction attributes that contribute to failed purchase requests. SellTrend combines a diverse set of techniques ranging from time series visualization to faceted browsing and historical trend analysis in order to help analysts make sense of the data. We believe that the combination of views and interaction capabilities in SellTrend provides an innovative approach to this problem and to other similar types of multivariate, temporally driven transaction data analysis. Initial feedback from company analysts confirms the utility and benefits of the system.},
}
@article{p870,
journal = {IEEE TVCG},
year = 2009,
title = {Smooth Graphs for Visual Exploration of Higher-Order State Transitions},
doi = {10.1109/TVCG.2009.181},
url = {http://dx.doi.org/10.1109/TVCG.2009.181},
author = {Blaas, J. and Botha, C.P. and Grundy, E. and Jones, M.W. and Laramee, R.S. and Post, F.H.},
pages = {969--976},
keywords = {State transitions, Graph drawing, Time series, Biological data},
abstract = {In this paper, we present a new visual way of exploring state sequences in large observational time-series. A key advantage of our method is that it can directly visualize higher-order state transitions. A standard first order state transition is a sequence of two states that are linked by a transition. A higher-order state transition is a sequence of three or more states where the sequence of participating states are linked together by consecutive first order state transitions. Our method extends the current state-graph exploration methods by employing a two dimensional graph, in which higher-order state transitions are visualized as curved lines. All transitions are bundled into thick splines, so that the thickness of an edge represents the frequency of instances. The bundling between two states takes into account the state transitions before and after the transition. This is done in such a way that it forms a continuous representation in which any subsequence of the timeseries is represented by a continuous smooth line. The edge bundles in these graphs can be explored interactively through our incremental selection algorithm. We demonstrate our method with an application in exploring labeled time-series data from a biological survey, where a clustering has assigned a single label to the data at each time-point. In these sequences, a large number of cyclic patterns occur, which in turn are linked to specific activities. We demonstrate how our method helps to find these cycles, and how the interactive selection process helps to find and investigate activities.},
}
@article{p871,
journal = {IEEE TVCG},
year = 2009,
title = {Spatiotemporal Analysis of Sensor Logs using Growth Ring Maps},
doi = {10.1109/TVCG.2009.182},
url = {http://dx.doi.org/10.1109/TVCG.2009.182},
author = {Bak, P. and Mansmann, F. and Janetzko, H. and Keim, D.A.},
pages = {913--920},
keywords = {spatiotemporal visualization, visual analytics, animal behavior, dense pixel displays},
abstract = {Spatiotemporal analysis of sensor logs is a challenging research field due to three facts: a) traditional two-dimensional maps do not support multiple events to occur at the same spatial location, b) three-dimensional solutions introduce ambiguity and are hard to navigate, and c) map distortions to solve the overlap problem are unfamiliar to most users. This paper introduces a novel approach to represent spatial data changing over time by plotting a number of non-overlapping pixels, close to the sensor positions in a map. Thereby, we encode the amount of time that a subject spent at a particular sensor to the number of plotted pixels. Color is used in a twofold manner; while distinct colors distinguish between sensor nodes in different regions, the colors' intensity is used as an indicator to the temporal property of the subjects' activity. The resulting visualization technique, called growth ring maps, enables users to find similarities and extract patterns of interest in spatiotemporal data by using humans' perceptual abilities. We demonstrate the newly introduced technique on a dataset that shows the behavior of healthy and Alzheimer transgenic, male and female mice. We motivate the new technique by showing that the temporal analysis based on hierarchical clustering and the spatial analysis based on transition matrices only reveal limited results. Results and findings are cross-validated using multidimensional scaling. While the focus of this paper is to apply our visualization for monitoring animal behavior, the technique is also applicable for analyzing data, such as packet tracing, geographic monitoring of sales development, or mobile phone capacity planning.},
}
@article{p872,
journal = {IEEE TVCG},
year = 2009,
title = {SpicyNodes: Radial Layout Authoring for the General Public},
doi = {10.1109/TVCG.2009.183},
url = {http://dx.doi.org/10.1109/TVCG.2009.183},
author = {Douma, M. and Ligierko, G. and Ancuta, O. and Gritsai, P. and Liu, S.},
pages = {1089--1096},
keywords = {Trees and network visualization, radial tree layout, information visualization, interaction, focus+context, hierarchy visualization, human-computer interaction},
abstract = {Trees and graphs are relevant to many online tasks such as visualizing social networks, product catalogs, educational portals, digital libraries, the semantic web, concept maps and personalized information management. SpicyNodes is an information-visualization technology that builds upon existing research on radial tree layouts and graph structures. Users can browse a tree, clicking from node to node, as well as successively viewing a node, immediately related nodes and the path back to the "home" nodes. SpicyNodes' layout algorithms maintain balanced layouts using a hybrid mixture of a geometric layout (a succession of spanning radial trees) and force-directed layouts to minimize overlapping nodes, plus several other improvements over prior art. It provides XML-based API and GUI authoring tools. The goal of the SpicyNodes project is to implement familiar principles of radial maps and focus+context with an attractive and inviting look and feel in an open system that is accessible to virtually any Internet user.},
}
@article{p873,
journal = {IEEE TVCG},
year = 2009,
title = {Temporal Summaries: Supporting Temporal Categorical Searching, Aggregation and Comparison},
doi = {10.1109/TVCG.2009.187},
url = {http://dx.doi.org/10.1109/TVCG.2009.187},
author = {Wang, T.D. and Plaisant, C. and Shneiderman, B. and Spring, N. and Roseman, D. and Marchand, G. and Mukherjee, V. and Smith, M.},
pages = {1049--1056},
keywords = {Information Visualization, Interaction design, Human-computer interaction, temporal categorical data visualization},
abstract = {When analyzing thousands of event histories, analysts often want to see the events as an aggregate to detect insights and generate new hypotheses about the data. An analysis tool must emphasize both the prevalence and the temporal ordering of these events. Additionally, the analysis tool must also support flexible comparisons to allow analysts to gather visual evidence. In a previous work, we introduced align, rank, and filter (ARF) to accentuate temporal ordering. In this paper, we present temporal summaries, an interactive visualization technique that highlights the prevalence of event occurrences. Temporal summaries dynamically aggregate events in multiple granularities (year, month, week, day, hour, etc.) for the purpose of spotting trends over time and comparing several groups of records. They provide affordances for analysts to perform temporal range filters. We demonstrate the applicability of this approach in two extensive case studies with analysts who applied temporal summaries to search, filter, and look for patterns in electronic health records and academic records.},
}
@article{p874,
journal = {IEEE TVCG},
year = 2009,
title = {The Benefits of Synchronous Collaborative Information Visualization: Evidence from an Experimental Evaluation},
doi = {10.1109/TVCG.2009.188},
url = {http://dx.doi.org/10.1109/TVCG.2009.188},
author = {Bresciani, S. and Eppler, M.J.},
pages = {1073--1080},
keywords = {Laboratory Studies, Visual Knowledge Representation, Collaborative and Distributed Visualization, synchronous situated collaboration, group work, experiment, knowledge sharing},
abstract = {A great corpus of studies reports empirical evidence of how information visualization supports comprehension and analysis of data. The benefits of visualization for synchronous group knowledge work, however, have not been addressed extensively. Anecdotal evidence and use cases illustrate the benefits of synchronous collaborative information visualization, but very few empirical studies have rigorously examined the impact of visualization on group knowledge work. We have consequently designed and conducted an experiment in which we have analyzed the impact of visualization on knowledge sharing in situated work groups. Our experimental study consists of evaluating the performance of 131 subjects (all experienced managers) in groups of 5 (for a total of 26 groups), working together on a real-life knowledge sharing task. We compare (1) the control condition (no visualization provided), with two visualization supports: (2) optimal and (3) suboptimal visualization (based on a previous survey). The facilitator of each group was asked to populate the provided interactive visual template with insights from the group, and to organize the contributions according to the group consensus. We have evaluated the results through both objective and subjective measures. Our statistical analysis clearly shows that interactive visualization has a statistically significant, objective and positive impact on the outcomes of knowledge sharing, but that the subjects seem not to be aware of this. In particular, groups supported by visualization achieved higher productivity, higher quality of outcome and greater knowledge gains. No statistically significant results could be found between an optimal and a suboptimal visualization though (as classified by the pre-experiment survey). Subjects also did not seem to be aware of the benefits that the visualizations provided as no difference between the visualization and the control conditions was found for the self-reported measures of satisfaction and participation. An implication of our study for information visualization applications is to extend them by using real-time group annotation functionalities that aid in the group sense making process of the represented data.},
}
@article{p875,
journal = {IEEE TVCG},
year = 2009,
title = {Towards Utilizing GPUs in Information Visualization: A Model and Implementation of Image-Space Operations},
doi = {10.1109/TVCG.2009.191},
url = {http://dx.doi.org/10.1109/TVCG.2009.191},
author = {McDonnel, B. and Elmqvist, N.},
pages = {1105--1112},
keywords = {GPU-acceleration, shader programming, interaction, high-performance visualization},
abstract = {Modern programmable GPUs represent a vast potential in terms of performance and visual flexibility for information visualization research, but surprisingly few applications even begin to utilize this potential. In this paper, we conjecture that this may be due to the mismatch between the high-level abstract data types commonly visualized in our field, and the low-level floating-point model supported by current GPU shader languages. To help remedy this situation, we present a refinement of the traditional information visualization pipeline that is amenable to implementation using GPU shaders. The refinement consists of a final image-space step in the pipeline where the multivariate data of the visualization is sampled in the resolution of the current view. To concretize the theoretical aspects of this work, we also present a visual programming environment for constructing visualization shaders using a simple drag-and-drop interface. Finally, we give some examples of the use of shaders for well-known visualization techniques.},
}
@article{p876,
journal = {IEEE TVCG},
year = 2009,
title = {Visual Analysis of Inter-Process Communication for Large-Scale Parallel Computing},
doi = {10.1109/TVCG.2009.196},
url = {http://dx.doi.org/10.1109/TVCG.2009.196},
author = {Muelder, C. and Gygi, F. and Kwan-Liu Ma},
pages = {1129--1136},
keywords = {Information Visualization, MPI Profiling, Scalability},
abstract = {In serial computation, program profiling is often helpful for optimization of key sections of code. When moving to parallel computation, not only does the code execution need to be considered but also communication between the different processes which can induce delays that are detrimental to performance. As the number of processes increases, so does the impact of the communication delays on performance. For large-scale parallel applications, it is critical to understand how the communication impacts performance in order to make the code more efficient. There are several tools available for visualizing program execution and communications on parallel systems. These tools generally provide either views which statistically summarize the entire program execution or process-centric views. However, process-centric visualizations do not scale well as the number of processes gets very large. In particular, the most common representation of parallel processes is a Gantt chart with a row for each process. As the number of processes increases, these charts can become difficult to work with and can even exceed screen resolution. We propose a new visualization approach that affords more scalability and then demonstrate it on systems running with up to 16,384 processes.},
}
@article{p877,
journal = {IEEE TVCG},
year = 2009,
title = {Visualizing Social Photos on a Hasse Diagram for Eliciting Relations and Indexing New Photos},
doi = {10.1109/TVCG.2009.201},
url = {http://dx.doi.org/10.1109/TVCG.2009.201},
author = {Crampes, M. and de Oliveira-Kumar, J. and Ranwez, S. and Villerd, J.},
pages = {985--992},
keywords = {Information visualization, Hasse Diagram, indexation, social photos, formal concept analysis, Galois sub-hierarchy},
abstract = {Social photos, which are taken during family events or parties, represent individuals or groups of people. We show in this paper how a Hasse diagram is an efficient visualization strategy for eliciting different groups and navigating through them. However, we do not limit this strategy to these traditional uses. Instead we show how it can also be used for assisting in indexing new photos. Indexing consists of identifying the event and people in photos. It is an integral phase that takes place before searching and sharing. In our method we use existing indexed photos to index new photos. This is performed through a manual drag and drop procedure followed by a content fusion process that we call 'propagation'. At the core of this process is the necessity to organize and visualize the photos that will be used for indexing in a manner that is easily recognizable and accessible by the user. In this respect we make use of an object Galois sub-hierarchy and display it using a Hasse diagram. The need for an incremental display that maintains the user's mental map also leads us to propose a novel way of building the Hasse diagram. To validate the approach, we present some tests conducted with a sample of users that confirm the interest of this organization, visualization and indexation approach. Finally, we conclude by considering scalability, the possibility to extract social networks and automatically create personalised albums.},
}
@article{p878,
journal = {IEEE TVCG},
year = 2009,
title = {Visualizing the Intellectual Structure with Paper-Reference Matrices},
doi = {10.1109/TVCG.2009.202},
url = {http://dx.doi.org/10.1109/TVCG.2009.202},
author = {Jian Zhang and Chen, C. and Jiexun Li},
pages = {1153--1160},
keywords = {Intellectual Structure, Paper-reference Matrix, FP-tree, Co-citation},
abstract = {Visualizing the intellectual structure of scientific domains using co-cited units such as references or authors has become a routine for domain analysis. In previous studies, paper-reference matrices are usually transformed into reference-reference matrices to obtain co-citation relationships, which are then visualized in different representations, typically as node-link networks, to represent the intellectual structures of scientific domains. Such network visualizations sometimes contain tightly knit components, which make visual analysis of the intellectual structure a challenging task. In this study, we propose a new approach to reveal co-citation relationships. Instead of using a reference-reference matrix, we directly use the original paper-reference matrix as the information source, and transform the paper-reference matrix into an FP-tree and visualize it in a Java-based prototype system. We demonstrate the usefulness of our approach through visual analyses of the intellectual structure of two domains: information visualization and Sloan Digital Sky Survey (SDSS). The results show that our visualization not only retains the major information of co-citation relationships, but also reveals more detailed sub-structures of tightly knit clusters than a conventional node-link network visualization.},
}
@article{p992,
journal = {IEEE TVCG},
year = 2008,
title = {A Framework of Interaction Costs in Information Visualization},
doi = {10.1109/TVCG.2008.109},
url = {http://dx.doi.org/10.1109/TVCG.2008.109},
author = {Lam, H.},
pages = {1149--1156},
keywords = {Interaction, Information Visualization, Framework, Interface Evaluation},
abstract = {Interaction cost is an important but poorly understood factor in visualization design. We propose a framework of interaction costs inspired by Norman's Seven Stages of Action to facilitate study. From 484 papers, we collected 61 interaction-related usability problems reported in 32 user studies and placed them into our framework of seven costs: (1) Decision costs to form goals; (2) System-power costs to form system operations; (3) Multiple input mode costs to form physical sequences; (4) Physical-motion costs to execute sequences; (5) Visual-cluttering costs to perceive state; (6) View-change costs to interpret perception; (7) State-change costs to evaluate interpretation. We also suggest ways to narrow the gulfs of execution (2-4) and evaluation (5-7) based on collected reports. Our framework suggests a need to consider decision costs (1) as the gulf of goal formation.},
}
@article{p993,
journal = {IEEE TVCG},
year = 2008,
title = {Balloon Focus: a Seamless Multi-Focus+Context Method for Treemaps},
doi = {10.1109/TVCG.2008.114},
url = {http://dx.doi.org/10.1109/TVCG.2008.114},
author = {Ying Tu and Han-Wei Shen},
pages = {1157--1164},
keywords = {Treemap, focus+context, multi-focus, fisheye, magnification, visualizing query results, multi-scale viewing},
abstract = {The treemap is one of the most popular methods for visualizing hierarchical data. When a treemap contains a large number of items, inspecting or comparing a few selected items in a greater level of detail becomes very challenging. In this paper, we present a seamless multi-focus and context technique, called Balloon Focus, that allows the user to smoothly enlarge multiple treemap items serving as the foci, while maintaining a stable treemap layout as the context. Our method has several desirable features. First, this method is quite general and can be used with different treemap layout algorithms. Second, as the foci are enlarged, the relative positions among all items are preserved. Third, the foci are placed in a way that the remaining space is evenly distributed back to the non-focus treemap items. When Balloon Focus enlarges the focus items to a maximum degree, the above features ensure that the treemap will maintain a consistent appearance and avoid any abrupt layout changes. In our algorithm, a DAG (Directed Acyclic Graph) is used to maintain the positional constraints, and an elastic model is employed to govern the placement of the treemap items. We demonstrate a treemap visualization system that integrates data query, manual focus selection, and our novel multi-focus+context technique, Balloon Focus, together. A user study was conducted. Results show that with Balloon Focus, users can better perform the tasks of comparing the values and the distribution of the foci.},
}
@article{p994,
journal = {IEEE TVCG},
year = 2008,
title = {Cerebral: Visualizing Multiple Experimental Conditions on a Graph with Biological Context},
doi = {10.1109/TVCG.2008.117},
url = {http://dx.doi.org/10.1109/TVCG.2008.117},
author = {Barsky, A. and Munzner, T. and Gardy, J. and Kincaid, R.},
pages = {1253--1260},
keywords = {Graph layout, systems biology visualization, small multiples, design study},
abstract = {Systems biologists use interaction graphs to model the behavior of biological systems at the molecular level. In an iterative process, such biologists observe the reactions of living cells under various experimental conditions, view the results in the context of the interaction graph, and then propose changes to the graph model. These graphs serve as a form of dynamic knowledge representation of the biological system being studied and evolve as new insight is gained from the experimental data. While numerous graph layout and drawing packages are available, these tools did not fully meet the needs of our immunologist collaborators. In this paper, we describe the data information display needs of these immunologists and translate them into design decisions. These decisions led us to create Cerebral, a system that uses a biologically guided graph layout and incorporates experimental data directly into the graph display. Small multiple views of different experimental conditions and a data-driven parallel coordinates view enable correlations between experimental conditions to be analyzed at the same time that the data is viewed in the graph context. This combination of coordinated views allows the biologist to view the data from many different perspectives simultaneously. To illustrate the typical analysis tasks performed, we analyze two datasets using Cerebral. Based on feedback from our collaborators we conclude that Cerebral is a valuable tool for analyzing experimental data in the context of an interaction graph model.},
}
@article{p995,
journal = {IEEE TVCG},
year = 2008,
title = {Distributed Cognition as a Theoretical Framework for Information Visualization},
doi = {10.1109/TVCG.2008.121},
url = {http://dx.doi.org/10.1109/TVCG.2008.121},
author = {Zhicheng Liu and Nersessian, N.J. and Stasko, J.},
pages = {1173--1180},
keywords = {Information visualization, distributed cognition, interaction, representation, theory and methods},
abstract = {Even though information visualization (InfoVis) research has matured in recent years, it is generally acknowledged that the field still lacks supporting, encompassing theories. In this paper, we argue that the distributed cognition framework can be used to substantiate the theoretical foundation of InfoVis. We highlight fundamental assumptions and theoretical constructs of the distributed cognition approach, based on the cognitive science literature and a real life scenario. We then discuss how the distributed cognition framework can have an impact on the research directions and methodologies we take as InfoVis researchers. Our contributions are as follows. First, we highlight the view that cognition is more an emergent property of interaction than a property of the human mind. Second, we argue that a reductionist approach to study the abstract properties of isolated human minds may not be useful in informing InfoVis design. Finally we propose to make cognition an explicit research agenda, and discuss the implications on how we perform evaluation and theory building.},
}
@article{p996,
journal = {IEEE TVCG},
year = 2008,
title = {Effectiveness of Animation in Trend Visualization},
doi = {10.1109/TVCG.2008.125},
url = {http://dx.doi.org/10.1109/TVCG.2008.125},
author = {Robertson, G. and Fernandez, R. and Fisher, D. and Bongshin Lee and Stasko, J.},
pages = {1325--1332},
keywords = {Information visualization, animation, trends, design, experiment},
abstract = {Animation has been used to show trends in multi-dimensional data. This technique has recently gained new prominence for presentations, most notably with Gapminder Trendalyzer. In Trendalyzer, animation together with interesting data and an engaging presenter helps the audience understand the results of an analysis of the data. It is less clear whether trend animation is effective for analysis. This paper proposes two alternative trend visualizations that use static depictions of trends: one which shows traces of all trends overlaid simultaneously in one display and a second that uses a small multiples display to show the trend traces side-by-side. The paper evaluates the three visualizations for both analysis and presentation. Results indicate that trend animation can be challenging to use even for presentations; while it is the fastest technique for presentation and participants find it enjoyable and exciting, it does lead to many participant errors. Animation is the least effective form for analysis; both static depictions of trends are significantly faster than animation, and the small multiples display is more accurate.},
}
@article{p997,
journal = {IEEE TVCG},
year = 2008,
title = {EMDialog: Bringing Information Visualization into the Museum},
doi = {10.1109/TVCG.2008.127},
url = {http://dx.doi.org/10.1109/TVCG.2008.127},
author = {Hinrichs, U. and Schmidt, H. and Carpendale, S.},
pages = {1181--1188},
keywords = {artistic information visualization, interactive information visualization, walk-up-and-use interaction, public displays},
abstract = {Digital information displays are becoming more common in public spaces such as museums, galleries, and libraries. However, the public nature of these locations requires special considerations concerning the design of information visualization in terms of visual representations and interaction techniques. We discuss the potential for, and challenges of, information visualization in the museum context based on our practical experience with EMDialog, an interactive information presentation that was part of the Emily Carr exhibition at the Glenbow Museum in Calgary. EMDialog visualizes the diverse and multi-faceted discourse about this Canadian artist with the goal to both inform and provoke discussion. It provides a visual exploration environment that offers interplay between two integrated visualizations, one for information access along temporal, and the other along contextual dimensions. We describe the results of an observational study we conducted at the museum that revealed the different ways visitors approached and interacted with EMDialog, as well as how they perceived this form of information presentation in the museum context. Our results include the need to present information in a manner sufficiently attractive to draw attention and the importance of rewarding passive observation as well as both short- and longer term information exploration.},
}
@article{p998,
journal = {IEEE TVCG},
year = 2008,
title = {Evaluating the Use of Data Transformation for Information Visualization},
doi = {10.1109/TVCG.2008.129},
url = {http://dx.doi.org/10.1109/TVCG.2008.129},
author = {Zhen Wen and Zhou, M.X.},
pages = {1309--1316},
keywords = {data transformation, data cleaning, empirical evaluation, user studies},
abstract = {Data transformation, the process of preparing raw data for effective visualization, is one of the key challenges in information visualization. Although researchers have developed many data transformation techniques, there is little empirical study of the general impact of data transformation on visualization. Without such study, it is difficult to systematically decide when and which data transformation techniques are needed. We thus have designed and conducted a two-part empirical study that examines how the use of common data transformation techniques impacts visualization quality, which in turn affects user task performance. Our first experiment studies the impact of data transformation on user performance in single-step, typical visual analytic tasks. The second experiment assesses the impact of data transformation in multi-step analytic tasks. Our results quantify the benefits of data transformation in both experiments. More importantly, our analyses reveal that (1) the benefits of data transformation vary significantly by task and by visualization, and (2) the use of data transformation depends on a user's interaction context. Based on our findings, we present a set of design recommendations that help guide the development and use of data transformation techniques.},
}
@article{p999,
journal = {IEEE TVCG},
year = 2008,
title = {Exploration of Networks using overview+detail with Constraint-based cooperative layout},
doi = {10.1109/TVCG.2008.130},
url = {http://dx.doi.org/10.1109/TVCG.2008.130},
author = {Dwyer, T. and Marriott, K. and Schreiber, F. and Stuckey, P. and Woodward, M. and Wybrow, M.},
pages = {1293--1300},
keywords = {Graph drawing, constraints, stress majorization, force directed algorithms, multidimensional scaling},
abstract = {A standard approach to large network visualization is to provide an overview of the network and a detailed view of a small component of the graph centred around a focal node. The user explores the network by changing the focal node in the detailed view or by changing the level of detail of a node or cluster. For scalability, fast force-based layout algorithms are used for the overview and the detailed view. However, using the same layout algorithm in both views is problematic since layout for the detailed view has different requirements to that in the overview. Here we present a model in which constrained graph layout algorithms are used for layout in the detailed view. This means the detailed view has high-quality layout including sophisticated edge routing and is customisable by the user who can add placement constraints on the layout. Scalability is still ensured since the slower layout techniques are only applied to the small subgraph shown in the detailed view. The main technical innovations are techniques to ensure that the overview and detailed view remain synchronized, and modifying constrained graph layout algorithms to support smooth, stable layout. The key innovation supporting stability are new dynamic graph layout algorithms that preserve the topology or structure of the network when the user changes the focus node or the level of detail by in situ semantic zooming. We have built a prototype tool and demonstrate its use in two application domains, UML class diagrams and biological networks.},
}
@article{p1000,
journal = {IEEE TVCG},
year = 2008,
title = {Geometry-Based Edge Clustering for Graph Visualization},
doi = {10.1109/TVCG.2008.135},
url = {http://dx.doi.org/10.1109/TVCG.2008.135},
author = {Weiwei Cui and Hong Zhou and Huamin Qu and Pak Chung Wong and Xiaoming Li},
pages = {1277--1284},
keywords = {Graph visualization, visual clutter, mesh, edge clustering},
abstract = {Graphs have been widely used to model relationships among data. For large graphs, excessive edge crossings make the display visually cluttered and thus difficult to explore. In this paper, we propose a novel geometry-based edge-clustering framework that can group edges into bundles to reduce the overall edge crossings. Our method uses a control mesh to guide the edge-clustering process; edge bundles can be formed by forcing all edges to pass through some control points on the mesh. The control mesh can be generated at different levels of detail either manually or automatically based on underlying graph patterns. Users can further interact with the edge-clustering results through several advanced visualization techniques such as color and opacity enhancement. Compared with other edge-clustering methods, our approach is intuitive, flexible, and efficient. The experiments on some large graphs demonstrate the effectiveness of our method.},
}
@article{p1001,
journal = {IEEE TVCG},
year = 2008,
title = {Graphical Histories for Visualization: Supporting Analysis, Communication, and Evaluation},
doi = {10.1109/TVCG.2008.137},
url = {http://dx.doi.org/10.1109/TVCG.2008.137},
author = {Heer, J. and Mackinlay, J. and Stolte, C. and Agrawala, M.},
pages = {1189--1196},
keywords = {Visualization, history, undo, analysis, presentation, evaluation},
abstract = {Interactive history tools, ranging from basic undo and redo to branching timelines of user actions, facilitate iterative forms of interaction. In this paper, we investigate the design of history mechanisms for information visualization. We present a design space analysis of both architectural and interface issues, identifying design decisions and associated trade-offs. Based on this analysis, we contribute a design study of graphical history tools for Tableau, a database visualization system. These tools record and visualize interaction histories, support data analysis and communication of findings, and contribute novel mechanisms for presenting, managing, and exporting histories. Furthermore, we have analyzed aggregated collections of history sessions to evaluate Tableau usage. We describe additional tools for analyzing users' history logs and how they have been applied to study usage patterns in Tableau.},
}
@article{p1002,
journal = {IEEE TVCG},
year = 2008,
title = {HiPP: A Novel Hierarchical Point Placement Strategy and its Application to the Exploration of Document Collections},
doi = {10.1109/TVCG.2008.138},
url = {http://dx.doi.org/10.1109/TVCG.2008.138},
author = {Paulovich, F.V. and Minghim, R.},
pages = {1229--1236},
keywords = {Text and document visualization, hierarchical multidimensional visualization, visual knowledge discovery, high-dimensional data},
abstract = {Point placement strategies aim at mapping data points represented in higher dimensions to bi-dimensional spaces and are frequently used to visualize relationships amongst data instances. They have been valuable tools for analysis and exploration of data sets of various kinds. Many conventional techniques, however, do not behave well when the number of dimensions is high, such as in the case of documents collections. Later approaches handle that shortcoming, but may cause too much clutter to allow flexible exploration to take place. In this work we present a novel hierarchical point placement technique that is capable of dealing with these problems. While good grouping and separation of data with high similarity is maintained without increasing computation cost, its hierarchical structure lends itself both to exploration in various levels of detail and to handling data in subsets, improving analysis capability and also allowing manipulation of larger data sets.},
}
@article{p1003,
journal = {IEEE TVCG},
year = 2008,
title = {Improving the Readability of Clustered Social Networks using Node Duplication},
doi = {10.1109/TVCG.2008.141},
url = {http://dx.doi.org/10.1109/TVCG.2008.141},
author = {Henry, N. and Bezerianos, A. and Fekete, J.},
pages = {1317--1324},
keywords = {Clustering, Graph Visualization, Node Duplications, Social Networks},
abstract = {Exploring communities is an important task in social network analysis. Such communities are currently identified using clustering methods to group actors. This approach often leads to actors belonging to one and only one cluster, whereas in real life a person can belong to several communities. As a solution we propose duplicating actors in social networks and discuss potential impact of such a move. Several visual duplication designs are discussed and a controlled experiment comparing network visualization with and without duplication is performed, using 6 tasks that are important for graph readability and visual interpretation of social networks. We show that in our experiment, duplications significantly improve community-related tasks but sometimes interfere with other graph readability tasks. Finally, we propose a set of guidelines for deciding when to duplicate actors and choosing candidates for duplication, and alternative ways to render them in social network representations.},
}
@article{p1004,
journal = {IEEE TVCG},
year = 2008,
title = {Interactive Visual Analysis of Set-Typed Data},
doi = {10.1109/TVCG.2008.144},
url = {http://dx.doi.org/10.1109/TVCG.2008.144},
author = {Freiler, W. and Matkovic, K. and Hauser, H.},
pages = {1340--1347},
keywords = {Interactive Visual Analysis, Multidimensional Multivariate Data Visualization, Categorical Data Visualization, Interactive Visualization, Focus+Context Visualization, Multiple Coordinated Views},
abstract = {While it is quite typical to deal with attributes of different data types in the visualization of heterogeneous and multivariate datasets, most existing techniques still focus on the most usual data types such as numerical attributes or strings. In this paper we present a new approach to the interactive visual exploration and analysis of data that contains attributes which are of set type. A set-typed attribute of a data item - like one cell in a table - has a list of n >= 0 elements as its value. We present the set'o'gram as a new visualization approach to represent data of set type and to enable interactive visual exploration and analysis. We also demonstrate how this approach is capable of helping to deal with datasets that have a larger number of dimensions (a dozen or more), especially in the context of categorical data. To illustrate the effectiveness of our approach, we present the interactive visual analysis of a CRM dataset with data from a questionnaire on the education and shopping habits of about 90000 people.},
}
@article{p1005,
journal = {IEEE TVCG},
year = 2008,
title = {Multi-Focused Geospatial Analysis Using Probes},
doi = {10.1109/TVCG.2008.149},
url = {http://dx.doi.org/10.1109/TVCG.2008.149},
author = {Butkiewicz, T. and Wenwen Dou and Wartell, Z. and Ribarsky, W. and Chang, R.},
pages = {1165--1172},
keywords = {Multiple-view techniques, geospatial visualization, geospatial analysis, focus + context, probes},
abstract = {Traditional geospatial information visualizations often present views that restrict the user to a single perspective. When zoomed out, local trends and anomalies become suppressed and lost; when zoomed in for local inspection, spatial awareness and comparison between regions become limited. In our model, coordinated visualizations are integrated within individual probe interfaces, which depict the local data in user-defined regions-of-interest. Our probe concept can be incorporated into a variety of geospatial visualizations to empower users with the ability to observe, coordinate, and compare data across multiple local regions. It is especially useful when dealing with complex simulations or analyses where behavior in various localities differs from other localities and from the system as a whole. We illustrate the effectiveness of our technique over traditional interfaces by incorporating it within three existing geospatial visualization systems: an agent-based social simulation, a census data exploration tool, and a 3D GIS environment for analyzing urban change over time. In each case, the probe-based interaction enhances spatial awareness, improves inspection and comparison capabilities, expands the range of scopes, and facilitates collaboration among multiple users.},
}
@article{p1006,
journal = {IEEE TVCG},
year = 2008,
title = {On the Visualization of Social and other Scale-Free Networks},
doi = {10.1109/TVCG.2008.151},
url = {http://dx.doi.org/10.1109/TVCG.2008.151},
author = {Yuntao Jia and Hoberock, J. and Garland, M. and Hart, J.C.},
pages = {1285--1292},
keywords = {Scale-free network, edge filtering, betweenness centrality, anisotropic shading},
abstract = {This paper proposes novel methods for visualizing specifically the large power-law graphs that arise in sociology and the sciences. In such cases a large portion of edges can be shown to be less important and removed while preserving component connectedness and other features (e.g. cliques) to more clearly reveal the network's underlying connection pathways. This simplification approach deterministically filters (instead of clustering) the graph to retain important node and edge semantics, and works both automatically and interactively. The improved graph filtering and layout is combined with a novel computer graphics anisotropic shading of the dense crisscrossing array of edges to yield a full social network and scale-free graph visualization system. Both quantitative analysis and visual results demonstrate the effectiveness of this approach.},
}
@article{p1007,
journal = {IEEE TVCG},
year = 2008,
title = {Particle-based labeling: Fast point-feature labeling without obscuring other visual features},
doi = {10.1109/TVCG.2008.152},
url = {http://dx.doi.org/10.1109/TVCG.2008.152},
author = {Luboschik, M. and Schumann, H. and Cords, H.},
pages = {1237--1244},
keywords = {Interactive labeling, dynamic labeling, automatic label placement, occlusion-free, information visualization},
abstract = {In many information visualization techniques, labels are an essential part to communicate the visualized data. To preserve the expressiveness of the visual representation, a placed label should neither occlude other labels nor visual representatives (e.g., icons, lines) that communicate crucial information. Optimal, non-overlapping labeling is an NP-hard problem. Thus, only a few approaches achieve a fast non-overlapping labeling in highly interactive scenarios like information visualization. These approaches generally target the point-feature label placement (PFLP) problem, solving only label-label conflicts. This paper presents a new, fast, solid and flexible 2D labeling approach for the PFLP problem that additionally respects other visual elements and the visual extent of labeled features. The results (number of placed labels, processing time) of our particle-based method compare favorably to those of existing techniques. Although the esthetic quality of non-real-time approaches may not be achieved with our method, it complies with practical demands and thus supports the interactive exploration of information spaces. In contrast to the known adjacent techniques, the flexibility of our technique enables labeling of dense point clouds by the use of non-occluding distant labels. Our approach is independent of the underlying visualization technique, which enables us to demonstrate the application of our labeling method within different information visualization scenarios.},
}
@article{p1008,
journal = {IEEE TVCG},
year = 2008,
title = {Perceptual Organization in User-Generated Graph Layouts},
doi = {10.1109/TVCG.2008.155},
url = {http://dx.doi.org/10.1109/TVCG.2008.155},
author = {van Ham, F. and Rogowitz, B.},
pages = {1333--1339},
keywords = {Network layout visualization, perceptual organization, graph layout, user studies},
abstract = {Many graph layout algorithms optimize visual characteristics to achieve useful representations. Implicitly, their goal is to create visual representations that are more intuitive to human observers. In this paper, we asked users to explicitly manipulate nodes in a network diagram to create layouts that they felt best captured the relationships in the data. This allowed us to measure organizational behavior directly and to evaluate the perceptual importance of particular visual features, such as edge crossings and edge-lengths uniformity. We also manipulated the interior structure of the node relationships by designing data sets that contained clusters, that is, sets of nodes that are strongly interconnected. By varying the degree to which these clusters were "masked" by extraneous edges we were able to measure observers' sensitivity to the existence of clusters and how they revealed them in the network diagram. Based on these measurements we found that observers are able to recover cluster structure, that the distance between clusters is inversely related to the strength of the clustering, and that users exhibit the tendency to use edges to visually delineate perceptual groups. These results demonstrate the role of perceptual organization in representing graph data and provide concrete recommendations for graph layout algorithms.},
}
@article{p1009,
journal = {IEEE TVCG},
year = 2008,
title = {Rapid Graph Layout Using Space filling Curves},
doi = {10.1109/TVCG.2008.158},
url = {http://dx.doi.org/10.1109/TVCG.2008.158},
author = {Muelder, C. and Kwan-Liu Ma},
pages = {1301--1308},
keywords = {Information visualization, Graph layout, Space filling curves},
abstract = {Network data frequently arises in a wide variety of fields, and node-link diagrams are a very natural and intuitive representation of such data. In order for a node-link diagram to be effective, the nodes must be arranged well on the screen. While many graph layout algorithms exist for this purpose, they often have limitations such as high computational complexity or node colocation. This paper proposes a new approach to graph layout through the use of space filling curves which is very fast and guarantees that there will be no nodes that are colocated. The resulting layout is also aesthetic and satisfies several criteria for graph layout effectiveness.},
}
@article{p1010,
journal = {IEEE TVCG},
year = 2008,
title = {Rolling the Dice: Multidimensional Visual Exploration using Scatterplot Matrix Navigation},
doi = {10.1109/TVCG.2008.153},
url = {http://dx.doi.org/10.1109/TVCG.2008.153},
author = {Elmqvist, N. and Dragicevic, P. and Fekete, J.},
pages = {1141--1148},
keywords = {Visual exploration, visual queries, visual analytics, navigation, multivariate data, interaction},
abstract = {Scatterplots remain one of the most popular and widely-used visual representations for multidimensional data due to their simplicity, familiarity and visual clarity, even if they lack some of the flexibility and visual expressiveness of newer multidimensional visualization techniques. This paper presents new interactive methods to explore multidimensional data using scatterplots. This exploration is performed using a matrix of scatterplots that gives an overview of the possible configurations, thumbnails of the scatterplots, and support for interactive navigation in the multidimensional space. Transitions between scatterplots are performed as animated rotations in 3D space, somewhat akin to rolling dice. Users can iteratively build queries using bounding volumes in the dataset, sculpting the query from different viewpoints to become more and more refined. Furthermore, the dimensions in the navigation space can be reordered, manually or automatically, to highlight salient correlations and differences among them. An example scenario presents the interaction techniques supporting smooth and effortless visual exploration of multidimensional datasets.},
}
@article{p1011,
journal = {IEEE TVCG},
year = 2008,
title = {Spatially Ordered Treemaps},
doi = {10.1109/TVCG.2008.165},
url = {http://dx.doi.org/10.1109/TVCG.2008.165},
author = {Wood, J. and Dykes, J.},
pages = {1348--1355},
keywords = {Geovisualization, treemaps, cartograms, CIELab, geographic information, tree structures},
abstract = {Existing treemap layout algorithms suffer to some extent from poor or inconsistent mappings between data order and visual ordering in their representation, reducing their cognitive plausibility. While attempts have been made to quantify this mismatch, and algorithms proposed to minimize inconsistency, solutions provided tend to concentrate on one-dimensional ordering. We propose extensions to the existing squarified layout algorithm that exploit the two-dimensional arrangement of treemap nodes more effectively. Our proposed spatial squarified layout algorithm provides a more consistent arrangement of nodes while maintaining low aspect ratios. It is suitable for the arrangement of data with a geographic component and can be used to create tessellated cartograms for geovisualization. Locational consistency is measured and visualized and a number of layout algorithms are compared. CIELab color space and displacement vector overlays are used to assess and emphasize the spatial layout of treemap nodes. A case study involving locations of tagged photographs in the Flickr database is described.},
}
@article{p1012,
journal = {IEEE TVCG},
year = 2008,
title = {Stacked Graphs - Geometry \& Aesthetics},
doi = {10.1109/TVCG.2008.166},
url = {http://dx.doi.org/10.1109/TVCG.2008.166},
author = {Byron, L. and Wattenberg, M.},
pages = {1245--1252},
keywords = {Streamgraph, ThemeRiver, listening history, lastfm, aesthetics, communication-minded visualization, time series},
abstract = {In February 2008, the New York Times published an unusual chart of box office revenues for 7500 movies over 21 years. The chart was based on a similar visualization, developed by the first author, that displayed trends in music listening. This paper describes the design decisions and algorithms behind these graphics, and discusses the reaction on the Web. We suggest that this type of complex layered graph is effective for displaying large data sets to a mass audience. We provide a mathematical analysis of how this layered graph relates to traditional stacked graphs and to techniques such as ThemeRiver, showing how each method is optimizing a different "energy function". Finally, we discuss techniques for coloring and ordering the layers of such graphs. Throughout the paper, we emphasize the interplay between considerations of aesthetics and legibility.},
}
@article{p1013,
journal = {IEEE TVCG},
year = 2008,
title = {The Shaping of Information by Visual Metaphors},
doi = {10.1109/TVCG.2008.171},
url = {http://dx.doi.org/10.1109/TVCG.2008.171},
author = {Ziemkiewicz, C. and Kosara, R.},
pages = {1269--1276},
keywords = {Cognition, visualization theory, metaphors, hierarchies, evaluation},
abstract = {The nature of an information visualization can be considered to lie in the visual metaphors it uses to structure information. The process of understanding a visualization therefore involves an interaction between these external visual metaphors and the user's internal knowledge representations. To investigate this claim, we conducted an experiment to test the effects of visual metaphor and verbal metaphor on the understanding of tree visualizations. Participants answered simple data comprehension questions while viewing either a treemap or a node-link diagram. Questions were worded to reflect a verbal metaphor that was either compatible or incompatible with the visualization a participant was using. The results suggest that the visual metaphor indeed affects how a user derives information from a visualization. Additionally, we found that the degree to which a user is affected by the metaphor is strongly correlated with the user's ability to answer task questions correctly. These findings are a first step towards illuminating how visual metaphors shape user understanding, and have significant implications for the evaluation, application, and theory of visualization.},
}
@article{p1014,
journal = {IEEE TVCG},
year = 2008,
title = {The Word Tree, an Interactive Visual Concordance},
doi = {10.1109/TVCG.2008.172},
url = {http://dx.doi.org/10.1109/TVCG.2008.172},
author = {Wattenberg, M. and Viegas, F.B.},
pages = {1221--1228},
keywords = {Text visualization, document visualization, Many Eyes, case study, concordance, information retrieval, search},
abstract = {We introduce the Word Tree, a new visualization and information-retrieval technique aimed at text documents. A Word Tree is a graphical version of the traditional "keyword-in-context" method, and enables rapid querying and exploration of bodies of text. In this paper we describe the design of the technique, along with some of the technical issues that arise in its implementation. In addition, we discuss the results of several months of public deployment of word trees on Many Eyes, which provides a window onto the ways in which users obtain value from the visualization.},
}
@article{p1015,
journal = {IEEE TVCG},
year = 2008,
title = {VisGets: Coordinated Visualizations for Web-based Information Exploration and Discovery},
doi = {10.1109/TVCG.2008.175},
url = {http://dx.doi.org/10.1109/TVCG.2008.175},
author = {Dork, M. and Carpendale, S. and Collins, C. and Williamson, C.},
pages = {1205--1212},
keywords = {Information visualization, World Wide Web, information retrieval, exploratory search, visual information seeking},
abstract = {In common Web-based search interfaces, it can be difficult to formulate queries that simultaneously combine temporal, spatial, and topical data filters. We investigate how coordinated visualizations can enhance search and exploration of information on the World Wide Web by easing the formulation of these types of queries. Drawing from visual information seeking and exploratory search, we introduce VisGets - interactive query visualizations of Web-based information that operate with online information within a Web browser. VisGets provide the information seeker with visual overviews of Web resources and offer a way to visually filter the data. Our goal is to facilitate the construction of dynamic search queries that combine filters from more than one data dimension. We present a prototype information exploration system featuring three linked VisGets (temporal, spatial, and topical), and used it to visually explore news items from online RSS feeds.},
}
@article{p1016,
journal = {IEEE TVCG},
year = 2008,
title = {Vispedia: Interactive Visual Exploration of Wikipedia Data via Search-Based Integration},
doi = {10.1109/TVCG.2008.178},
url = {http://dx.doi.org/10.1109/TVCG.2008.178},
author = {Chan, B. and Wu, L. and Talbot, J. and Cammarano, M. and Hanrahan, P.},
pages = {1213--1220},
keywords = {Information visualization, Data integration, Wikipedia, Semantic web, Search interfaces},
abstract = {Wikipedia is an example of the collaborative, semi-structured data sets emerging on the Web. These data sets have large, non-uniform schema that require costly data integration into structured tables before visualization can begin. We present Vispedia, a Web-based visualization system that reduces the cost of this data integration. Users can browse Wikipedia, select an interesting data table, then use a search interface to discover, integrate, and visualize additional columns of data drawn from multiple Wikipedia articles. This interaction is supported by a fast path search algorithm over DBpedia, a semantic graph extracted from Wikipedia's hyperlink structure. Vispedia can also export the augmented data tables produced for use in traditional visualization systems. We believe that these techniques begin to address the "long tail" of visualization by allowing a wider audience to visualize a broader class of data. We evaluated this system in a first-use formative lab study. Study participants were able to quickly create effective visualizations for a diverse set of domains, performing data integration as needed.},
}
@article{p1017,
journal = {IEEE TVCG},
year = 2008,
title = {Visualizing Incomplete and Partially Ranked Data},
doi = {10.1109/TVCG.2008.181},
url = {http://dx.doi.org/10.1109/TVCG.2008.181},
author = {Kidwell, P. and Lebanon, G. and Cleveland, W.S.},
pages = {1356--1363},
keywords = {Partial rankings, incomplete rankings, multidimensional scaling},
abstract = {Ranking data, which result from m raters ranking n items, are difficult to visualize due to their discrete algebraic structure, and the computational difficulties associated with them when n is large. This problem becomes worse when raters provide tied rankings or not all items are ranked. We develop an approach for the visualization of ranking data for large n which is intuitive, easy to use, and computationally efficient. The approach overcomes the structural and computational difficulties by utilizing a natural measure of dissimilarity for raters, and projecting the raters into a low dimensional vector space where they are viewed. The visualization techniques are demonstrated using voting data, jokes, and movie preferences.},
}
@article{p1018,
journal = {IEEE TVCG},
year = 2008,
title = {Viz-A-Vis: Toward Visualizing Video through Computer Vision},
doi = {10.1109/TVCG.2008.185},
url = {http://dx.doi.org/10.1109/TVCG.2008.185},
author = {Romero, M. and Summet, J. and Stasko, J. and Abowd, G.},
pages = {1261--1268},
keywords = {Spatiotemporal visualization, time series data, video visualization, sensor analytics, image/video analytics},
abstract = {In the established procedural model of information visualization, the first operation is to transform raw data into data tables. The transforms typically include abstractions that aggregate and segment relevant data and are usually defined by a human, user or programmer. The theme of this paper is that for video, data transforms should be supported by low level computer vision. High level reasoning still resides in the human analyst, while part of the low level perception is handled by the computer. To illustrate this approach, we present Viz-A-Vis, an overhead video capture and access system for activity analysis in natural settings over variable periods of time. Overhead video provides rich opportunities for long-term behavioral and occupancy analysis, but it poses considerable challenges. We present initial steps addressing two challenges. First, overhead video generates overwhelmingly large volumes of video impractical to analyze manually. Second, automatic video analysis remains an open problem for computer vision.},
}
@article{p1019,
journal = {IEEE TVCG},
year = 2008,
title = {Who Votes For What? A Visual Query Language for Opinion Data},
doi = {10.1109/TVCG.2008.187},
url = {http://dx.doi.org/10.1109/TVCG.2008.187},
author = {Draper, G. and Riesenfeld, R.F.},
pages = {1197--1204},
keywords = {Visual query languages, radial visualization, data analysis, human-computer interaction},
abstract = {Surveys and opinion polls are extremely popular in the media, especially in the months preceding a general election. However, the available tools for analyzing poll results often require specialized training. Hence, data analysis remains out of reach for many casual computer users. Moreover, the visualizations used to communicate the results of surveys are typically limited to traditional statistical graphics like bar graphs and pie charts, both of which are fundamentally noninteractive. We present a simple interactive visualization that allows users to construct queries on large tabular data sets, and view the results in real time. The results of two separate user studies suggest that our interface lowers the learning curve for naive users, while still providing enough analytical power to discover interesting correlations in the data.},
}
@article{p1117,
journal = {IEEE TVCG},
year = 2007,
title = {A Taxonomy of Clutter Reduction for Information Visualisation},
doi = {10.1109/TVCG.2007.70535},
url = {http://dx.doi.org/10.1109/TVCG.2007.70535},
author = {Ellis, G. and Dix, A.},
pages = {1216--1223},
keywords = {Clutter reduction, information visualisation, occlusion, large datasets, taxonomy},
abstract = {Information visualisation is about gaining insight into data through a visual representation. This data is often multivariate and increasingly, the datasets are very large. To help us explore all this data, numerous visualisation applications, both commercial and research prototypes, have been designed using a variety of techniques and algorithms. Whether they are dedicated to geo-spatial data or skewed hierarchical data, most of the visualisations need to adopt strategies for dealing with overcrowded displays, brought about by too much data to fit in too small a display space. This paper analyses a large number of these clutter reduction methods, classifying them both in terms of how they deal with clutter reduction and more importantly, in terms of the benefits and losses. The aim of the resulting taxonomy is to act as a guide to match techniques to problems where different criteria may have different importance, and more importantly as a means to critique and hence develop existing and new techniques.},
}
@article{p1118,
journal = {IEEE TVCG},
year = 2007,
title = {AdaptiviTree: Adaptive Tree Visualization for Tournament-Style Brackets},
doi = {10.1109/TVCG.2007.70537},
url = {http://dx.doi.org/10.1109/TVCG.2007.70537},
author = {Tan, D.S. and Smith, G. and Bongshin Lee and Robertson, G.},
pages = {1113--1120},
keywords = {Online fantasy sports, tournament, bracket, picks, adaptive tree visualization},
abstract = {Online pick'em games, such as the recent NCAA college basketball March Madness tournament, form a large and rapidly growing industry. In these games, players make predictions on a tournament bracket that defines which competitors play each other and how they proceed toward a single champion. Throughout the course of the tournament, players monitor the brackets to track progress and to compare predictions made by multiple players. This is often a complex sense making task. The classic bracket visualization was designed for use on paper and utilizes an incrementally additive system in which the winner of each match-up is rewritten in the next round as the tournament progresses. Unfortunately, this representation requires a significant amount of space and makes it relatively difficult to get a quick overview of the tournament state since competitors take arbitrary paths through the static bracket. In this paper, we present AdaptiviTree, a novel visualization that adaptively deforms the representation of the tree and uses its shape to convey outcome information. AdaptiviTree not only provides a more compact and understandable representation, but also allows overlays that display predictions as well as other statistics. We describe results from a lab study we conducted to explore the efficacy of AdaptiviTree, as well as from a deployment of the system in a recent real-world sports tournament.},
}
@article{p1119,
journal = {IEEE TVCG},
year = 2007,
title = {Animated Transitions in Statistical Data Graphics},
doi = {10.1109/TVCG.2007.70539},
url = {http://dx.doi.org/10.1109/TVCG.2007.70539},
author = {Heer, J. and Robertson, G.},
pages = {1240--1247},
keywords = {Statistical data graphics, animation, transitions, information visualization, design, experiment},
abstract = {In this paper we investigate the effectiveness of animated transitions between common statistical data graphics such as bar charts, pie charts, and scatter plots. We extend theoretical models of data graphics to include such transitions, introducing a taxonomy of transition types. We then propose design principles for creating effective transitions and illustrate the application of these principles in DynaVis, a visualization system featuring animated data graphics. Two controlled experiments were conducted to assess the efficacy of various transition types, finding that animated transitions can significantly improve graphical perception.},
}
@article{p1120,
journal = {IEEE TVCG},
year = 2007,
title = {Browsing Zoomable Treemaps: Structure-Aware Multi-Scale Navigation Techniques},
doi = {10.1109/TVCG.2007.70540},
url = {http://dx.doi.org/10.1109/TVCG.2007.70540},
author = {Blanch, R. and Lecolinet, E.},
pages = {1248--1253},
keywords = {Information visualization, multi-scale interaction, structure-aware navigation, zoomable treemaps},
abstract = {Treemaps provide an interesting solution for representing hierarchical data. However, most studies have mainly focused on layout algorithms and paid limited attention to the interaction with treemaps. This makes it difficult to explore large data sets and to get access to details, especially to those related to the leaves of the trees. We propose the notion of zoomable treemaps (ZTMs), a hybridization between treemaps and zoomable user interfaces that facilitates the navigation in large hierarchical data sets. By providing a consistent set of interaction techniques, ZTMs make it possible for users to browse through very large data sets (e.g., 700,000 nodes dispatched amongst 13 levels). These techniques use the structure of the displayed data to guide the interaction and provide a way to improve interactive navigation in treemaps.},
}
@article{p1121,
journal = {IEEE TVCG},
year = 2007,
title = {Casual Information Visualization: Depictions of Data in Everyday Life},
doi = {10.1109/TVCG.2007.70541},
url = {http://dx.doi.org/10.1109/TVCG.2007.70541},
author = {Pousman, Z. and Stasko, J. and Mateas, M.},
pages = {1145--1152},
keywords = {Casual information visualization, ambient infovis, social infovis, editorial, design, evaluation},
abstract = {Information visualization has often focused on providing deep insight for expert user populations and on techniques for amplifying cognition through complicated interactive visual models. This paper proposes a new subdomain for infovis research that complements the focus on analytic tasks and expert use. Instead of work-related and analytically driven infovis, we propose casual information visualization (or casual infovis) as a complement to more traditional infovis domains. Traditional infovis systems, techniques, and methods do not easily lend themselves to the broad range of user populations, from expert to novices, or from work tasks to more everyday situations. We propose definitions, perspectives, and research directions for further investigations of this emerging subfield. These perspectives build from ambient information visualization (Skog et al., 2003), social visualization, and also from artistic work that visualizes information (Viegas and Wattenberg, 2007). We seek to provide a perspective on infovis that integrates these research agendas under a coherent vocabulary and framework for design. We enumerate the following contributions. First, we demonstrate how blurry the boundary of infovis is by examining systems that exhibit many of the putative properties of infovis systems, but perhaps would not be considered so. Second, we explore the notion of insight and how, instead of a monolithic definition of insight, there may be multiple types, each with particular characteristics. Third, we discuss design challenges for systems intended for casual audiences. Finally we conclude with challenges for system evaluation in this emerging subfield.},
}
@article{p1122,
journal = {IEEE TVCG},
year = 2007,
title = {Exploring Multiple Trees through DAG Representations},
doi = {10.1109/TVCG.2007.70556},
url = {http://dx.doi.org/10.1109/TVCG.2007.70556},
author = {Graham, M. and Kennedy, J.},
pages = {1294--1301},
keywords = {Multiple trees, Directed Acyclic Graph},
abstract = {We present a directed acyclic graph visualisation designed to allow interaction with a set of multiple classification trees, specifically to find overlaps and differences between groups of trees and individual trees. The work is motivated by the need to find a representation for multiple trees that has the space-saving property of a general graph representation and the intuitive parent-child direction cues present in individual representation of trees. Using example taxonomic data sets, we describe augmentations to the common barycenter DAG layout method that reveal shared sets of child nodes between common parents in a clearer manner. Other interactions such as displaying the multiple ancestor paths of a node when it occurs in several trees, and revealing intersecting sibling sets within the context of a single DAG representation are also discussed.},
}
@article{p1123,
journal = {IEEE TVCG},
year = 2007,
title = {Geographically Weighted Visualization: Interactive Graphics for Scale-Varying Exploratory Analysis},
doi = {10.1109/TVCG.2007.70558},
url = {http://dx.doi.org/10.1109/TVCG.2007.70558},
author = {Dykes, J. and Brunsdon, C.},
pages = {1161--1168},
keywords = {Geographical weighting, exploratory data analysis, scale, multivariate, directional, interaction, coordinated views},
abstract = {We introduce a series of geographically weighted (GW) interactive graphics, or geowigs, and use them to explore spatial relationships at a range of scales. We visually encode information about geographic and statistical proximity and variation in novel ways through gw-choropleth maps, multivariate gw-boxplots, gw-shading and scalograms. The new graphic types reveal information about GW statistics at several scales concurrently. We implement these views in prototype software containing dynamic links and GW interactions that encourage exploration and refine them to consider directional geographies. An informal evaluation uses interactive GW techniques to consider Guerry's dataset of 'moral statistics', casting doubt on correlations originally proposed through visual analysis, revealing new local anomalies and suggesting multivariate geographic relationships. Few attempts at visually synthesising geography with multivariate statistical values at multiple scales have been reported. The geowigs proposed here provide informative representations of multivariate local variation, particularly when combined with interactions that coordinate views and result in gw-shading. We argue that they are widely applicable to area and point-based geographic data and provide a set of methods to support visual analysis using GW statistics through which the effects of geography can be explored at multiple scales.},
}
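A geographically weighted statistic of the kind the p1123 geowigs display can be sketched as a kernel-weighted local mean; the Gaussian kernel and the bandwidth parameter below are illustrative assumptions rather than the authors' exact weighting scheme.

import math

def gw_mean(values, locations, focus, bandwidth):
    """Geographically weighted mean of `values` observed at 2D `locations`.

    Each observation is down-weighted by a Gaussian kernel of its distance
    to the `focus` point; `bandwidth` sets the spatial scale of the weighting.
    """
    weights = []
    for (x, y) in locations:
        d2 = (x - focus[0]) ** 2 + (y - focus[1]) ** 2
        weights.append(math.exp(-d2 / (2.0 * bandwidth ** 2)))
    wsum = sum(weights)
    return sum(w * v for w, v in zip(weights, values)) / wsum

# The same data yields different local means as the bandwidth (scale) varies.
vals = [2.0, 4.0, 9.0, 1.0]
locs = [(0, 0), (1, 0), (5, 5), (6, 5)]
print(gw_mean(vals, locs, focus=(0.5, 0.0), bandwidth=1.0))
print(gw_mean(vals, locs, focus=(0.5, 0.0), bandwidth=10.0))
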
@article{p1124,
journal = {IEEE TVCG},
year = 2007,
title = {Hotmap: Looking at Geographic Attention},
doi = {10.1109/TVCG.2007.70561},
url = {http://dx.doi.org/10.1109/TVCG.2007.70561},
author = {Fisher, D.},
pages = {1184--1191},
keywords = {Geographical visualization, GIS, heatmap, server log analysis, online mapping systems, social navigation},
abstract = {Understanding how people use online maps allows data acquisition teams to concentrate their efforts on the portions of the map that are most seen by users. Online maps represent vast databases, and so it is insufficient to simply look at a list of the most-accessed URLs. Hotmap takes advantage of the design of a mapping system's imagery pyramid to superpose a heatmap of the log files over the original maps. Users' behavior within the system can be observed and interpreted. This paper discusses the imagery acquisition task that motivated Hotmap, and presents several examples of information that Hotmap makes visible. We discuss the design choices behind Hotmap, including logarithmic color schemes; low-saturation background images; and tuning images to explore both infrequently-viewed and frequently-viewed spaces.},
}
@article{p1125,
journal = {IEEE TVCG},
year = 2007,
title = {Interactive Tree Comparison for Co-located Collaborative Information Visualization},
doi = {10.1109/TVCG.2007.70568},
url = {http://dx.doi.org/10.1109/TVCG.2007.70568},
author = {Isenberg, P. and Carpendale, S.},
pages = {1232--1239},
keywords = {Information visualization, collaboration, co-located work, hierarchical data comparison},
abstract = {In many domains, increased collaboration has led to more innovation by fostering the sharing of knowledge, skills, and ideas. Shared analysis of information visualizations does not only lead to increased information processing power, but team members can also share, negotiate, and discuss their views and interpretations on a dataset and contribute unique perspectives on a given problem. Designing technologies to support collaboration around information visualizations poses special challenges and relatively few systems have been designed. We focus on supporting small groups collaborating around information visualizations in a co-located setting, using a shared interactive tabletop display. We introduce an analysis of challenges and requirements for the design of co-located collaborative information visualization systems. We then present a new system that facilitates hierarchical data comparison tasks for this type of collaborative work. Our system supports multi-user input, shared and individual views on the hierarchical data visualization, flexible use of representations, and flexible workspace organization to facilitate group work around visualizations.},
}
@article{p1126,
journal = {IEEE TVCG},
year = 2007,
title = {Interactive Visual Exploration of a Large Spatio-temporal Dataset: Reflections on a Geovisualization Mashup.},
doi = {10.1109/TVCG.2007.70570},
url = {http://dx.doi.org/10.1109/TVCG.2007.70570},
author = {Wood, J. and Dykes, J. and Slingsby, A. and Clarke, K.},
pages = {1176--1183},
keywords = {Large dataset visualization, text and document visualization, multiresolution visualization, geographic visualization, applications of infovis},
abstract = {Exploratory visual analysis is useful for the preliminary investigation of large structured, multifaceted spatio-temporal datasets. This process requires the selection and aggregation of records by time, space and attribute, the ability to transform data and the flexibility to apply appropriate visual encodings and interactions. We propose an approach inspired by geographical 'mashups' in which freely-available functionality and data are loosely but flexibly combined using de facto exchange standards. Our case study combines MySQL, PHP and the LandSerf GIS to allow Google Earth to be used for visual synthesis and interaction with encodings described in KML. This approach is applied to the exploration of a log of 1.42 million requests made of a mobile directory service. Novel combinations of interaction and visual encoding are developed including spatial 'tag clouds', 'tag maps', 'data dials' and multi-scale density surfaces. Four aspects of the approach are informally evaluated: the visual encodings employed, their success in the visual exploration of the dataset, the specific tools used and the 'mashup' approach. Preliminary findings will be beneficial to others considering using mashups for visualization. The specific techniques developed may be more widely applied to offer insights into the structure of multifarious spatio-temporal data of the type explored here.},
}
@article{p1127,
journal = {IEEE TVCG},
year = 2007,
title = {Legible Cities: Focus-Dependent Multi-Resolution Visualization of Urban Relationships},
doi = {10.1109/TVCG.2007.70574},
url = {http://dx.doi.org/10.1109/TVCG.2007.70574},
author = {Chang, R. and Wessel, G. and Kosara, R. and Sauda, E. and Ribarsky, W.},
pages = {1169--1175},
keywords = {Urban models, information visualization, multi-resolution},
abstract = {Numerous systems have been developed to display large collections of data for urban contexts; however, most have focused on layering of single dimensions of data and manual calculations to understand relationships within the urban environment. Furthermore, these systems often limit the user's perspectives on the data, thereby diminishing the user's spatial understanding of the viewing region. In this paper, we introduce a highly interactive urban visualization tool that provides intuitive understanding of the urban data. Our system utilizes an aggregation method that combines buildings and city blocks into legible clusters, thus providing continuous levels of abstraction while preserving the user's mental model of the city. In conjunction with a 3D view of the urban model, a separate but integrated information visualization view displays multiple disparate dimensions of the urban data, allowing the user to understand the urban environment both spatially and cognitively in one glance. For our evaluation, expert users from various backgrounds viewed a real city model with census data and confirmed that our system allowed them to gain more intuitive and deeper understanding of the urban model from different perspectives and levels of abstraction than existing commercial urban visualization systems.},
}
@article{p1128,
journal = {IEEE TVCG},
year = 2007,
title = {ManyEyes: a Site for Visualization at Internet Scale},
doi = {10.1109/TVCG.2007.70577},
url = {http://dx.doi.org/10.1109/TVCG.2007.70577},
author = {Viegas, F.B. and Wattenberg, M. and van Ham, F. and Kriss, J. and McKeon, M.},
pages = {1121--1128},
keywords = {Visualization, World Wide Web, Social Software, Social Data Analysis, Communication-Minded Visualization},
abstract = {We describe the design and deployment of Many Eyes, a public Web site where users may upload data, create interactive visualizations, and carry on discussions. The goal of the site is to support collaboration around visualizations at a large scale by fostering a social style of data analysis in which visualizations not only serve as a discovery tool for individuals but also as a medium to spur discussion among users. To support this goal, the site includes novel mechanisms for end-user creation of visualizations and asynchronous collaboration around those visualizations. In addition to describing these technologies, we provide a preliminary report on the activity of our users.},
}
@article{p1129,
journal = {IEEE TVCG},
year = 2007,
title = {Multi-Level Graph Layout on the GPU},
doi = {10.1109/TVCG.2007.70580},
url = {http://dx.doi.org/10.1109/TVCG.2007.70580},
author = {Frishman, Y. and Tal, A.},
pages = {1310--1319},
keywords = {Graph layout, GPU, graph partitioning},
abstract = {This paper presents a new algorithm for force directed graph layout on the GPU. The algorithm, whose goal is to compute layouts accurately and quickly, has two contributions. The first contribution is proposing a general multi-level scheme, which is based on spectral partitioning. The second contribution is computing the layout on the GPU. Since the GPU requires a data parallel programming model, the challenge is devising a mapping of a naturally unstructured graph into a well-partitioned structured one. This is done by computing a balanced partitioning of a general graph. This algorithm provides a general multi-level scheme, which has the potential to be used not only for computation on the GPU, but also on emerging multi-core architectures. The algorithm manages to compute high quality layouts of large graphs in a fraction of the time required by existing algorithms of similar quality. An application for visualization of the topologies of ISP (Internet service provider) networks is presented.},
}
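For context on what the p1129 multi-level GPU algorithm accelerates, the sketch below shows one CPU iteration of a classic Fruchterman-Reingold-style force-directed step; it deliberately omits the paper's spectral partitioning, multi-level coarsening, and GPU mapping, and the constants are arbitrary.

import math
import random

def force_directed_step(pos, edges, area=1.0, step=0.05):
    """One force-directed iteration: all node pairs repel, edge endpoints
    attract, and every node moves at most `step`. `pos` maps node -> [x, y]."""
    k = math.sqrt(area / len(pos))                 # ideal edge length
    disp = {v: [0.0, 0.0] for v in pos}
    nodes = list(pos)
    for i, u in enumerate(nodes):                  # pairwise repulsion
        for v in nodes[i + 1:]:
            dx = pos[u][0] - pos[v][0]
            dy = pos[u][1] - pos[v][1]
            d = math.hypot(dx, dy) or 1e-9
            f = k * k / d
            disp[u][0] += dx / d * f
            disp[u][1] += dy / d * f
            disp[v][0] -= dx / d * f
            disp[v][1] -= dy / d * f
    for u, v in edges:                             # attraction along edges
        dx = pos[u][0] - pos[v][0]
        dy = pos[u][1] - pos[v][1]
        d = math.hypot(dx, dy) or 1e-9
        f = d * d / k
        disp[u][0] -= dx / d * f
        disp[u][1] -= dy / d * f
        disp[v][0] += dx / d * f
        disp[v][1] += dy / d * f
    for v in pos:                                  # capped displacement
        dx, dy = disp[v]
        d = math.hypot(dx, dy) or 1e-9
        pos[v][0] += dx / d * min(d, step)
        pos[v][1] += dy / d * min(d, step)
    return pos

random.seed(1)
layout = {v: [random.random(), random.random()] for v in "abcd"}
for _ in range(200):
    force_directed_step(layout, [("a", "b"), ("b", "c"), ("c", "d"), ("d", "a")])
print(layout)
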
@article{p1130,
journal = {IEEE TVCG},
year = 2007,
title = {NodeTrix: a Hybrid Visualization of Social Networks},
doi = {10.1109/TVCG.2007.70582},
url = {http://dx.doi.org/10.1109/TVCG.2007.70582},
author = {Henry, N. and Fekete, J. and McGuffin, M.J.},
pages = {1302--1309},
keywords = {Network visualization, Matrix visualization, Hybrid visualization, Aggregation, Interaction},
abstract = {The need to visualize large social networks is growing as hardware capabilities make analyzing large networks feasible and many new data sets become available. Unfortunately, the visualizations in existing systems do not satisfactorily resolve the basic dilemma of being readable both for the global structure of the network and also for detailed analysis of local communities. To address this problem, we present NodeTrix, a hybrid representation for networks that combines the advantages of two traditional representations: node-link diagrams are used to show the global structure of a network, while arbitrary portions of the network can be shown as adjacency matrices to better support the analysis of communities. A key contribution is a set of interaction techniques. These allow analysts to create a NodeTrix visualization by dragging selections to and from node-link and matrix forms, and to flexibly manipulate the NodeTrix representation to explore the dataset and create meaningful summary visualizations of their findings. Finally, we present a case study applying NodeTrix to the analysis of the InfoVis 2004 coauthorship dataset to illustrate the capabilities of NodeTrix as both an exploration tool and an effective means of communicating results.},
}
@article{p1131,
journal = {IEEE TVCG},
year = 2007,
title = {Overview Use in Multiple Visual Information Resolution Interfaces},
doi = {10.1109/TVCG.2007.70583},
url = {http://dx.doi.org/10.1109/TVCG.2007.70583},
author = {Lam, H. and Munzner, T. and Kincaid, R.},
pages = {1278--1285},
keywords = {Multiple resolutions, overview use, user study},
abstract = {In interfaces that provide multiple visual information resolutions (VIR), low-VIR overviews typically sacrifice visual details for display capacity, with the assumption that users can select regions of interest to examine at higher VIRs. Designers can create low VIRs based on multi-level structure inherent in the data, but have little guidance with single-level data. To better guide design tradeoff between display capacity and visual target perceivability, we looked at overview use in two multiple-VIR interfaces with high-VIR displays either embedded within, or separate from, the overviews. We studied two visual requirements for effective overview and found that participants would reliably use the low-VIR overviews only when the visual targets were simple and had small visual spans. Otherwise, at least 20% chose to use the high-VIR view exclusively. Surprisingly, neither of the multiple-VIR interfaces provided performance benefits when compared to using the high-VIR view alone. However, we did observe benefits in providing side-by-side comparisons for target matching. We conjecture that the high cognitive load of multiple-VIR interface interactions, whether real or perceived, is a more considerable barrier to their effective use than was previously considered.},
}
@article{p1132,
journal = {IEEE TVCG},
year = 2007,
title = {Scented Widgets: Improving Navigation Cues with Embedded Visualizations},
doi = {10.1109/TVCG.2007.70589},
url = {http://dx.doi.org/10.1109/TVCG.2007.70589},
author = {Willett, W. and Heer, J. and Agrawala, M.},
pages = {1129--1136},
keywords = {Information visualization, user interface toolkits, information foraging, social navigation, social data analysis},
abstract = {This paper presents scented widgets, graphical user interface controls enhanced with embedded visualizations that facilitate navigation in information spaces. We describe design guidelines for adding visual cues to common user interface widgets such as radio buttons, sliders, and combo boxes and contribute a general software framework for applying scented widgets within applications with minimal modifications to existing source code. We provide a number of example applications and describe a controlled experiment which finds that users exploring unfamiliar data make up to twice as many unique discoveries using widgets imbued with social navigation data. However, these differences equalize as familiarity with the data increases.},
}
@article{p1133,
journal = {IEEE TVCG},
year = 2007,
title = {Sequential Document Visualization},
doi = {10.1109/TVCG.2007.70592},
url = {http://dx.doi.org/10.1109/TVCG.2007.70592},
author = {Yi Mao and Dillon, J.V. and Lebanon, G.},
pages = {1208--1215},
keywords = {Document visualization, multi-resolution analysis, local fitting},
abstract = {Documents and other categorical valued time series are often characterized by the frequencies of short range sequential patterns such as n-grams. This representation converts sequential data of varying lengths to high dimensional histogram vectors which are easily modeled by standard statistical models. Unfortunately, the histogram representation ignores most of the medium and long range sequential dependencies making it unsuitable for visualizing sequential data. We present a novel framework for sequential visualization of discrete categorical time series based on the idea of local statistical modeling. The framework embeds categorical time series as smooth curves in the multinomial simplex summarizing the progression of sequential trends. We discuss several visualization techniques based on the above framework and demonstrate their usefulness for document visualization.},
}
@article{p1134,
journal = {IEEE TVCG},
year = 2007,
title = {Show Me: Automatic Presentation for Visual Analysis},
doi = {10.1109/TVCG.2007.70594},
url = {http://dx.doi.org/10.1109/TVCG.2007.70594},
author = {Mackinlay, J. and Hanrahan, P. and Stolte, C.},
pages = {1137--1144},
keywords = {Automatic presentation, visual analysis, graphic design, best practices, data visualization, small multiples},
abstract = {This paper describes Show Me, an integrated set of user interface commands and defaults that incorporate automatic presentation into a commercial visual analysis system called Tableau. A key aspect of Tableau is VizQL, a language for specifying views, which is used by Show Me to extend automatic presentation to the generation of tables of views (commonly called small multiple displays). A key research issue for the commercial application of automatic presentation is the user experience, which must support the flow of visual analysis. User experience has not been the focus of previous research on automatic presentation. The Show Me user experience includes the automatic selection of mark types, a command to add a single field to a view, and a pair of commands to build views for multiple fields. Although the use of these defaults and commands is optional, user interface logs indicate that Show Me is used by commercial users.},
}
@article{p1135,
journal = {IEEE TVCG},
year = 2007,
title = {Spatialization Design: Comparing Points and Landscapes},
doi = {10.1109/TVCG.2007.70596},
url = {http://dx.doi.org/10.1109/TVCG.2007.70596},
author = {Tory, M. and Sprague, D.W. and Fuqu Wu and Wing Yan So and Munzner, T.},
pages = {1262--1269},
keywords = {Spatialization, Information Landscape, User Study, Numerosity, 3D, 2D, Colour, Greyscale, Surface, Points},
abstract = {Spatializations represent non-spatial data using a spatial layout similar to a map. We present an experiment comparing different visual representations of spatialized data, to determine which representations are best for a non-trivial search and point estimation task. Primarily, we compare point-based displays to 2D and 3D information landscapes. We also compare a colour (hue) scale to a grey (lightness) scale. For the task we studied, point-based spatializations were far superior to landscapes, and 2D landscapes were superior to 3D landscapes. Little or no benefit was found for redundantly encoding data using colour or greyscale combined with landscape height. 3D landscapes with no colour scale (height-only) were particularly slow and inaccurate. A colour scale was found to be better than a greyscale for all display types, but a greyscale was helpful compared to height-only. These results suggest that point-based spatializations should be chosen over landscape representations, at least for tasks involving only point data itself rather than derived information about the data space.},
}
@article{p1136,
journal = {IEEE TVCG},
year = 2007,
title = {Toward a Deeper Understanding of the Role of Interaction in Information Visualization},
doi = {10.1109/TVCG.2007.70515},
url = {http://dx.doi.org/10.1109/TVCG.2007.70515},
author = {Ji Soo Yi and Youn-ah Kang and Stasko, J. and Jacko, J.A.},
pages = {1224--1231},
keywords = {Information visualization, interaction, interaction techniques, taxonomy, visual analytics},
abstract = {Even though interaction is an important part of information visualization (Infovis), it has garnered a relatively low level of attention from the Infovis community. A few frameworks and taxonomies of Infovis interaction techniques exist, but they typically focus on low-level operations and do not address the variety of benefits interaction provides. After conducting an extensive review of Infovis systems and their interactive capabilities, we propose seven general categories of interaction techniques widely used in Infovis: 1) Select, 2) Explore, 3) Reconfigure, 4) Encode, 5) Abstract/Elaborate, 6) Filter, and 7) Connect. These categories are organized around a user's intent while interacting with a system rather than the low-level interaction techniques provided by a system. The categories can act as a framework to help discuss and evaluate interaction techniques and hopefully lay an initial foundation toward a deeper understanding and a science of interaction.},
}
@article{p1137,
journal = {IEEE TVCG},
year = 2007,
title = {VisLink: Revealing Relationships Amongst Visualizations},
doi = {10.1109/TVCG.2007.70521},
url = {http://dx.doi.org/10.1109/TVCG.2007.70521},
author = {Collins, C. and Carpendale, S.},
pages = {1192--1199},
keywords = {Graph visualization, node-link diagrams, structural comparison, hierarchies, 3D visualization, edge aggregation},
abstract = {We present VisLink, a method by which visualizations and the relationships between them can be interactively explored. VisLink readily generalizes to support multiple visualizations, empowers inter-representational queries, and enables the reuse of the spatial variables, thus supporting efficient information encoding and providing for powerful visualization bridging. Our approach uses multiple 2D layouts, drawing each one in its own plane. These planes can then be placed and re-positioned in 3D space: side by side, in parallel, or in chosen placements that provide favoured views. Relationships, connections, and patterns between visualizations can be revealed and explored using a variety of interaction techniques including spreading activation and search filters.},
}
@article{p1138,
journal = {IEEE TVCG},
year = 2007,
title = {Visual Analysis of Network Traffic for Resource Planning, Interactive Monitoring, and Interpretation of Security Threats},
doi = {10.1109/TVCG.2007.70522},
url = {http://dx.doi.org/10.1109/TVCG.2007.70522},
author = {Mansmann, F. and Keim, D.A. and North, S.C. and Rexroad, B. and Sheleheda, D.},
pages = {1105--1112},
keywords = {Information visualization, network security, network monitoring, treemap},
abstract = {The Internet has become a wild place: malicious code is spread on personal computers across the world, deploying botnets ready to attack the network infrastructure. The vast number of security incidents and other anomalies overwhelms attempts at manual analysis, especially when monitoring service provider backbone links. We present an approach to interactive visualization with a case study indicating that interactive visualization can be applied to gain more insight into these large data sets. We superimpose a hierarchy on IP address space, and study the suitability of Treemap variants for each hierarchy level. Because viewing the whole IP hierarchy at once is not practical for most tasks, we evaluate layout stability when eliding large parts of the hierarchy, while maintaining the visibility and ordering of the data of interest.},
}
@article{p1139,
journal = {IEEE TVCG},
year = 2007,
title = {Visualization of Heterogeneous Data},
doi = {10.1109/TVCG.2007.70617},
url = {http://dx.doi.org/10.1109/TVCG.2007.70617},
author = {Cammarano, M. and Xin Dong and Bryan Chan and Klingner, J. and Talbot, J. and Halevy, A. and Hanrahan, P.},
pages = {1200--1207},
keywords = {Data integration, RDF, attribute inference},
abstract = {Both the resource description framework (RDF), used in the semantic web, and Maya Viz u-forms represent data as a graph of objects connected by labeled edges. Existing systems for flexible visualization of this kind of data require manual specification of the possible visualization roles for each data attribute. When the schema is large and unfamiliar, this requirement inhibits exploratory visualization by requiring a costly up-front data integration step. To eliminate this step, we propose an automatic technique for mapping data attributes to visualization attributes. We formulate this as a schema matching problem, finding appropriate paths in the data model for each required visualization attribute in a visualization template.},
}
@article{p1140,
journal = {IEEE TVCG},
year = 2007,
title = {Visualizing Causal Semantics Using Animations},
doi = {10.1109/TVCG.2007.70528},
url = {http://dx.doi.org/10.1109/TVCG.2007.70528},
author = {Kadaba, N.R. and Irani, P. and Leboe, J.},
pages = {1254--1261},
keywords = {Causality, visualization, semantics, animated graphs, perception, visualizing cause and effect, graph semantics},
abstract = {Michotte's theory of ampliation suggests that causal relationships are perceived by objects animated under appropriate spatiotemporal conditions. We extend the theory of ampliation and propose that the immediate perception of complex causal relations is also dependent on a set of structural and temporal rules. We designed animated representations, based on Michotte's rules, for showing complex causal relationships or causal semantics. In this paper we describe a set of animations for showing semantics such as causal amplification, causal strength, causal dampening, and causal multiplicity. In a two part study we compared the effectiveness of both the static and animated representations. The first study (N=44) asked participants to recall passages that were previously displayed using both types of representations. Participants were 8% more accurate in recalling causal semantics when they were presented using animations instead of static graphs. In the second study (N=112) we evaluated the intuitiveness of the representations. Our results showed that while users were as accurate with the static graphs as with the animations, they were 9% faster in matching the correct causal statements in the animated condition. Overall our results show that animated diagrams that are designed based on perceptual rules such as those proposed by Michotte have the potential to facilitate comprehension of complex causal relations.},
}
@article{p1141,
journal = {IEEE TVCG},
year = 2007,
title = {Visualizing Changes of Hierarchical Data using Treemaps},
doi = {10.1109/TVCG.2007.70529},
url = {http://dx.doi.org/10.1109/TVCG.2007.70529},
author = {Ying Tu and Han-Wei Shen},
pages = {1286--1293},
keywords = {Treemap, tree comparison, visualize changes, treemap layout algorithm},
abstract = {While the treemap is a popular method for visualizing hierarchical data, it is often difficult for users to track layout and attribute changes when the data evolve over time. When viewing the treemaps side by side or back and forth, there exist several problems that can prevent viewers from performing effective comparisons. Those problems include abrupt layout changes, a lack of prominent visual patterns to represent layouts, and a lack of direct contrast to highlight differences. In this paper, we present strategies to visualize changes of hierarchical data using treemaps. A new treemap layout algorithm is presented to reduce abrupt layout changes and produce consistent visual patterns. Techniques are proposed to effectively visualize the difference and contrast between two treemap snapshots in terms of the map items' colors, sizes, and positions. Experimental data show that our algorithm can achieve a good balance in maintaining a treemap's stability, continuity, readability, and average aspect ratio. A software tool is created to compare treemaps and generate the visualizations. User studies show that the users can better understand the changes in the hierarchy and layout, and more quickly notice the color and size differences using our method.},
}
@article{p1142,
journal = {IEEE TVCG},
year = 2007,
title = {Visualizing the History of Living Spaces},
doi = {10.1109/TVCG.2007.70621},
url = {http://dx.doi.org/10.1109/TVCG.2007.70621},
author = {Ivanov, Y.A. and Wren, C.R. and Sorokin, A. and Kaur, I.},
pages = {1153--1160},
keywords = {Sensor networks, user interfaces, surveillance, timeline, spatio-temporal visualization},
abstract = {The technology available to building designers now makes it possible to monitor buildings on a very large scale. Video cameras and motion sensors are commonplace in practically every office space, and are slowly making their way into living spaces. The application of such technologies, in particular video cameras, while improving security, also violates privacy. On the other hand, motion sensors, while being privacy-conscious, typically do not provide enough information for a human operator to maintain the same degree of awareness about the space that can be achieved by using video cameras. We propose a novel approach in which we use a large number of simple motion sensors and a small set of video cameras to monitor a large office space. In our system we deployed 215 motion sensors and six video cameras to monitor the 3,000-square-meter office space occupied by 80 people for a period of about one year. The main problem in operating such systems is finding a way to present this highly multidimensional data, which includes both spatial and temporal components, to a human operator to allow browsing and searching recorded data in an efficient and intuitive way. In this paper we present our experiences and the solutions that we have developed in the course of our work on the system. We consider this work to be the first step in helping designers and managers of building systems gain access to information about occupants' behavior in the context of an entire building in a way that is only minimally intrusive to the occupants' privacy.},
}
@article{p1143,
journal = {IEEE TVCG},
year = 2007,
title = {Weaving Versus Blending: a quantitative assessment of the information carrying capacities of two alternative methods for conveying multivariate data with color.},
doi = {10.1109/TVCG.2007.70623},
url = {http://dx.doi.org/10.1109/TVCG.2007.70623},
author = {Hagh-Shenas, H. and Sunghee Kim and Interrante, V. and Healey, C.},
pages = {1270--1277},
keywords = {Color, perception, visualization, color weaving, color blending},
abstract = {In many applications, it is important to understand the individual values of, and relationships between, multiple related scalar variables defined across a common domain. Several approaches have been proposed for representing data in these situations. In this paper we focus on strategies for the visualization of multivariate data that rely on color mixing. In particular, through a series of controlled observer experiments, we seek to establish a fundamental understanding of the information-carrying capacities of two alternative methods for encoding multivariate information using color: color blending and color weaving. We begin with a baseline experiment in which we assess participants' abilities to accurately read numerical data encoded in six different basic color scales defined in the L*a*b* color space. We then assess participants' abilities to read combinations of 2, 3, 4 and 6 different data values represented in a common region of the domain, encoded using either color blending or color weaving. In color blending a single mixed color is formed via linear combination of the individual values in L*a*b* space, and in color weaving the original individual colors are displayed side-by-side in a high frequency texture that fills the region. A third experiment was conducted to clarify some of the trends regarding the color contrast and its effect on the magnitude of the error that was observed in the second experiment. The results indicate that when the component colors are represented side-by-side in a high frequency texture, most participants' abilities to infer the values of individual components are significantly improved, relative to when the colors are blended. Participants' performance was significantly better with color weaving particularly when more than 2 colors were used, and even when the individual colors subtended only 3 minutes of visual angle in the texture. However, the information-carrying capacity of the color weaving approach has its limits. We found that participants' abilities to accurately interpret each of the individual components in a high frequency color texture typically fall off as the number of components increases from 4 to 6. We found no significant advantages, in either color blending or color weaving, to using color scales based on component hues that are more widely separated in the L*a*b* color space. Furthermore, we found some indications that extra difficulties may arise when opponent hues are employed.},
}
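A toy contrast between the two encodings compared in p1143: blending collapses the component colours into one mixed colour (here a plain mean of L*a*b* triples), while weaving keeps each colour intact and interleaves them in a high-frequency texture. The colour values are illustrative assumptions and no display-space conversion is attempted.

def blend_lab(colors):
    """Colour blending: a single mixed colour formed as the mean of the
    component colours, taken directly in L*a*b* coordinates."""
    n = len(colors)
    return tuple(sum(c[i] for c in colors) / n for i in range(3))

def weave(colors, width, height):
    """Colour weaving: keep the component colours intact and lay them out
    side by side in a high-frequency, checker-like texture."""
    return [[colors[(x + y) % len(colors)] for x in range(width)]
            for y in range(height)]

# Two variables encoded as L*a*b* colours (illustrative values).
lab_a = (60.0, 55.0, 40.0)    # a reddish colour
lab_b = (70.0, -45.0, 50.0)   # a greenish colour
print(blend_lab([lab_a, lab_b]))    # one mixed colour
print(weave([lab_a, lab_b], 4, 2))  # texture preserving both components
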
@article{p1248,
journal = {IEEE TVCG},
year = 2006,
title = {ASK-graphView: a large scale graph visualization system},
doi = {10.1109/TVCG.2006.120},
url = {http://dx.doi.org/10.1109/TVCG.2006.120},
author = {Abello, J. and van Ham, F. and Neeraj Krishnan},
pages = {669--676},
keywords = {Information visualization, graph visualization, graph clustering},
abstract = {We describe ASK-GraphView, a node-link-based graph visualization system that allows clustering and interactive navigation of large graphs, ranging in size up to 16 million edges. The system uses a scalable architecture and a series of increasingly sophisticated clustering algorithms to construct a hierarchy on an arbitrary, weighted undirected input graph. By lowering the interactivity requirements we can scale to substantially bigger graphs. The user is allowed to navigate this hierarchy in a top down manner by interactively expanding individual clusters. ASK-GraphView also provides facilities for filtering and coloring, annotation and cluster labeling},
}
@article{p1249,
journal = {IEEE TVCG},
year = 2006,
title = {Balancing Systematic and Flexible Exploration of Social Networks},
doi = {10.1109/TVCG.2006.122},
url = {http://dx.doi.org/10.1109/TVCG.2006.122},
author = {Perer, A. and Shneiderman, B.},
pages = {693--700},
keywords = {Social networks, interactive graph visualization, attribute ranking, coordinated views, exploratory data analysis},
abstract = {Social network analysis (SNA) has emerged as a powerful method for understanding the importance of relationships in networks. However, interactive exploration of networks is currently challenging because: (1) it is difficult to find patterns and comprehend the structure of networks with many nodes and links, and (2) current systems are often a medley of statistical methods and overwhelming visual output which leaves many analysts uncertain about how to explore in an orderly manner. This results in exploration that is largely opportunistic. Our contributions are techniques to help structural analysts understand social networks more effectively. We present SocialAction, a system that uses attribute ranking and coordinated views to help users systematically examine numerous SNA measures. Users can (1) flexibly iterate through visualizations of measures to gain an overview, filter nodes, and find outliers, (2) aggregate networks using link structure, find cohesive subgroups, and focus on communities of interest, and (3) untangle networks by viewing different link types separately, or find patterns across different link types using a matrix overview. For each operation, a stable node layout is maintained in the network visualization so users can make comparisons. SocialAction offers analysts a strategy beyond opportunism, as it provides systematic, yet flexible, techniques for exploring social networks},
}
@article{p1250,
journal = {IEEE TVCG},
year = 2006,
title = {Complex Logarithmic Views for Small Details in Large Contexts},
doi = {10.1109/TVCG.2006.126},
url = {http://dx.doi.org/10.1109/TVCG.2006.126},
author = {Bottger, J. and Balzer, M. and Deussen, O.},
pages = {845--852},
keywords = {Detail in context, complex logarithm, conformal mappings, analytic functions, interaction},
abstract = {Commonly known detail in context techniques for the two-dimensional Euclidean space enlarge details and shrink their context using mapping functions that introduce geometrical compression. This makes it difficult or even impossible to recognize shapes for large differences in magnification factors. In this paper we propose to use the complex logarithm and the complex root functions to show very small details even in very large contexts. These mappings are conformal, which means they only locally rotate and scale, thus keeping shapes intact and recognizable. They allow showing details that are orders of magnitude smaller than their surroundings in combination with their context in one seamless visualization. We address the utilization of this universal technique for the interaction with complex two-dimensional data considering the exploration of large graphs and other examples},
}
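The conformal detail-in-context mapping of p1250 can be sketched with Python's complex numbers: translate the focus to the origin and apply the complex logarithm, which expands the neighbourhood of the focus and compresses the far context while preserving local shapes. This is only the core mapping, without the paper's interaction or rendering machinery.

import cmath

def complex_log_view(points, focus):
    """Map 2D points (x, y) through w = log(z - focus).

    Near the focus |z - focus| is small and the logarithm spreads points
    apart (detail); far away, distances are compressed (context). The
    mapping is conformal, so local angles and shapes are preserved.
    """
    out = []
    for x, y in points:
        z = complex(x, y) - complex(*focus)
        if z == 0:                  # the focus itself has no finite image
            out.append(None)
            continue
        w = cmath.log(z)
        out.append((w.real, w.imag))
    return out

# Points spanning five orders of magnitude from the focus stay legible.
pts = [(0.001, 0.0), (0.01, 0.0), (1.0, 0.0), (100.0, 0.0)]
print(complex_log_view(pts, focus=(0.0, 0.0)))
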
@article{p1251,
journal = {IEEE TVCG},
year = 2006,
title = {Dynamic Map Labeling},
doi = {10.1109/TVCG.2006.136},
url = {http://dx.doi.org/10.1109/TVCG.2006.136},
author = {Been, K. and Daiches, E. and Yap, C.},
pages = {773--780},
keywords = {Map labeling, dynamic maps, human-computer interface, label placement, label selection, label filtering, label consistency,computational cartography, GIS, HCI, realtime, preprocessing},
abstract = {We address the problem of filtering, selecting and placing labels on a dynamic map, which is characterized by continuous zooming and panning capabilities. This consists of two interrelated issues. The first is to avoid label popping and other artifacts that cause confusion and interrupt navigation, and the second is to label at interactive speed. In most formulations the static map labeling problem is NP-hard, and a fast approximation might have O(n log n) complexity. Even this is too slow during interaction, when the number of labels shown can be several orders of magnitude less than the number in the map. In this paper we introduce a set of desiderata for "consistent" dynamic map labeling, which has qualities desirable for navigation. We develop a new framework for dynamic labeling that achieves the desiderata and allows for fast interactive display by moving all of the selection and placement decisions into the preprocessing phase. This framework is general enough to accommodate a variety of selection and placement algorithms. It does not appear possible to achieve our desiderata using previous frameworks. Prior to this paper, there were no formal models of dynamic maps or of dynamic labels; our paper introduces both. We formulate a general optimization problem for dynamic map labeling and give a solution to a simple version of the problem. The simple version is based on label priorities and a versatile and intuitive class of dynamic label placements we call "invariant point placements". Despite these restrictions, our approach gives a useful and practical solution. Our implementation is incorporated into the G-Vis system which is a full-detail dynamic map of the continental USA. This demo is available through any browser},
}
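As a rough point of reference for p1251, the sketch below performs greedy, priority-ordered label selection at one fixed scale; the paper's contribution is precisely what this omits, namely keeping selection and placement consistent under continuous zooming and panning. Rectangles and priorities here are hypothetical.

def select_labels(candidates):
    """Greedy label selection: walk candidates in priority order and keep a
    label only if its screen rectangle does not overlap an already-accepted
    one. Each candidate is (priority, (x0, y0, x1, y1), text)."""
    def overlaps(a, b):
        return not (a[2] <= b[0] or b[2] <= a[0] or a[3] <= b[1] or b[3] <= a[1])

    accepted = []
    for prio, rect, text in sorted(candidates, key=lambda c: -c[0]):
        if all(not overlaps(rect, r) for _, r, _ in accepted):
            accepted.append((prio, rect, text))
    return [text for _, _, text in accepted]

cands = [(10, (0, 0, 30, 10), "New York"),
         (8,  (25, 5, 55, 15), "Newark"),       # overlaps New York -> dropped
         (6,  (60, 0, 90, 10), "Philadelphia")]
print(select_labels(cands))
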
@article{p1253,
journal = {IEEE TVCG},
year = 2006,
title = {Enabling Automatic Clutter Reduction in Parallel Coordinate Plots},
doi = {10.1109/TVCG.2006.138},
url = {http://dx.doi.org/10.1109/TVCG.2006.138},
author = {Ellis, G. and Dix, A.},
pages = {717--724},
keywords = {Sampling, random sampling, lens, clutter, occlusion, density reduction, overplotting, information visualisation, parallel coordinates},
abstract = {We have previously shown that random sampling is an effective clutter reduction technique and that a sampling lens can facilitate focus+context viewing of particular regions. This demands an efficient method of estimating the overlap or occlusion of large numbers of intersecting lines in order to automatically adjust the sampling rate within the lens. This paper proposes several ways for measuring occlusion in parallel coordinate plots. An empirical study into the accuracy and efficiency of the occlusion measures shows that a probabilistic approach combined with a 'binning' technique is very fast and yet approaches the accuracy of the more expensive 'true' complete measurement},
}
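One coarse way to estimate the occlusion that p1253 needs for adjusting the lens sampling rate is to rasterise the polyline segments between two adjacent axes into a grid of bins and count over-plotted line passes. The grid size and sampling density below are assumptions, and the paper's probabilistic and 'true' measures are more refined than this sketch.

def occlusion_estimate(lines, bins=32, samples=16):
    """Rough over-plotting score for one pair of adjacent parallel axes.

    `lines` holds (y_left, y_right) pairs normalised to [0, 1]. Each segment
    is sampled at `samples` points across the axis gap and dropped into a
    bins x bins grid of cells; the score is the fraction of per-cell line
    passes that are hidden behind another line (0 = no over-plotting).
    """
    cells = [[set() for _ in range(bins)] for _ in range(bins)]
    for idx, (y0, y1) in enumerate(lines):
        for s in range(samples):
            t = s / (samples - 1)
            x = min(int(t * (bins - 1)), bins - 1)
            y = min(int((y0 + (y1 - y0) * t) * (bins - 1)), bins - 1)
            cells[x][y].add(idx)
    drawn = sum(len(c) for row in cells for c in row)
    hidden = sum(len(c) - 1 for row in cells for c in row if len(c) > 1)
    return hidden / drawn if drawn else 0.0

# Two crossing lines plus an exact duplicate of the first one.
print(occlusion_estimate([(0.1, 0.9), (0.9, 0.1), (0.1, 0.9)]))
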
@article{p1254,
journal = {IEEE TVCG},
year = 2006,
title = {FacetMap: A Scalable Search and Browse Visualization},
doi = {10.1109/TVCG.2006.142},
url = {http://dx.doi.org/10.1109/TVCG.2006.142},
author = {Smith, G. and Czerwinski, M. and Meyers, B. and Robbins, D. and Robertson, G. and Tan, D.S.},
pages = {797--804},
keywords = {Graphical visualization, interactive information retrieval, faceted metadata},
abstract = {The dominant paradigm for searching and browsing large data stores is text-based: presenting a scrollable list of search results in response to textual search term input. While this works well for the Web, there is opportunity for improvement in the domain of personal information stores, which tend to have more heterogeneous data and richer metadata. In this paper, we introduce FacetMap, an interactive, query-driven visualization, generalizable to a wide range of metadata-rich data stores. FacetMap uses a visual metaphor for both input (selection of metadata facets as filters) and output. Results of a user study provide insight into tradeoffs between FacetMap's graphical approach and the traditional text-oriented approach},
}
@article{p1255,
journal = {IEEE TVCG},
year = 2006,
title = {Hierarchical Edge Bundles: Visualization of Adjacency Relations in Hierarchical Data},
doi = {10.1109/TVCG.2006.147},
url = {http://dx.doi.org/10.1109/TVCG.2006.147},
author = {Holten, D.},
pages = {741--748},
keywords = {Network visualization, edge bundling, edge aggregation, edge concentration, curves, graph visualization, tree visualization, node-link diagrams, hierarchies, treemaps},
abstract = {A compound graph is a frequently encountered type of data set. Relations are given between items, and a hierarchy is defined on the items as well. We present a new method for visualizing such compound graphs. Our approach is based on visually bundling the adjacency edges, i.e., non-hierarchical edges, together. We realize this as follows. We assume that the hierarchy is shown via a standard tree visualization method. Next, we bend each adjacency edge, modeled as a B-spline curve, toward the polyline defined by the path via the inclusion edges from one node to another. This hierarchical bundling reduces visual clutter and also visualizes implicit adjacency edges between parent nodes that are the result of explicit adjacency edges between their respective child nodes. Furthermore, hierarchical edge bundling is a generic method which can be used in conjunction with existing tree visualization techniques. We illustrate our technique by providing example visualizations and discuss the results based on an informal evaluation provided by potential users of such visualizations},
}
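The bundling step of p1255 routes each adjacency edge along its path through the hierarchy and then straightens it toward the direct endpoint-to-endpoint line. A common parameterisation, assumed in the sketch below, blends each control point with that straight line using a bundling strength beta; the resulting control points would then be rendered as a B-spline, which is omitted here.

def bundle_control_points(path, beta=0.8):
    """Straighten the hierarchy path P0..P(n-1) of an adjacency edge.

    Each control point is pulled toward the straight endpoint-to-endpoint
    line; beta = 1 follows the hierarchy path exactly (maximal bundling),
    beta = 0 degenerates to a straight, unbundled edge.
    """
    n = len(path)
    p0, pn = path[0], path[-1]
    out = []
    for i, p in enumerate(path):
        t = i / (n - 1)
        straight = (p0[0] + t * (pn[0] - p0[0]), p0[1] + t * (pn[1] - p0[1]))
        out.append((beta * p[0] + (1 - beta) * straight[0],
                    beta * p[1] + (1 - beta) * straight[1]))
    return out

# Path from one leaf up through two internal nodes and down to another leaf.
path = [(0.0, 0.0), (0.2, 1.0), (0.8, 1.0), (1.0, 0.0)]
print(bundle_control_points(path, beta=0.8))
print(bundle_control_points(path, beta=0.0))   # straight line, no bundling
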
@article{p1256,
journal = {IEEE TVCG},
year = 2006,
title = {IPSep-CoLa: An Incremental Procedure for Separation Constraint Layout of Graphs},
doi = {10.1109/TVCG.2006.156},
url = {http://dx.doi.org/10.1109/TVCG.2006.156},
author = {Dwyer, T. and Koren, Y. and Marriott, K.},
pages = {821--828},
keywords = {Graph drawing, constraints, stress majorization, force directed algorithms,multidimensional scaling},
}
@article{p1257,
journal = {IEEE TVCG},
year = 2006,
title = {MatrixExplorer: a Dual-Representation System to Explore Social Networks},
doi = {10.1109/TVCG.2006.160},
url = {http://dx.doi.org/10.1109/TVCG.2006.160},
author = {Henry, N. and Fekete, J.},
pages = {677--684},
keywords = {social networks visualization, node-link diagrams, matrix-based representations, exploratory process, matrix ordering, interactive clustering, consensus},
abstract = {MatrixExplorer is a network visualization system that uses two representations: node-link diagrams and matrices. Its design comes from a list of requirements formalized after several interviews and a participatory design session conducted with social science researchers. Although matrices are commonly used in social networks analysis, very few systems support the matrix-based representations to visualize and analyze networks. MatrixExplorer provides several novel features to support the exploration of social networks with a matrix-based representation, in addition to the standard interactive filtering and clustering functions. It provides tools to reorder (layout) matrices, to annotate and compare findings across different layouts and find consensus among several clusterings. MatrixExplorer also supports node-link diagram views which are familiar to most users and remain a convenient way to publish or communicate exploration results. Matrix and node-link representations are kept synchronized at all stages of the exploration process},
}
@article{p1258,
journal = {IEEE TVCG},
year = 2006,
title = {Measuring Data Abstraction Quality in Multiresolution Visualizations},
doi = {10.1109/TVCG.2006.161},
url = {http://dx.doi.org/10.1109/TVCG.2006.161},
author = {Cui, Q. and Ward, M.O. and Rundensteiner, E.A. and Jing Yang},
pages = {709--716},
keywords = {Metrics, Clustering, Sampling, Multiresolution Visualization},
abstract = {Data abstraction techniques are widely used in multiresolution visualization systems to reduce visual clutter and facilitate analysis from overview to detail. However, analysts are usually unaware of how well the abstracted data represent the original dataset, which can impact the reliability of results gleaned from the abstractions. In this paper, we define two data abstraction quality measures for computing the degree to which the abstraction conveys the original dataset: the histogram difference measure and the nearest neighbor measure. They have been integrated within XmdvTool, a public-domain multiresolution visualization system for multivariate data analysis that supports sampling as well as clustering to simplify data. Several interactive operations are provided, including adjusting the data abstraction level, changing selected regions, and setting the acceptable data abstraction quality level. Conducting these operations, analysts can select an optimal data abstraction level. Also, analysts can compare different abstraction methods using the measures to see how well relative data density and outliers are maintained, and then select an abstraction method that meets the requirement of their analytic tasks},
}
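The two quality measures named in p1258 can be approximated for one-dimensional data as below; the bin count and the use of absolute distance are illustrative assumptions, not XmdvTool's exact definitions.

def histogram_difference(original, abstracted, bins=10):
    """Half the L1 distance between the normalised histograms of the original
    and abstracted data; 0 means identical distributions, 1 means disjoint."""
    lo, hi = min(original), max(original)
    width = (hi - lo) / bins or 1.0
    def hist(data):
        h = [0] * bins
        for v in data:
            h[max(0, min(int((v - lo) / width), bins - 1))] += 1
        return [c / len(data) for c in h]
    ho, ha = hist(original), hist(abstracted)
    return 0.5 * sum(abs(a - b) for a, b in zip(ho, ha))

def nearest_neighbor_measure(original, abstracted):
    """Average distance from each original point to its closest representative
    in the abstraction; smaller means a more faithful abstraction."""
    return sum(min(abs(o - a) for a in abstracted) for o in original) / len(original)

data = [1, 2, 2, 3, 8, 9, 9, 10]
sample = [2, 9]    # an abstraction keeping one representative per cluster
print(histogram_difference(data, sample))
print(nearest_neighbor_measure(data, sample))
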
@article{p1259,
journal = {IEEE TVCG},
year = 2006,
title = {Multi-Scale Banking to 45 Degrees},
doi = {10.1109/TVCG.2006.163},
url = {http://dx.doi.org/10.1109/TVCG.2006.163},
author = {Heer, J. and Agrawala, M.},
pages = {701--708},
keywords = {Information visualization, banking to 45 degrees, line charts, time-series, sparklines, graphical perception},
abstract = {In his text Visualizing Data, William Cleveland demonstrates how the aspect ratio of a line chart can affect an analyst's perception of trends in the data. Cleveland proposes an optimization technique for computing the aspect ratio such that the average absolute orientation of line segments in the chart is equal to 45 degrees. This technique, called banking to 45 degrees, is designed to maximize the discriminability of the orientations of the line segments in the chart. In this paper, we revisit this classic result and describe two new extensions. First, we propose alternate optimization criteria designed to further improve the visual perception of line segment orientations. Second, we develop multi-scale banking, a technique that combines spectral analysis with banking to 45 degrees. Our technique automatically identifies trends at various frequency scales and then generates a banked chart for each of these scales. We demonstrate the utility of our techniques in a range of visualization tools and analysis examples},
}
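Cleveland's criterion, which p1259 extends, chooses the aspect ratio at which the average absolute orientation of the chart's line segments is 45 degrees. The sketch below finds that ratio by bisection on a log scale; the alternate optimisation criteria and the multi-scale spectral extension from the paper are not reproduced.

import math

def bank_to_45(points):
    """Return the y/x scale factor at which the mean absolute orientation of
    the polyline's segments equals 45 degrees."""
    slopes = []
    for (x0, y0), (x1, y1) in zip(points, points[1:]):
        if x1 != x0:
            slopes.append((y1 - y0) / (x1 - x0))

    def mean_abs_orientation(alpha):
        return sum(abs(math.degrees(math.atan(s * alpha))) for s in slopes) / len(slopes)

    lo, hi = 1e-6, 1e6
    for _ in range(100):                 # orientation grows monotonically with alpha
        mid = math.sqrt(lo * hi)
        if mean_abs_orientation(mid) < 45.0:
            lo = mid
        else:
            hi = mid
    return math.sqrt(lo * hi)

# A nearly flat series gets stretched vertically until its segments
# average 45 degrees of orientation.
series = [(i, math.sin(i / 3.0) * 0.1) for i in range(30)]
print(bank_to_45(series))
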
@article{p1260,
journal = {IEEE TVCG},
year = 2006,
title = {Network Visualization by Semantic Substrates},
doi = {10.1109/TVCG.2006.166},
url = {http://dx.doi.org/10.1109/TVCG.2006.166},
author = {Shneiderman, B. and Aris, A.},
pages = {733--740},
keywords = {Network visualization, semantic substrate, information visualization, graphical user interfaces},
abstract = {Networks have remained a challenge for information visualization designers because of the complex issues of node and link layout coupled with the rich set of tasks that users present. This paper offers a strategy based on two principles: (1) layouts are based on user-defined semantic substrates, which are non-overlapping regions in which node placement is based on node attributes, (2) users interactively adjust sliders to control link visibility to limit clutter and thus ensure comprehensibility of source and destination. Scalability is further facilitated by user control of which nodes are visible. We illustrate our semantic substrates approach as implemented in NVSS 1.0 with legal precedent data for up to 1122 court cases in three regions with 7645 legal citations},
}
@article{p1261,
journal = {IEEE TVCG},
year = 2006,
title = {Smashing Peacocks Further: Drawing Quasi-Trees from Biconnected Components},
doi = {10.1109/TVCG.2006.177},
url = {http://dx.doi.org/10.1109/TVCG.2006.177},
author = {Archambault, D. and Munzner, T. and Auber, D.},
pages = {813--820},
keywords = {Graph and network visualization, quasi-tree},
abstract = {Quasi-trees, namely graphs with tree-like structure, appear in many application domains, including bioinformatics and computer networks. Our new SPF approach exploits the structure of these graphs with a two-level approach to drawing, where the graph is decomposed into a tree of biconnected components. The low-level biconnected components are drawn with a force-directed approach that uses a spanning tree skeleton as a starting point for the layout. The higher-level structure of the graph is a true tree with meta-nodes of variable size that contain each biconnected component. That tree is drawn with a new area-aware variant of a tree drawing algorithm that handles high-degree nodes gracefully, at the cost of allowing edge-node overlaps. SPF performs an order of magnitude faster than the best previous approaches, while producing drawings of commensurate or improved quality.},
}
@article{p1262,
journal = {IEEE TVCG},
year = 2006,
title = {Software Design Patterns for Information Visualization},
doi = {10.1109/TVCG.2006.178},
url = {http://dx.doi.org/10.1109/TVCG.2006.178},
author = {Heer, J. and Agrawala, M.},
pages = {853--860},
keywords = {Design patterns, information visualization, software engineering, object-oriented programming},
abstract = {Despite a diversity of software architectures supporting information visualization, it is often difficult to identify, evaluate, and re-apply the design solutions implemented within such frameworks. One popular and effective approach for addressing such difficulties is to capture successful solutions in design patterns, abstract descriptions of interacting software components that can be customized to solve design problems within a particular context. Based upon a review of existing frameworks and our own experiences building visualization software, we present a series of design patterns for the domain of information visualization. We discuss the structure, context of use, and interrelations of patterns spanning data representation, graphics, and interaction. By representing design knowledge in a reusable form, these patterns can be used to facilitate software design, implementation, and evaluation, and improve developer education and communication},
}
@article{p1263,
journal = {IEEE TVCG},
year = 2006,
title = {Spatial Analysis of News Sources},
doi = {10.1109/TVCG.2006.179},
url = {http://dx.doi.org/10.1109/TVCG.2006.179},
author = {Mehler, A. and Bao, Y. and Li, X. and Wang, Y. and Skiena, S.},
pages = {765--772},
keywords = {GIS, geographic visualization, text and document visualization, information analytics, WWW data visualization, spidering, newspapers},
abstract = {People in different places talk about different things. This interest distribution is reflected by the newspaper articles circulated in a particular area. We use data from our large-scale newspaper analysis system (Lydia) to make entity datamaps, a spatial visualization of the interest in a given named entity. Our goal is to identify entities which display regional biases. We develop a model of estimating the frequency of reference of an entity in any given city from the reference frequency centered in surrounding cities, and techniques for evaluating the spatial significance of this distribution},
}
@article{p1264,
journal = {IEEE TVCG},
year = 2006,
title = {The Perceptual Scalability of Visualization},
doi = {10.1109/TVCG.2006.184},
url = {http://dx.doi.org/10.1109/TVCG.2006.184},
author = {Yost, B. and North, C.},
pages = {837--844},
keywords = {Information visualization, large displays, empirical evaluation},
abstract = {Larger, higher resolution displays can be used to increase the scalability of information visualizations. But just how much can scalability increase using larger displays before hitting human perceptual or cognitive limits? Are the same visualization techniques that are good on a single monitor also the techniques that are best when they are scaled up using large, high-resolution displays? To answer these questions we performed a controlled experiment on user performance time, accuracy, and subjective workload when scaling up data quantity with different space-time-attribute visualizations using a large, tiled display. Twelve college students used small multiples, embedded bar matrices, and embedded time-series graphs either on a 2 megapixel (Mp) display or with data scaled up using a 32 Mp tiled display. Participants performed various overview and detail tasks on geospatially-referenced multidimensional time-series data. Results showed that current designs are perceptually scalable because they result in a decrease in task completion time when normalized per number of data attributes along with no decrease in accuracy. It appears that, for the visualizations selected for this study, the relative comparison between designs is generally consistent between display sizes. However, results also suggest that encoding is more important on a smaller display while spatial grouping is more important on a larger display. Some suggestions for designers are provided based on our experience designing visualizations for large displays.},
}
@article{p1265,
journal = {IEEE TVCG},
year = 2006,
title = {Topographic Visualization of Prefix Propagation in the Internet},
doi = {10.1109/TVCG.2006.185},
url = {http://dx.doi.org/10.1109/TVCG.2006.185},
author = {Cortese, P.F. and Di Battista, G. and Moneta, A. and Patrignani, M. and Pizzonia, M.},
pages = {725--732},
keywords = {Interdomain Routing, Internet Visualization, Graph Drawing, Spring Embedder},
abstract = {We propose a new metaphor for the visualization of prefixes propagation in the Internet. Such a metaphor is based on the concept of topographic map and allows to put in evidence the relative importance of the Internet Service Providers (ISPs) involved in the routing of the prefix. Based on the new metaphor we propose an algorithm for computing layouts and experiment with such algorithm on a test suite taken from the real Internet. The paper extends the visualization approach of the BGPlay service, which is an Internet routing monitoring tool widely used by ISP operators},
}
@article{p1266,
journal = {IEEE TVCG},
year = 2006,
title = {User Interaction with Scatterplots on Small Screens - A Comparative Evaluation of Geometric-Semantic Zoom and Fisheye Distortion},
doi = {10.1109/TVCG.2006.187},
url = {http://dx.doi.org/10.1109/TVCG.2006.187},
author = {Buering, T. and Gerken, J. and Reiterer, H.},
pages = {829--836},
keywords = {Small screen, PDA, scatterplot, zoom, fisheye, focus+context},
abstract = {},
}
@article{p1267,
journal = {IEEE TVCG},
year = 2006,
title = {Visual Analysis of Multivariate State Transition Graphs},
doi = {10.1109/TVCG.2006.192},
url = {http://dx.doi.org/10.1109/TVCG.2006.192},
author = {Pretorius, A.J. and van Wijk, J.J.},
pages = {685--692},
keywords = {Graph visualization, multivariate visualization, interactive clustering, state spaces, transition systems, finite state machines},
abstract = {We present a new approach for the visual analysis of state transition graphs. We deal with multivariate graphs where a number of attributes are associated with every node. Our method provides an interactive attribute-based clustering facility. Clustering results in metric, hierarchical and relational data, represented in a single visualization. To visualize hierarchically structured quantitative data, we introduce a novel technique: the bar tree. We combine this with a node-link diagram to visualize the hierarchy and an arc diagram to visualize relational data. Our method enables the user to gain significant insight into large state transition graphs containing tens of thousands of nodes. We illustrate the effectiveness of our approach by applying it to a real-world use case. The graph we consider models the behavior of an industrial wafer stepper and contains 55 043 nodes and 289 443 edges},
}
@article{p1268,
journal = {IEEE TVCG},
year = 2006,
title = {Visual Exploration of Complex Time-Varying Graphs},
doi = {10.1109/TVCG.2006.193},
url = {http://dx.doi.org/10.1109/TVCG.2006.193},
author = {Kumar, G. and Garland, M.},
pages = {805--812},
keywords = {Graph and network visualization, financial data visualization, hierarchy visualization, time series data},
abstract = {},
}
@article{p1269,
journal = {IEEE TVCG},
year = 2006,
title = {Visualization of Barrier Tree Sequences},
doi = {10.1109/TVCG.2006.196},
url = {http://dx.doi.org/10.1109/TVCG.2006.196},
author = {Heine, C. and Scheuermann, G. and Flamm, C. and Hofacker, I.L. and Stadler, P.F.},
pages = {781--788},
keywords = {Graph drawing, dynamic graph, RNA folding, energy landscape, fitness landscape, barrier tree},
abstract = {Dynamical models that explain the formation of spatial structures of RNA molecules have reached a complexity that requires novel visualization methods that help to analyze the validity of these models. We focus on the visualization of so-called folding landscapes of a growing RNA molecule. Folding landscapes describe the energy of a molecule as a function of its spatial configuration; thus they are huge and high dimensional. Their most salient features, however, are encapsulated by their so-called barrier tree that reflects the local minima and their connecting saddle points. For each length of the growing RNA chain there exists a folding landscape. We visualize the sequence of folding landscapes by an animation of the corresponding barrier trees. To generate the animation, we adapt the foresight layout with tolerance algorithm for general dynamic graph layout problems. Since it is very general, we give a detailed description of each phase: constructing a supergraph for the trees, layout of that supergraph using a modified DOT algorithm, and presentation techniques for the final animation},
}
@article{p1270,
journal = {IEEE TVCG},
year = 2006,
title = {Visualization of Geo-spatial Point Sets via Global Shape Transformation and Local Pixel Placement},
doi = {10.1109/TVCG.2006.198},
url = {http://dx.doi.org/10.1109/TVCG.2006.198},
author = {Panse, C. and Sips, M. and Keim, D.A. and North, S.C.},
pages = {749--756},
keywords = {Geo-spatial Data, Shape Transformation, Cartogram, Pixel Visualization},
abstract = {In many applications, data is collected and indexed by geo-spatial location. Discovering interesting patterns through visualization is an important way of gaining insight about such data. A previously proposed approach is to apply local placement functions such as PixelMaps that transform the input data set into a solution set that preserves certain constraints while making interesting patterns more obvious and avoid data loss from overplotting. In experience, this family of spatial transformations can reveal fine structures in large point sets, but it is sometimes difficult to relate those structures to basic geographic features such as cities and regional boundaries. Recent information visualization research has addressed other types of transformation functions that make spatially-transformed maps with recognizable shapes. These types of spatial-transformation are called global shape functions. In particular, cartogram-based map distortion has been studied. On the other hand, cartogram-based distortion does not handle point sets readily. In this study, we present a framework that allows the user to specify a global shape function and a local placement function. We combine cartogram-based layout (global shape) with PixelMaps (local placement), obtaining some of the benefits of each toward improved exploration of dense geo-spatial data sets},
}
@article{p1271,
journal = {IEEE TVCG},
year = 2006,
title = {Visualizing Business Data with Generalized Treemaps},
doi = {10.1109/TVCG.2006.200},
url = {http://dx.doi.org/10.1109/TVCG.2006.200},
author = {Vliegen, R. and van Wijk, J.J. and van der Linden, E.-J.},
pages = {789--796},
keywords = {Information visualization, treemap, business graphics, hierarchical data},
abstract = {Business data is often presented using simple business graphics. These familiar visualizations are effective for providing overviews, but fall short for the presentation of large amounts of detailed information. Treemaps can provide such detail, but are often not easy to understand. We present how standard treemap algorithms can be adapted such that the results mimic familiar business graphics. Specifically, we present the use of different layout algorithms per level, a number of variations of the squarified algorithm, the use of variable borders, and the use of non-rectangular shapes. The combined use of these leads to histograms, pie charts and a variety of other styles},
}
@article{p1272,
journal = {IEEE TVCG},
year = 2006,
title = {Worldmapper: The World as You've Never Seen it Before},
doi = {10.1109/TVCG.2006.202},
url = {http://dx.doi.org/10.1109/TVCG.2006.202},
author = {Dorling, D. and Barford, A. and Newman, M.},
pages = {757--764},
keywords = {Geographic Visualization, Computer Graphics, Worldmapper, Data Visualization, Social Visualization, Cartogram},
abstract = {This paper describes the Worldmapper project, which makes use of novel visualization techniques to represent a broad variety of social and economic data about the countries of the world. The goal of the project is to use the map projections known as cartograms to depict comparisons and relations between different territories, and its execution raises many interesting design challenges that were not all apparent at the outset. We discuss the approaches taken towards these challenges, some of which may have considerably broad application. We conclude by commenting on the positive initial response to the Worldmapper images published on the Web, which we believe is due, at least in part, to the particular effectiveness of the cartogram as a tool for communicating quantitative geographic data},
}
@inproceedings{p1387,
booktitle = {Proc. InfoVis},
year = 2005,
title = {A note on space-filling visualizations and space-filling curves},
doi = {10.1109/INFVIS.2005.1532145},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532145},
author = {Wattenberg, M.},
pages = {181--186},
keywords = {Hierarchy Visualization},
abstract = {A recent line of treemap research has focused on layout algorithms that optimize properties such as stability, preservation of ordering information, and aspect ratio of rectangles. No ideal treemap layout algorithm has been found, and so it is natural to explore layouts that produce nonrectangular regions. This note describes a connection between space-filling visualizations and the mathematics of space-filling curves, and uses that connection to characterize a family of layout algorithms which produce nonrectangular regions but enjoy geometric continuity under changes to the data and legibility even for highly unbalanced trees.},
}
@inproceedings{p1388,
booktitle = {Proc. InfoVis},
year = 2005,
title = {A sky dome visualisation for identification of astronomical orientations},
doi = {10.1109/INFVIS.2005.1532123},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532123},
author = {Zotti, G. and Groller, E.},
pages = {8--15},
keywords = { Archaeology, Astronomy, data mining},
abstract = {It has long been known that ancient temples were frequently oriented along the cardinal directions or to certain points along the horizon where Sun or Moon rise or set on special days of the year. In the last decades, archaeologists have found evidence of even older building structures buried in the soil, with doorways that also appear to have distinct orientations. This paper presents a novel diagram combining archaeological maps with a folded-apart, flattened view of the whole sky, showing the local horizon and the daily paths of Sun, Moon and brighter stars. By use of this diagram, interesting groupings of astronomical orientation directions, e.g. to certain Sunrise and Sunset points could be identified, which were evidently used to mark certain days of the year. Orientations to a few significant stars very likely indicated the beginning of the agricultural year in the middle neolithic period},
}
@inproceedings{p1389,
booktitle = {Proc. InfoVis},
year = 2005,
title = {Adapting the cognitive walkthrough method to assess the usability of a knowledge domain visualization},
doi = {10.1109/INFVIS.2005.1532147},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532147},
author = {Allendoerfer, K. and Aluker, S. and Panjwani, G. and Proctor, J. and Sturtz, D. and Vukovic, M. and Chen, C.},
pages = {195--202},
keywords = {Cognitive Walkthrough, usability inspection methods, bibliographic networks},
abstract = {The usability of knowledge domain visualization (KDViz) tools can be assessed at several levels. Cognitive walkthrough (CW) is a well known usability inspection method that focuses on how easily users can learn software through exploration. Typical applications of CW follow structured tasks where user goals and action sequences that lead to achievement of the goals are well defined. KDViz and other information visualization tools, however, are typically designed for users to explore data and user goals and actions are less well understood. In this paper, we describe how the traditional CW method may be adapted for assessing the usability of these systems. We apply the adapted version of CW to CiteSpace, a KDViz tool that uses bibliometric analyses to create visualizations of scientific literatures. We describe usability issues identified by the adapted CW and discuss how CiteSpace supported the completion of tasks, such as identifying research fronts, and the achievement of goals. Finally, we discuss improvements to the adapted CW and issues to be addressed before applying it to a wider range of KDViz tools.},
}
@inproceedings{p1390,
booktitle = {Proc. InfoVis},
year = 2005,
title = {An evaluation of content browsing techniques for hierarchical space-filling visualizations},
doi = {10.1109/INFVIS.2005.1532132},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532132},
author = {Shi, K. and Irani, P. and Li, B.},
pages = {81--88},
keywords = {browsing, distortion, hierarchy navigation, focus+context, drill-down, space-filling visualization, TreeMap, semantic zooming},
abstract = {Space-filling visualizations, such as the TreeMap, are well suited for displaying the properties of nodes in hierarchies. To browse the contents of the hierarchy, the primary mode of interaction is by drilling down through many successive layers. In this paper we introduce a distortion algorithm based on fisheye and continuous zooming techniques for browsing data in the TreeMap representation. The motivation behind the distortion approach is for assisting users to rapidly browse information displayed in the TreeMap without opening successive layers of the hierarchy. Two experiments were conducted to evaluate the new approach. In the first experiment (N=20) the distortion approach is compared to the drill down method. Results show that subjects are quicker and more accurate in locating targets of interest using the distortion method. The second experiment (N=12) evaluates the effectiveness of the two approaches in a task requiring context, we define as the context browsing task. The results show that subjects are quicker and more accurate in locating targets with the distortion technique in the context browsing task.},
}
@inproceedings{p1391,
booktitle = {Proc. InfoVis},
year = 2005,
title = {An interactive 3D integration of parallel coordinates and star glyphs},
doi = {10.1109/INFVIS.2005.1532141},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532141},
author = {Fanea, E. and Carpendale, S. and Isenberg, T.},
pages = {149--156},
keywords = {Parallel Glyphs, parallel coordinates, star glyphs, multi-dimensional data sets, 3D visualization},
abstract = {Parallel coordinates are a powerful method for visualizing multidimensional data but, when applied to large data sets, they become cluttered and difficult to read. Star glyphs, on the other hand, can be used to display either the attributes of a data item or the values across all items for a single attribute. Star glyphs may readily provide a quick impression; however, since the full data set requires multiple glyphs, overall readings are more difficult. We present parallel glyphs, an interactive integration of the visual representations of parallel coordinates and star glyphs that utilizes the advantages of both representations to offset the disadvantages they have separately. We discuss the role of uniform and stepped colour scales in the visual comparison of non-adjacent items and star glyphs. Parallel glyphs provide capabilities for focus-in-context exploration using two types of lenses and interactions specific to the 3D space.},
}
@inproceedings{p1392,
booktitle = {Proc. InfoVis},
year = 2005,
title = {An optimization-based approach to dynamic visual context management},
doi = {10.1109/INFVIS.2005.1532146},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532146},
author = {Wen, Z. and Zhou, M.X. and Aggarwal, V.},
pages = {187--194},
keywords = {intelligent multimodal interfaces, visual context management, automated generation of visualization, visual momentum},
abstract = {We are building an intelligent multimodal conversation system to aid users in exploring large and complex data sets. To tailor to diverse user queries introduced during a conversation, we automate the generation of system responses, including both spoken and visual outputs. In this paper, we focus on the problem of visual context management, a process that dynamically updates an existing visual display to effectively incorporate new information requested by subsequent user queries. Specifically, we develop an optimization based approach to visual context management. Compared to existing approaches, which normally handle predictable visual context updates, our work offers two unique contributions. First, we provide a general computational framework that can effectively manage a visual context for diverse, unanticipated situations encountered in a user system conversation. Moreover, we optimize the satisfaction of both semantic and visual constraints, which otherwise are difficult to balance using simple heuristics. Second, we present an extensible representation model that uses feature based metrics to uniformly define all constraints. We have applied our work to two different applications and our evaluation has shown the promise of this work.},
}
@inproceedings{p1393,
booktitle = {Proc. InfoVis},
year = 2005,
title = {Baby names, visualization, and social data analysis},
doi = {10.1109/INFVIS.2005.1532122},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532122},
author = {Wattenberg, M.},
pages = {1--7},
keywords = {Design Study, Time-Varying Data Visualization, Human-Computer Interaction},
abstract = {The Name Voyager, a Web based visualization of historical trends in baby naming, has proven remarkably popular. This paper discusses the interaction techniques it uses for smooth visual exploration of thousands of time series. We also describe design decisions behind the application and lessons learned in creating an application that makes do-it-yourself data mining popular. The prime lesson, it is hypothesized, is that an information visualization tool may be fruitfully viewed not as a tool but as part of an online social environment. In other words, to design a successful exploratory data analysis tool, one good strategy is to create a system that enables "social" data analysis},
}
@inproceedings{p1394,
booktitle = {Proc. InfoVis},
year = 2005,
title = {Dig-CoLa: directed graph layout through constrained energy minimization},
doi = {10.1109/INFVIS.2005.1532130},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532130},
author = {Dwyer, T. and Koren, Y.},
pages = {65--72},
keywords = {},
abstract = {We describe a new method for visualization of directed graphs. The method combines constraint programming techniques with a high performance force directed placement (FDP) algorithm so that the directed nature of the graph is highlighted while useful properties of FDP - such as emphasis of symmetries and preservation of proximity relations - are retained. Our algorithm automatically identifies those parts of the digraph that contain hierarchical information and draws them accordingly. Additionally, those parts that do not contain hierarchy are drawn at the same quality expected from a nonhierarchical, undirected layout algorithm. An interesting application of our algorithm is directional multidimensional scaling (DMDS). DMDS deals with low dimensional embedding of multivariate data where we want to emphasize the overall flow in the data (e.g. chronological progress) along one of the axes.},
}
@inproceedings{p1395,
booktitle = {Proc. InfoVis},
year = 2005,
title = {Dynamic visualization of graphs with extended labels},
doi = {10.1109/INFVIS.2005.1532131},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532131},
author = {Wong, P.C. and Mackey, P. and Perrine, K. and Eagan, J. and Foote, H. and Thomas, J.},
pages = {73--80},
keywords = {Graph Label Placement, Dynamic Animation, Graph Visualization, Information Visualization},
abstract = {The paper describes a novel technique to visualize graphs with extended node and link labels. The lengths of these labels range from a short phrase to a full sentence to an entire paragraph and beyond. Our solution is different from all the existing approaches that almost always rely on intensive computational effort to optimize the label placement problem. Instead, we share the visualization resources with the graph and present the label information in static, interactive, and dynamic modes without the requirement for tackling the intractability issues. This allows us to reallocate the computational resources for dynamic presentation of real time information. The paper includes a user study to evaluate the effectiveness and efficiency of the visualization technique.},
}
@inproceedings{p1396,
booktitle = {Proc. InfoVis},
year = 2005,
title = {Elastic hierarchies: combining treemaps and node-link diagrams},
doi = {10.1109/INFVIS.2005.1532129},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532129},
author = {Zhao, S. and McGuffin, M.J. and Chignell, M.H.},
pages = {57--64},
keywords = {Elastic Hierarchies, Treemaps, node-link diagrams, hybrids, combinations, overview+detail, multiple views, trees, interaction techniques, interactive visualization},
abstract = {We investigate the use of elastic hierarchies for representing trees, where a single graphical depiction uses a hybrid mixture, or "interleaving", of more basic forms at different nodes of the tree. In particular, we explore combinations of node link and treemap forms, to combine the space efficiency of treemaps with the structural clarity of node link diagrams. A taxonomy is developed to characterize the design space of such hybrid combinations. A software prototype is described, which we used to explore various techniques for visualizing, browsing and interacting with elastic hierarchies, such as side by side overview and detail views, highlighting and rubber banding across views, visualization of multiple foci, and smooth animations across transitions. The paper concludes with a discussion of the characteristics of elastic hierarchies and suggestions for research on their properties and uses.},
}
@inproceedings{p1397,
booktitle = {Proc. InfoVis},
year = 2005,
title = {Flow map layout},
doi = {10.1109/INFVIS.2005.1532150},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532150},
author = {Phan, D. and Xiao, L. and Yeh, R. and Hanrahan, P.},
pages = {219--224},
keywords = {flow maps, GIS, hierarchical clustering},
abstract = {Cartographers have long used flow maps to show the movement of objects from one location to another, such as the number of people in a migration, the amount of goods being traded, or the number of packets in a network. The advantage of flow maps is that they reduce visual clutter by merging edges. Most flow maps are drawn by hand and there are few computer algorithms available. We present a method for generating flow maps using hierarchical clustering given a set of nodes, positions, and flow data between the nodes. Our techniques are inspired by graph layout algorithms that minimize edge crossings and distort node positions while maintaining their relative position to one another. We demonstrate our technique by producing flow maps for network traffic, census data, and trade data.},
}
@inproceedings{p1398,
booktitle = {Proc. InfoVis},
year = 2005,
title = {Graph-theoretic scagnostics},
doi = {10.1109/INFVIS.2005.1532142},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532142},
author = {Wilkinson, L. and Anand, A. and Grossman, R.},
pages = {157--164},
keywords = {visualization, statistical graphics},
abstract = {We introduce Tukey and Tukey scagnostics and develop graph-theoretic methods for implementing their procedure on large datasets.},
}
@inproceedings{p1399,
booktitle = {Proc. InfoVis},
year = 2005,
title = {Highlighting conflict dynamics in event data},
doi = {10.1109/INFVIS.2005.1532135},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532135},
author = {Brandes, U. and Fleischer, D. and Lerner, J.},
pages = {103--110},
keywords = {information visualization, text mining, event analysis, time-dependent visualization},
abstract = {We present a method for visual summary of bilateral conflict structures embodied in event data. Such data consists of actors linked by time stamped events, and may be extracted from various sources such as news reports and dossiers. When analyzing political events, it is of particular importance to be able to recognize conflicts and actors involved in them. By projecting actors into a conflict space, we are able to highlight the main opponents in a series of tens of thousands of events, and provide a graphic overview of the conflict structure. Moreover, our method allows for smooth animation of the dynamics of a conflict.},
}
@inproceedings{p1400,
booktitle = {Proc. InfoVis},
year = 2005,
title = {Importance-driven visualization layouts for large time series data},
doi = {10.1109/INFVIS.2005.1532148},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532148},
author = {Hao, M.C. and Dayal, U. and Keim, D.A. and Schreck, T.},
pages = {203--210},
keywords = {Information Visualization, Time Series, Space-Filling Layout Generation},
abstract = {Time series are an important type of data with applications in virtually every aspect of the real world. Often a large number of time series have to be monitored and analyzed in parallel. Sets of time series may show intrinsic hierarchical relationships and varying degrees of importance among the individual time series. Effective techniques for visually analyzing large sets of time series should encode the relative importance and hierarchical ordering of the time series data by size and position, and should also provide a high degree of regularity in order to support comparability by the analyst. In this paper, we present a framework for visualizing large sets of time series. Based on the notion of inter time series importance relationships, we define a set of objective functions that space-filling layout schemes for time series data should obey. We develop an efficient algorithm addressing the identified problems by generating layouts that reflect hierarchy and importance based relationships in a regular layout with favorable aspect ratios. We apply our technique to a number of real world data sets including sales and stock data, and we compare our technique with an aspect ratio aware variant of the well known TreeMap algorithm. The examples show the advantages and practical usefulness of our layout algorithm.},
}
@inproceedings{p1401,
booktitle = {Proc. InfoVis},
year = 2005,
title = {Interactive Sankey diagrams},
doi = {10.1109/INFVIS.2005.1532152},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532152},
author = {Riehmann, P. and Hanfler, M. and Froehlich, B.},
pages = {233--240},
keywords = {Sankey diagram, flow diagram},
abstract = {We present a system that allows users to interactively explore complex flow scenarios represented as Sankey diagrams. Our system provides an overview of the flow graph and allows users to zoom in and explore details on demand. The support for quantitative flow tracing across the flow graph as well as representations at different levels of detail facilitate the understanding of complex flow situations. The energy flow in a city serves as a sample scenario for our system. Different forms of energy are distributed within the city and they are transformed into heat, electricity, or other forms of energy. These processes are visualized and interactively explored. In addition our system can be used as a planning tool for the exploration of alternative scenarios by interactively manipulating different parameters in the energy flow network.},
}
@inproceedings{p1402,
booktitle = {Proc. InfoVis},
year = 2005,
title = {Interactive visualization of genealogical graphs},
doi = {10.1109/INFVIS.2005.1532124},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532124},
author = {McGuffin, M.J. and Balakrishnan, R.},
pages = {16--23},
keywords = {genealogy, genealogies, family trees, kinship, multi-trees, graph drawing, graph theory, graph browsing and navigation},
abstract = {The general problem of visualizing "family trees", or genealogical graphs, in 2D, is considered. A graph theoretic analysis is given, which identifies why genealogical graphs can be difficult to draw. This motivates some novel graphical representations, including one based on a dual tree, a subgraph formed by the union of two trees. Dual trees can be drawn in various styles, including an indented outline style, and allow users to browse general multitrees in addition to genealogical graphs, by transitioning between different dual tree views. A software prototype for such browsing is described, that supports smoothly animated transitions, automatic camera framing, rotation of subtrees, and a novel interaction technique for expanding or collapsing subtrees to any depth with a single mouse drag},
}
@inproceedings{p1403,
booktitle = {Proc. InfoVis},
year = 2005,
title = {Low-level components of analytic activity in information visualization},
doi = {10.1109/INFVIS.2005.1532136},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532136},
author = {Amar, R. and Eagan, J. and Stasko, J.},
pages = {111--117},
keywords = {Analytic activity, taxonomy, knowledge discovery, design, evaluation},
abstract = {Existing system level taxonomies of visualization tasks are geared more towards the design of particular representations than the facilitation of user analytic activity. We present a set of ten low level analysis tasks that largely capture people's activities while employing information visualization tools for understanding data. To help develop these tasks, we collected nearly 200 sample questions from students about how they would analyze five particular data sets from different domains. The questions, while not being totally comprehensive, illustrated the sheer variety of analytic questions typically posed by users when employing information visualization systems. We hope that the presented set of tasks is useful for information visualization system designers as a kind of common substrate to discuss the relative analytic capabilities of the systems. Further, the tasks may provide a form of checklist for system designers.},
}
@inproceedings{p1404,
booktitle = {Proc. InfoVis},
year = 2005,
title = {Multivariate glyphs for multi-object clusters},
doi = {10.1109/INFVIS.2005.1532140},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532140},
author = {Chlan, E.B. and Rheingans, P.},
pages = {141--148},
keywords = {information visualization, multivariate visualization, distribution, aggregated data},
abstract = {Aggregating items can simplify the display of huge quantities of data values at the cost of losing information about the attribute values of the individual items. We propose a distribution glyph, in both two- and three-dimensional forms, which specifically addresses the concept of how the aggregated data is distributed over the possible range of values. It is capable of displaying distribution, variability and extent information for up to four attributes at a time of multivariate, clustered data. User studies validate the concept, showing that both glyphs are just as good as raw data and the 3D glyph is better for answering some questions.},
}
@inproceedings{p1405,
booktitle = {Proc. InfoVis},
year = 2005,
title = {Parallel sets: visual analysis of categorical data},
doi = {10.1109/INFVIS.2005.1532139},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532139},
author = {Bendix, F. and Kosara, R. and Hauser, H.},
pages = {133--140},
keywords = { categorical data, meta information, interaction},
abstract = {The discrete nature of categorical data makes it a particular challenge for visualization. Methods that work very well for continuous data are often hardly usable with categorical dimensions. Only few methods deal properly with such data, mostly because of the discrete nature of categorical data, which does not translate well into the continuous domains of space and color. Parallel sets is a new visualization method that adopts the layout of parallel coordinates, but substitutes the individual data points by a frequency based representation. This abstracted view, combined with a set of carefully designed interactions, supports visual data analysis of large and complex data sets. The technique allows efficient work with meta data, which is particularly important when dealing with categorical datasets. By creating new dimensions from existing ones, for example, the user can filter the data according to his or her current needs. We also present the results from an interactive analysis of CRM data using parallel sets. We demonstrate how the flexible layout eases the process of knowledge crystallization, especially when combined with a sophisticated interaction scheme.},
}
@inproceedings{p1406,
booktitle = {Proc. InfoVis},
year = 2005,
title = {PRISAD: a partitioned rendering infrastructure for scalable accordion drawing},
doi = {10.1109/INFVIS.2005.1532127},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532127},
author = {Slack, J. and Hildebrand, K. and Munzner, T.},
pages = {41--48},
keywords = {Focus+Context, Information Visualization, Real Time Rendering, Progressive Rendering},
abstract = {We present PRISAD, the first generic rendering infrastructure for information visualization applications that use the accordion drawing technique: rubber sheet navigation with guaranteed visibility for marked areas of interest. Our new rendering algorithms are based on the partitioning of screen space, which allows us to handle dense dataset regions correctly. The algorithms in previous work led to incorrect visual representations because of overculling, and to inefficiencies due to overdrawing multiple items in the same region. Our pixel based drawing infrastructure guarantees correctness by eliminating overculling, and improves rendering performance with tight bounds on overdrawing. PRITree and PRISeq are applications built on PRISAD, with the feature sets of TreeJuxtaposer and SequenceJuxtaposer, respectively. We describe our PRITree and PRISeq dataset traversal algorithms, which are used for efficient rendering, culling, and layout of datasets within the PRISAD framework. We also discuss PRITree node marking techniques, which offer order-of-magnitude improvements to both memory and time performance versus previous range storage and retrieval techniques. Our PRITree implementation features a five fold increase in rendering speed for nontrivial tree structures, and also reduces memory requirements in some real world datasets by up to eight times, so we are able to handle trees of several million nodes. PRISeq renders fifteen times faster and handles datasets twenty times larger than previous work.},
}
@inproceedings{p1407,
booktitle = {Proc. InfoVis},
year = 2005,
title = {Revealing structure within clustered parallel coordinates displays},
doi = {10.1109/INFVIS.2005.1532138},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532138},
author = {Johansson, J. and Ljung, P. and Jern, M. and Cooper, M.},
pages = {125--132},
keywords = {Parallel coordinates, clustering, transfer function, feature animation},
abstract = {In order to gain insight into multivariate data, complex structures must be analysed and understood. Parallel coordinates is an excellent tool for visualizing this type of data but has its limitations. This paper deals with one of its main limitations - how to visualize a large number of data items without hiding the inherent structure they constitute. We solve this problem by constructing clusters and using high precision textures to represent them. We also use transfer functions that operate on the high precision textures in order to highlight different aspects of the cluster characteristics. Providing predefined transfer functions as well as the support to draw customized transfer functions makes it possible to extract different aspects of the data. We also show how feature animation can be used as guidance when simultaneously analysing several clusters. This technique makes it possible to visually represent statistical information about clusters and thus guides the user, making the analysis process more efficient.},
}
@inproceedings{p1408,
booktitle = {Proc. InfoVis},
year = 2005,
title = {Simple 3D glyphs for spatial multivariate data},
doi = {10.1109/INFVIS.2005.1532137},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532137},
author = {Forsell, C. and Seipel, S. and Lind, M.},
pages = {119--124},
keywords = {multidimensional visualization, perception, 3D glyphs},
abstract = {We present an effort to evaluate the possible utility of a new type of 3D glyphs intended for visualizations of multivariate spatial data. They are based on results from vision research suggesting that our perception of metric 3D structure is distorted and imprecise relative to the actual scene before us (e.g., "metric 3D structure in visualizations" by M. Lind et al. (2003)); only a class of qualitative properties of the scene is perceived with accuracy. These properties are best characterized as being invariant over affine but not Euclidean transformations. They are related, but not identical to, the non-accidental properties (NAPs) described by Lowe in "perceptual organization and visual recognition" (1984) on which the notion of geons is based in "recognition by components - a theory of image understanding" by I. Biederman (1987). A large number of possible 3D glyphs for the visualization of spatial data can be constructed using such properties. One group is based on the local sign of surface curvature. We investigated these properties in a visualization experiment. The results are promising and the implications for visualization are discussed.},
}
@inproceedings{p1409,
booktitle = {Proc. InfoVis},
year = 2005,
title = {Temporal visualization of planning polygons for efficient partitioning of geo-spatial data},
doi = {10.1109/INFVIS.2005.1532149},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532149},
author = {Shanbhag, P. and Rheingans, P. and desJardins, M.},
pages = {211--218},
keywords = {Temporal visualization, time-dependent attributes, spatial data, multi-attribute visualization, resource allocation},
abstract = {Partitioning of geo-spatial data for efficient allocation of resources such as schools and emergency health care services is driven by a need to provide better and more effective services. Partitioning of spatial data is a complex process that depends on numerous factors such as population, costs incurred in deploying or utilizing resources and target capacity of a resource. Moreover, complex data such as population distributions are dynamic i.e. they may change over time. Simple animation may not effectively show temporal changes in spatial data. We propose the use of three temporal visualization techniques -wedges, rings and time slices - to display the nature of change in temporal data in a single view. Along with maximizing resource utilization and minimizing utilization costs, a partition should also ensure the long term effectiveness of the plan. We use multi-attribute visualization techniques to highlight the strengths and identify the weaknesses of a partition. Comparative visualization techniques allow multiple partitions to be viewed simultaneously. Users can make informed decisions about how to partition geo spatial data by using a combination of our techniques for multi-attribute visualization, temporal visualization and comparative visualization.},
}
@inproceedings{p1410,
booktitle = {Proc. InfoVis},
year = 2005,
title = {The visual code navigator: an interactive toolset for source code investigation},
doi = {10.1109/INFVIS.2005.1532125},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532125},
author = {Lommerse, G. and Nossin, F. and Voinea, L. and Telea, A.},
pages = {24--31},
keywords = {source code visualization, multiple views, treemaps, pixel-filling displays, source code analysis},
abstract = {We present the Visual Code Navigator, a set of three interrelated visual tools that we developed for exploring large source code software projects from three different perspectives, or views: the syntactic view shows the syntactic constructs in the source code. The symbol view shows the objects a file makes available after compilation, such as function signatures, variables, and namespaces. The evolution view looks at different versions in a project lifetime of a number of selected source files. The views share one code model, which combines hierarchical syntax based and line based information from multiple source files versions. We render this code model using a visual model that extends the pixel-filling, space partitioning properties of shaded cushion treemaps with novel techniques. We discuss how our views allow users to interactively answer complex questions on various code elements by simple mouse clicks. We validate the efficiency and effectiveness of our toolset by an informal user study on the source code of VTK, a large, industry-size C++ code base},
}
@misc{p1411,
year = 2005,
title = {Turning information visualization innovations into commercial products: lessons to guide the next success},
doi = {10.1109/INFVIS.2005.1532153},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532153},
author = {Shneiderman, B. and Rao, R. and Andrews, K. and Ahlberg, C. and Brodbeck, D. and Jewitt, T. and Mackinlay, J.},
pages = {241--244},
keywords = {},
abstract = {As information visualization matures as an academic research field, commercial spinoffs are proliferating, but success stories are harder to find. This is the normal process of emergence for new technologies, but the panel organizers believe that there are certain strategies that facilitate success. To teach these lessons, we have invited several key figures who are seeking to commercialize information visualization tools. The panelists make short presentations, engage in a moderated discussion, and respond to audience questions.},
}
@inproceedings{p1412,
booktitle = {Proc. InfoVis},
year = 2005,
title = {Turning the bucket of text into a pipe},
doi = {10.1109/INFVIS.2005.1532133},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532133},
author = {Hetzler, E. and Crow, V. and Payne, D.A. and Turner, A.},
pages = {89--94},
keywords = {Information Visualization, Dynamic visualization, User interaction design, real-time updating},
abstract = {Many visual analysis tools operate on a fixed set of data. However, professional information analysts follow issues over a period of time and need to be able to easily add new documents to an ongoing exploration. Some analysts handle documents in a moving window of time, with new documents constantly added and old ones aging out. This paper describes both the user interaction and the technical implementation approach for a visual analysis system designed to support constantly evolving text collections.},
}
@inproceedings{p1413,
booktitle = {Proc. InfoVis},
year = 2005,
title = {Two-tone pseudo coloring: compact visualization for one-dimensional data},
doi = {10.1109/INFVIS.2005.1532144},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532144},
author = {Saito, T. and Miyamura, H.N. and Yamamoto, M. and Saito, H. and Hoshiya, Y. and Kaseda, T.},
pages = {173--180},
keywords = {pseudo color, overview, detail, focus+context, data density},
abstract = {A new pseudo coloring technique for large scale one-dimensional datasets is proposed. For visualization of a large scale dataset, user interaction is indispensable for selecting focus areas in the dataset. However, excessive switching of the visualized image makes it difficult for the user to recognize overview/ detail and detail/ detail relationships. The goal of this research is to develop techniques for visualizing details as precisely as possible in overview display. In this paper, visualization of a one-dimensional but very large dataset is considered. The proposed method is based on pseudo coloring, however, each scalar value corresponds to two discrete colors. By painting with two colors at each value, users can read out the value precisely. This method has many advantages: it requires little image space for visualization; both the overview and details of the dataset are visible in one image without distortion; and implementation is very simple. Several application examples, such as meteorological observation data and train convenience evaluation data, show the effectiveness of the method.},
}
@inproceedings{p1414,
booktitle = {Proc. InfoVis},
year = 2005,
title = {Visual correlation for situational awareness},
doi = {10.1109/INFVIS.2005.1532134},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532134},
author = {Livnat, Y. and Agutter, J. and Moon, S. and Foresti, S.},
pages = {95--102},
keywords = {situation awareness, network intrusion, visualization},
abstract = {We present a novel visual correlation paradigm for situational awareness (SA) and suggest its usage in a diverse set of applications that require a high level of SA. Our approach is based on a concise and scalable representation, which leads to a flexible visualization tool that is both clear and intuitive to use. Situational awareness is the continuous extraction of environmental information, its integration with previous knowledge to form a coherent mental picture, and the use of that picture in anticipating future events. In this paper we build on our previous work on visualization for network intrusion detection and show how that approach can be generalized to encompass a much broader class of SA systems. We first propose a generalization that is based on what we term, the w3 premise, namely that each event must have at least the what, when and where attributes. We also present a second generalization, which increases flexibility and facilitates complex visual correlations. Finally, we demonstrate the generality of our approaches by applying our visualization paradigm in a collection of diverse SA areas.},
}
@inproceedings{p1415,
booktitle = {Proc. InfoVis},
year = 2005,
title = {Visualization of graphs with associated timeseries data},
doi = {10.1109/INFVIS.2005.1532151},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532151},
author = {Saraiya, P. and Lee, P. and North, C.},
pages = {225--232},
keywords = {Graph visualization, data overlay, timeseries data analysis, usability experiments},
abstract = {The most common approach to support analysis of graphs with associated time series data include: overlay of data on graph vertices for one timepoint at a time by manipulating a visual property (e.g. color) of the vertex, along with sliders or some such mechanism to animate the graph for other timepoints. Alternatively, data from all the timepoints can be overlaid simultaneously by embedding small charts into graph vertices. These graph visualizations may also be linked to other visualizations (e.g., parallel co-ordinates) using brushing and linking. This paper describes a study performed to evaluate and rank graph+timeseries visualization options based on users' performance time and accuracy of responses on predefined tasks. The results suggest that overlaying data on graph vertices one timepoint at a time may lead to more accurate performance for tasks involving analysis of a graph at a single timepoint, and comparisons between graph vertices for two distinct timepoints. Overlaying data simultaneously for all the timepoints on graph vertices may lead to more accurate and faster performance for tasks involving searching for outlier vertices displaying different behavior than the rest of the graph vertices for all timepoints. Single views have advantage over multiple views on tasks that require topological information. Also, the number of attributes displayed on nodes has a non trivial influence on accuracy of responses, whereas the number of visualizations affect the performance time.},
}
@inproceedings{p1416,
booktitle = {Proc. InfoVis},
year = 2005,
title = {Visualizing coordination in situ},
doi = {10.1109/INFVIS.2005.1532143},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532143},
author = {Weaver, C.},
pages = {165--172},
keywords = {coordination, exploratory visualization, linked views, software visualization, metavisualization},
abstract = {Exploratory visualization environments allow users to build and browse coordinated multiview visualizations interactively. As the number of views and amount of coordination increases, conceptualizing coordination structure becomes more and more important for successful data exploration. Integrated metavisualization is exploratory visualization of coordination and other interactive structure directly inside a visualization's own user interface. This paper presents a model of integrated metavisualization, describes the problem of capturing dynamic interface structure as visualizable data, and outlines three general approaches to integration. Metavisualization has been implemented in improvise, using views, lenses, and embedding to reveal the dynamic structure of its own highly coordinated visualizations.},
}
@inproceedings{p1417,
booktitle = {Proc. InfoVis},
year = 2005,
title = {Vizster: visualizing online social networks},
doi = {10.1109/INFVIS.2005.1532126},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532126},
author = {Heer, J. and Boyd, D.},
pages = {32--39},
keywords = {social networks, visualization, graphs, community,data mining, exploration, play},
abstract = {Recent years have witnessed the dramatic popularity of online social networking services, in which millions of members publicly articulate mutual "friendship" relations. Guided by ethnographic research of these online communities, we have designed and implemented a visualization system for playful end-user exploration and navigation of large scale online social networks. Our design builds upon familiar node link network layouts to contribute customized techniques for exploring connectivity in large graph structures, supporting visual search and analysis, and automatically identifying and visualizing community structures. Both public installation and controlled studies of the system provide evidence of the system's usability, capacity for facilitating discovery, and potential for fun and engaged social activity},
}
@inproceedings{p1418,
booktitle = {Proc. InfoVis},
year = 2005,
title = {Voronoi treemaps},
doi = {10.1109/INFVIS.2005.1532128},
url = {http://dx.doi.org/10.1109/INFVIS.2005.1532128},
author = {Balzer, M. and Deussen, O.},
pages = {49--56},
keywords = {Voronoi Treemaps, Information Visualization, Hierarchies, Trees, Treemaps, Voronoi Tessellations},
abstract = {Treemaps are a well known method for the visualization of attributed hierarchical data. Previously proposed treemap layout algorithms are limited to rectangular shapes, which cause problems with the aspect ratio of the rectangles as well as with identifying the visualized hierarchical structure. The approach of Voronoi treemaps presented in this paper eliminates these problems through enabling subdivisions of and in polygons. Additionally, this allows for creating treemap visualizations within areas of arbitrary shape, such as triangles and circles, thereby enabling a more flexible adaptation of treemaps for a wider range of applications.},
}
@inproceedings{p1507,
booktitle = {Proc. InfoVis},
year = 2004,
title = {A Comparison of the Readability of Graphs Using Node-Link and Matrix-Based Representations},
doi = {10.1109/INFVIS.2004.1},
url = {http://dx.doi.org/10.1109/INFVIS.2004.1},
author = {Ghoniem, M. and Fekete, J. and Castagliola, P.},
pages = {17--24},
keywords = {Visualization of graphs, adjacency matrices, node-link representation, readability, evaluation},
abstract = {In this paper, we describe a taxonomy of generic graph related tasks and an evaluation aiming at assessing the readability of two representations of graphs: matrix-based representations and node-link diagrams. This evaluation bears on seven generic tasks and leads to important recommendations with regard to the representation of graphs according to their size and density. For instance, we show that when graphs are bigger than twenty vertices, the matrix-based visualization performs better than node-link diagrams on most tasks. Only path finding is consistently in favor of node-link diagrams throughout the evaluation},
}
@inproceedings{p1508,
booktitle = {Proc. InfoVis},
year = 2004,
title = {A History Mechanism for Visual Data Mining},
doi = {10.1109/INFVIS.2004.2},
url = {http://dx.doi.org/10.1109/INFVIS.2004.2},
author = {Kreuseler, M. and Nocke, T. and Schumann, H.},
pages = {49--56},
keywords = {Visual data mining, Visualization, History, Undo/Redo},
abstract = {A major challenge of current visualization and visual data mining (VDM) frameworks is to support users in the orientation in complex visual mining scenarios. An important aspect to increase user support and user orientation is to use a history mechanism that, first of all, provides undo and redo functionality. In this paper, we present a new approach to include such history functionality into a VDM framework. Therefore, we introduce the theoretical background, outline design and implementation aspects of a history management unit, and conclude with a discussion showing the usefulness of our history management in a VDM framework},
}
@inproceedings{p1509,
booktitle = {Proc. InfoVis},
year = 2004,
title = {A Knowledge Task-Based Framework for Design and Evaluation of Information Visualizations},
doi = {10.1109/INFVIS.2004.10},
url = {http://dx.doi.org/10.1109/INFVIS.2004.10},
author = {Amar, R. and Stasko, J.},
pages = {143--150},
keywords = {Information visualization, analytic gap, theory, framework, evaluation, knowledge tasks},
abstract = {The design and evaluation of most current information visualization systems descend from an emphasis on a user's ability to "unpack" the representations of data of interest and operate on them independently. Too often, successful decision-making and analysis are more a matter of serendipity and user experience than of intentional design and specific support for such tasks; although humans have considerable abilities in analyzing relationships from data, the utility of visualizations remains relatively variable across users, data sets, and domains. In this paper, we discuss the notion of analytic gaps, which represent obstacles faced by visualizations in facilitating higher-level analytic tasks, such as decision-making and learning. We discuss support for bridging the analytic gap, propose a framework for design and evaluation of information visualization systems, and demonstrate its use},
}
@inproceedings{p1510,
booktitle = {Proc. InfoVis},
year = 2004,
title = {A Rank-by-Feature Framework for Unsupervised Multidimensional Data Exploration Using Low Dimensional Projections},
doi = {10.1109/INFVIS.2004.3},
url = {http://dx.doi.org/10.1109/INFVIS.2004.3},
author = {Jinwook Seo and Shneiderman, B.},
pages = {65--72},
keywords = {information visualization, exploratory data analysis, dynamic query, feature detection/selection, statistical graphics},
abstract = {Exploratory analysis of multidimensional data sets is challenging because of the difficulty in comprehending more than three dimensions. Two fundamental statistical principles for the exploratory analysis are (1) to examine each dimension first and then find relationships among dimensions, and (2) to try graphical displays first and then find numerical summaries (D.S. Moore, 1999). We implement these principles in a novel conceptual framework called the rank-by-feature framework. In the framework, users can choose a ranking criterion interesting to them and sort 1D or 2D axis-parallel projections according to the criterion. We introduce the rank-by-feature prism that is a color-coded lower-triangular matrix that guides users to desired features. Statistical graphs (histogram, boxplot, and scatterplot) and information visualization techniques (overview, coordination, and dynamic query) are combined to help users effectively traverse 1D and 2D axis-parallel projections, and finally to help them interactively find interesting features},
}
@misc{p1511,
year = 2004,
title = {An Associative Information Visualizer},
doi = {10.1109/INFVIS.2004.4},
url = {http://dx.doi.org/10.1109/INFVIS.2004.4},
author = {White, H.D. and Lin, X. and Buzydlowski, J.},
pages = {r8--r8},
keywords = {},
abstract = {},
}
@inproceedings{p1512,
booktitle = {Proc. InfoVis},
year = 2004,
title = {An Evaluation of Microarray Visualization Tools for Biological Insight},
doi = {10.1109/INFVIS.2004.5},
url = {http://dx.doi.org/10.1109/INFVIS.2004.5},
author = {Saraiya, P. and North, C. and Duca, K.},
pages = {1--8},
keywords = {Data visualization, empirical evaluation, insight, high throughput experiments, microarray data, bioinformatics},
abstract = {High-throughput experiments such as gene expression microarrays in the life sciences result in large datasets. In response, a wide variety of visualization tools have been created to facilitate data analysis. Biologists often face a dilemma in choosing the best tool for their situation. The tool that works best for one biologist may not work well for another due to differences in the type of insight they seek from their data. A primary purpose of a visualization tool is to provide domain-relevant insight into the data. Ideally, any user wants maximum information in the least possible time. In this paper we identify several distinct characteristics of insight that enable us to recognize and quantify it. Based on this, we empirically evaluate five popular microarray visualization tools. Our conclusions can guide biologists in selecting the best tool for their data, and computer scientists in developing and evaluating visualizations},
}
@misc{p1513,
year = 2004,
title = {An Experimental Investigation of Magnification Lens Offset and Its Impact on Imagery Analysis},
doi = {10.1109/INFVIS.2004.6},
url = {http://dx.doi.org/10.1109/INFVIS.2004.6},
author = {Darling, E. and Newbern, C. and Kalghatgi, N. and Burgman, A. and Recktenwald, K.},
pages = {5--5},
keywords = {},
abstract = {A digital lens is a user interface mechanism that is a potential solution to information management problems. We investigated the use of digital lensing applied to imagery analysis. Participants completed three different types of tasks (locate, follow, and compare) using a magnification lens with three different degrees of offset (aligned, adjacent, and docked) over a high-resolution aerial photo. Although no lens offset mode was significantly better than another, most participants preferred the adjacent mode for the locate and compare tasks, and the docked mode for the follow tasks. This paper describes the results of a user study of magnification lenses and provides new insights into preferences of and interactions with digital lensing.},
}
@misc{p1514,
year = 2004,
title = {ARNA: Interactive Comparison and Alignment of RNA Secondary Structure},
doi = {10.1109/INFVIS.2004.7},
url = {http://dx.doi.org/10.1109/INFVIS.2004.7},
author = {Gainant, G. and Auber, D.},
pages = {8--8},
keywords = {},
abstract = {ARNA is an interactive visualization system that supports comparison and alignment of RNA secondary structure. We present a new approach to RNA alignment that exploits the complex structure of the Smith-Waterman local distance matrix, allowing people to explore the space of possible partial alignments to discover a good global solution. The modular software architecture separates the user interface from computation, allowing the possibility of incorporating different alignment algorithms into the same framework.},
}
@inproceedings{p1515,
booktitle = {Proc. InfoVis},
year = 2004,
title = {Artifacts of the Presence Era: Using Information Visualization to Create an Evocative Souvenir},
doi = {10.1109/INFVIS.2004.8},
url = {http://dx.doi.org/10.1109/INFVIS.2004.8},
author = {Viegas, F.B. and Perry, E. and Howe, E. and Donath, J.},
pages = {105--111},
keywords = {visualization, history, public space},
abstract = {We present Artifacts of the Presence Era, a digital installation that uses a geological metaphor to visualize the events in a physical space over time. The piece captures video and audio from a museum and constructs an impressionistic visualization of the evolving history in the space. Instead of creating a visualization tool for data analysis, we chose to produce a piece that functions as a souvenir of a particular time and place. We describe the design choices we made in creating this installation, the visualization techniques we developed, and the reactions we observed from users and the media. We suggest that the same approach can be applied to a more general set of visualization contexts, ranging from email archives to newsgroups conversations},
}
@misc{p1516,
year = 2004,
title = {BinX: Dynamic Exploration of Time Series Datasets Across Aggregation Levels},
doi = {10.1109/INFVIS.2004.11},
url = {http://dx.doi.org/10.1109/INFVIS.2004.11},
author = {Berry, L. and Munzner, T.},
pages = {2--2},
keywords = {},
abstract = {Many fields of study produce time series datasets, and both the size and number of these datasets are increasing rapidly due to the improvement of data accumulation methods such as small, cheap sensors and routine logging of events. Humans often fail to comprehend the structure of a long time series dataset because of the overwhelming amount of data and the range of different time scales at which there may be meaningful patterns. BinX is an interactive tool that provides dynamic visualization and manipulation of long time series datasets. The dataset is visualized through user controlled aggregation, augmented by various information visualization techniques.},
}
@inproceedings{p1517,
booktitle = {Proc. InfoVis},
year = 2004,
title = {Building Highly-Coordinated Visualizations in Improvise},
doi = {10.1109/INFVIS.2004.12},
url = {http://dx.doi.org/10.1109/INFVIS.2004.12},
author = {Weaver, C.},
pages = {159--166},
keywords = {coordinated queries, coordination, exploratory visualization, multiple views, visual abstraction language},
abstract = {Improvise is a fully-implemented system in which users build and browse multiview visualizations interactively using a simple shared-object coordination mechanism coupled with a flexible, expression-based visual abstraction language. By coupling visual abstraction with coordination, users gain precise control over how navigation and selection in the visualization affects the appearance of data in individual views. As a result, it is practical to build visualizations with more views and richer coordination in Improvise than in other visualization systems. Building and browsing activities are integrated in a single, live user interface that lets users alter visualizations quickly and incrementally during data exploration},
}
@misc{p1518,
year = 2004,
title = {Capstone Address: Visualization as a Medium for Capturing and Sharing Thoughts},
doi = {10.1109/INFVIS.2004.13},
url = {http://dx.doi.org/10.1109/INFVIS.2004.13},
author = {Roth, S.F.},
pages = {xiii--xiii},
keywords = {},
abstract = {},
}
@misc{p1519,
year = 2004,
title = {Case Study: Visualizing Visualization},
doi = {10.1109/INFVIS.2004.14},
url = {http://dx.doi.org/10.1109/INFVIS.2004.14},
author = {van Ham, F.},
pages = {r5--r5},
keywords = {},
abstract = {In this case study we attempt to visualize a real-world dataset consisting of 600 recently published information visualization papers and their references. This is done by first creating a global layout of the entire graph that preserves any cluster structure present. We then use this layout as a basis to define a hierarchical clustering. The clusters in this hierarchy are labelled using keywords supplied with the dataset, allowing insight into the clusters' semantics.},
}
@inproceedings{p1520,
booktitle = {Proc. InfoVis},
year = 2004,
title = {Clutter Reduction in Multi-Dimensional Data Visualization Using Dimension Reordering},
doi = {10.1109/INFVIS.2004.15},
url = {http://dx.doi.org/10.1109/INFVIS.2004.15},
author = {Peng, W. and Ward, M.O. and Rundensteiner, E.A.},
pages = {89--96},
keywords = {Multidimensional visualization, dimension order, visual clutter, visual structure},
abstract = {Visual clutter denotes a disordered collection of graphical entities in information visualization. Clutter can obscure the structure present in the data. Even in a small dataset, clutter can make it hard for the viewer to find patterns, relationships and structure. In this paper, we define visual clutter as any aspect of the visualization that interferes with the viewer's understanding of the data, and present the concept of clutter-based dimension reordering. Dimension order is an attribute that can significantly affect a visualization's expressiveness. By varying the dimension order in a display, it is possible to reduce clutter without reducing information content or modifying the data in any way. Clutter reduction is a display-dependent task. In this paper, we follow a three-step procedure for four different visualization techniques. For each display technique, first, we determine what constitutes clutter in terms of display properties; then we design a metric to measure visual clutter in this display; finally we search for an order that minimizes the clutter in a display},
}
@misc{p1521,
year = 2004,
title = {Creating and Managing "Lookmarks" in ParaView},
doi = {10.1109/INFVIS.2004.16},
url = {http://dx.doi.org/10.1109/INFVIS.2004.16},
author = {Stanton, E. and Kegelmeyer, W.P.},
pages = {19--19},
keywords = {},
abstract = {This paper describes the integration of lookmarks into the ParaView visualization tool. Lookmarks are pointers to views of specific parts of a dataset. They were so named because lookmarks are to a visualization tool and dataset as bookmarks are to a browser and the World Wide Web. A lookmark can be saved and organized among other lookmarks within the context of ParaView. Then at a later time, either in the same ParaView session or in a different one, it can be regenerated, displaying the exact view of the data that had previously been saved. This allows the user to pick up where they left off, to continue to adjust the view or otherwise manipulate the data. Lookmarks facilitate collaboration between users who wish to share views of a dataset. They enable more effective data comparison because they can be applied to other datasets. They also serve as a way of organizing a user’s data. Ultimately, a lookmark is a time-saving tool that automates the recreation of a complex view of the data.},
}
@misc{p1522,
year = 2004,
title = {Distortion-Based Visualization for Long-Term Continuous Acoustic Monitoring},
doi = {10.1109/INFVIS.2004.17},
url = {http://dx.doi.org/10.1109/INFVIS.2004.17},
author = {Tsutsumi, F. and Itoh, N. and Onoda, T.},
pages = {21--21},
keywords = {},
abstract = {Visualizing long-term acoustic data has been an important subject in the field of equipment surveillance and equipment diagnosis. This paper proposes a distortion-based visualization method of long-term acoustic data. We applied the method to 1 hour observation data of electric discharge sound, and our method could visualize the sound data more intelligibly as compared with conventional methods.},
}
@inproceedings{p1523,
booktitle = {Proc. InfoVis},
year = 2004,
title = {Dynamic Drawing of Clustered Graphs},
doi = {10.1109/INFVIS.2004.18},
url = {http://dx.doi.org/10.1109/INFVIS.2004.18},
author = {Frishman, Y. and Tal, A.},
pages = {191--198},
keywords = {graph drawing, dynamic layout, mobile objects, software visualization},
abstract = {This paper presents an algorithm for drawing a sequence of graphs that contain an inherent grouping of their vertex set into clusters. It differs from previous work on dynamic graph drawing in the emphasis that is put on maintaining the clustered structure of the graph during incremental layout. The algorithm works online and allows arbitrary modifications to the graph. It is generic and can be implemented using a wide range of static force-directed graph layout tools. The paper introduces several metrics for measuring layout quality of dynamic clustered graphs. The performance of our algorithm is analyzed using these metrics. The algorithm has been successfully applied to visualizing mobile object software},
}
@inproceedings{p1524,
booktitle = {Proc. InfoVis},
year = 2004,
title = {Evaluating a System for Interactive Exploration of Large, Hierarchically Structured Document Repositories},
doi = {10.1109/INFVIS.2004.19},
url = {http://dx.doi.org/10.1109/INFVIS.2004.19},
author = {Granitzer, M. and Kienreich, W. and Sabol, V. and Andrews, K. and Klieber, W.},
pages = {127--134},
keywords = {information visualisation, navigation, document retrieval, hierarchical repositories, knowledge management, information management, force-directed placement, Voronoi},
abstract = {The InfoSky visual explorer is a system enabling users to interactively explore large, hierarchically structured document collections. Similar to a real-world telescope, InfoSky employs a planar graphical representation with variable magnification. Documents of similar content are placed close to each other and displayed as stars, while collections of documents at a particular level in the hierarchy are visualised as bounding polygons. Usability testing of an early prototype implementation of InfoSky revealed several design issues which prevented users from fully exploiting the power of the visual metaphor. Evaluation results have been incorporated into an advanced prototype, and another usability test has been conducted. A comparison of test results demonstrates enhanced system performance and points out promising directions for further work},
}
@misc{p1525,
year = 2004,
title = {EventScope: Bringing Remote Experience of Mars to the Public through Telepresence},
doi = {10.1109/INFVIS.2004.20},
url = {http://dx.doi.org/10.1109/INFVIS.2004.20},
author = {Myers, E. and Coppin, P. and Wagner, M. and Fischer, K. and Luisa Lu and McCloskey, W.R. and Seneker, D.},
pages = {16--16},
keywords = {},
abstract = {Telepresence, experiencing a place without physically being there, offers an important means for the public experience of remote locations such as distant continents or other planets. EventScope presents one such telepresence visualization interface for bringing scientific missions to the public. Currently, remote experience lessons based on NASA’s Mars Exploration Rover missions are being made available through the EventScope framework to museums, classrooms, and the public at large.},
}
@inproceedings{p1526,
booktitle = {Proc. InfoVis},
year = 2004,
title = {Expand-Ahead: A Space-Filling Strategy for Browsing Trees},
doi = {10.1109/INFVIS.2004.21},
url = {http://dx.doi.org/10.1109/INFVIS.2004.21},
author = {McGuffin, M.J. and Davison, G. and Balakrishnan, R.},
pages = {119--126},
keywords = {tree browsing and navigation, focus+context, expand-ahead, automatic expansion, space filling, adaptive user interfaces},
abstract = {Many tree browsers allow subtrees under a node to be collapsed or expanded, enabling the user to control screen space usage and selectively drill-down. However, explicit expansion of nodes can be tedious. Expand-ahead is a space-filling strategy by which some nodes are automatically expanded to fill available screen space, without expanding so far that nodes are shown at a reduced size or outside the viewport. This often allows a user exploring the tree to see further down the tree without the effort required in a traditional browser. It also means the user can sometimes drill-down a path faster, by skipping over levels of the tree that are automatically expanded for them. Expand-ahead differs from many detail-in-context techniques in that there is no scaling or distortion involved. We present 1D and 2D prototype implementations of expand-ahead, and identify various design issues and possible enhancements to our designs. Our prototypes support smooth, animated transitions between different views of a tree. We also present the results of a controlled experiment which show that, under certain conditions, users are able to drill-down faster with expand-ahead than without},
}
@misc{p1527,
year = 2004,
title = {Exploring and Visualizing the History of InfoVis},
doi = {10.1109/INFVIS.2004.22},
url = {http://dx.doi.org/10.1109/INFVIS.2004.22},
author = {Keim, D.A. and Barro, H. and Panse, C. and Schneidewind, J. and Sips, M.},
pages = {r6--r6},
keywords = {},
abstract = {},
}
@misc{p1528,
year = 2004,
title = {Exploring InfoVis Publication History with Tulip},
doi = {10.1109/INFVIS.2004.23},
url = {http://dx.doi.org/10.1109/INFVIS.2004.23},
author = {Delest, M. and Munzner, T. and Auber, D. and Domenger, J.},
pages = {r10--r10},
keywords = {},
abstract = {We show the structure of the InfoVis publications dataset using Tulip, a scalable open-source visualization system for graphs and trees. Tulip supports interactive navigation and many options for layout. Subgraphs of the full dataset can be created interactively or using a wide set of algorithms based on graph theory and combinatorics, including several kinds of clustering. We found that convolution clustering and small world clustering were particularly effective at showing the structure of the InfoVis publications dataset, as was coloring by the Strahler metric.},
}
@inproceedings{p1529,
booktitle = {Proc. InfoVis},
year = 2004,
title = {EZEL: a Visual Tool for Performance Assessment of Peer-to-Peer File-Sharing Network},
doi = {10.1109/INFVIS.2004.25},
url = {http://dx.doi.org/10.1109/INFVIS.2004.25},
author = {Voinea, L. and Telea, A. and van Wijk, J.J.},
pages = {41--48},
keywords = {process visualization, distributed file systems visualization, P2P file-sharing networks visualization, small displays},
abstract = {In this paper we present EZEL, a visual tool we developed for the performance assessment of peer-to-peer file-sharing networks. We start by identifying the relevant data transferred in this kind of networks and the main performance assessment questions. Then we describe the visualization of data from two different points of view. First we take servers as focal points and we introduce a new technique, faded cushioning, which allows visualizing the same data from different perspectives. Secondly, we present the viewpoint of files, and we expose the correlations with the server stance via a special scatter plot. Finally, we discuss how our tool, based on the described techniques, is effective in the performance assessment of peer-to-peer file-sharing networks},
}
@misc{p1530,
year = 2004,
title = {faMailiar & Intimacy-Based Email Visualization},
doi = {10.1109/INFVIS.2004.26},
url = {http://dx.doi.org/10.1109/INFVIS.2004.26},
author = {Mandic, M. and Kerne, A.},
pages = {14--14},
keywords = {},
abstract = {Email has developed into one of the most extensively used computer applications. Email interfaces, on the other hand, have gone through very few transformations since their inception, and as the growing volumes of email data accumulate in users' email boxes, these interfaces fail to provide effective message handling and browsing support. Saved email messages provide not only a vast record of one's electronic past, but also a potential source of valuable insights into the structure and dynamics of one's social network. In this paper, we present faMailiar, a novel email visualization that draws upon email's inherently personal character by using intimacy as a key visualization parameter. The program presents a visualization of email use over time. faMailiar facilitates navigation through large email collections, enabling the user to discover communication rhythms and patterns.},
}
@inproceedings{p1531,
booktitle = {Proc. InfoVis},
year = 2004,
title = {GeoTime Information Visualization},
doi = {10.1109/INFVIS.2004.27},
url = {http://dx.doi.org/10.1109/INFVIS.2004.27},
author = {Kapler, T. and Wright, W.},
pages = {25--32},
keywords = {3-D visualization, spatiotemporal, geospatial, interactive visualization, visual data analysis, link analysis},
abstract = {Analyzing observations over time and geography is a common task but typically requires multiple, separate tools. The objective of our research has been to develop a method to visualize, and work with, the spatial interconnectedness of information over time and geography within a single, highly interactive 3D view. A novel visualization technique for displaying and tracking events, objects and activities within a combined temporal and geospatial display has been developed. This technique has been implemented as a demonstratable prototype called GeoTime in order to determine potential utility. Initial evaluations have been with military users. However, we believe the concept is applicable to a variety of government and business analysis tasks},
}
@misc{p1532,
year = 2004,
title = {Histographs: Interactive Clustering of Stacked Graphs},
doi = {10.1109/INFVIS.2004.28},
url = {http://dx.doi.org/10.1109/INFVIS.2004.28},
author = {Pin Ren and Watson, B.},
pages = {17--17},
keywords = {},
abstract = {Visualization systems must intuitively display and allow interaction with large multivariate data on low-dimensional displays. One problem often encountered in the process is occlusion: the ambiguity that occurs when records from different data sets are mapped to the same display location. For example, because of occlusion, summarizing 1000 graphs by simply stacking them one over another is pointless. We solve this problem by adapting the solution to a similar problem in the Information Murals system [2]: mapping the number of data elements at a location to display luminance. Inspired by histograms, which map data frequency to space, we call our solution histographs. By treating a histograph as a digital image, we can blur and highlight edges to emphasize data features. We also support interactive clustering of the data with data zooming and shape-based selection. We are currently investigating alternative occlusion blending schemes.},
}
@misc{p1533,
year = 2004,
title = {Hypothesis Visualization},
doi = {10.1109/INFVIS.2004.29},
url = {http://dx.doi.org/10.1109/INFVIS.2004.29},
author = {Cluxton, D. and Eick, S.G. and Jie Yun},
pages = {4--4},
keywords = {},
abstract = {We have constructed an information visualization tool for understanding complex arguments. The tool enables analysts to construct structured arguments using judicial proof techniques, associate evidence with hypotheses, and set evidence parameters such as relevance and credibility. Users manipulate the hypotheses and their associated inference networks using visualization techniques. Our tool integrates concepts from structured argumentation, analysis of competing hypotheses, and hypothesis scoring with information visualization. It presents new metaphors for visualizing and manipulating structured arguments.},
}
@misc{p1534,
year = 2004,
title = {IN-SPIRE InfoVis 2004 Contest Entry},
doi = {10.1109/INFVIS.2004.37},
url = {http://dx.doi.org/10.1109/INFVIS.2004.37},
author = {Pak Chung Wong and Hetzler, E. and Posse, C. and Whiting, M. and Havre, S. and Cramer, N. and Shah, A. and Singhal, M. and Turner, A. and Thomas, J.},
pages = {r2--r2},
keywords = {},
abstract = {This is the first part (summary) of a three-part contest entry submitted to IEEE InfoVis 2004. The contest topic is visualizing InfoVis symposium papers from 1995 to 2002 and their references. The paper introduces the visualization tool IN-SPIRE, the visualization process and results, and presents lessons learned.},
}
@misc{p1535,
year = 2004,
title = {Information Visualization Research: Citation and Co-Citation Highlights},
doi = {10.1109/INFVIS.2004.38},
url = {http://dx.doi.org/10.1109/INFVIS.2004.38},
author = {Chen, C.},
pages = {r11--r11},
keywords = {},
abstract = {An overview of the entry is given. The techniques used to prepare the InfoVis contest entry are outlined. The strengths and weaknesses are briefly discussed.},
}
@misc{p1536,
year = 2004,
title = {InfoVisExplorer},
doi = {10.1109/INFVIS.2004.39},
url = {http://dx.doi.org/10.1109/INFVIS.2004.39},
author = {Tyman, J. and Gruetzmacher, G.P. and Stasko, J.},
pages = {r7--r7},
keywords = {},
abstract = {In this paper we briefly describe 3 tools developed to visualize the history of information visualization papers. The visualization consists of a standard 3D scatterplot view enhanced with "bubbles," lines, text, and colors aimed at making comparisons between authors and topics found in the papers. Three components were developed to translate and display raw XML data using OpenGL and Cocoa. We use the visualization tool to perform five tasks and discuss its weaknesses.},
}
@misc{p1537,
year = 2004,
title = {Interactive Exploration of the AFS File System},
doi = {10.1109/INFVIS.2004.40},
url = {http://dx.doi.org/10.1109/INFVIS.2004.40},
author = {Foster, J. and Subramanian, K.R. and Herring, R. and Gail Ahn},
pages = {7--7},
keywords = {},
abstract = {Managing file systems of large organizations can present significant challenges in terms of the number of users, shared access to parts of the file system, and securing and monitoring critical parts of the file system. We present an interactive exploratory tool for monitoring and viewing the complex relationships within the Andrew File System (AFS). This tool is targeted as an aid to system administrators to manage users, applications and shared access. We tested our tool on UNC Charlotte’s AFS file system, which contains 4554 users, 556 user groups, and 2.2 million directories. Two types of visualizations are supported to explore file system relationships. In addition, drill-down features are provided to access the user file system and access control information of any directory within the system. All of the views are linked to facilitate easy navigation.},
}
@misc{p1538,
year = 2004,
title = {Interactive Poster: Visual Mining of Business Process Data},
doi = {10.1109/INFVIS.2004.41},
url = {http://dx.doi.org/10.1109/INFVIS.2004.41},
author = {Hao, M.C. and Keim, D.A. and Dayal, U. and Schneidewind, J.},
pages = {10--10},
keywords = {},
abstract = {},
}
@misc{p1539,
year = 2004,
title = {Interactive Visualization Approaches to the Analysis of System Identification Data},
doi = {10.1109/INFVIS.2004.42},
url = {http://dx.doi.org/10.1109/INFVIS.2004.42},
author = {Johansson, J. and Ljung, P. and Lindgren, D. and Cooper, M.},
pages = {11--11},
keywords = {},
abstract = {We propose an interactive visualization approach to finding a mathematical model for a real world process, commonly known in the field of control theory as system identification. The use of interactive visualization techniques provides the modeller with instant visual feedback which facilitates the model validation process. When working interactively with such large data sets, as are common in system identification, methods to handle this data efficiently are required. We are developing approaches based on data streaming to meet this need.},
}
@inproceedings{p1540,
booktitle = {Proc. InfoVis},
year = 2004,
title = {Interactive Visualization of Small World Graphs},
doi = {10.1109/INFVIS.2004.43},
url = {http://dx.doi.org/10.1109/INFVIS.2004.43},
author = {van Ham, F. and van Wijk, J.J.},
pages = {199--206},
keywords = {Graph Visualization, Graph Drawing, Clustering, Small World Graphs},
abstract = {Many real world graphs have small world characteristics, that is, they have a small diameter compared to the number of nodes and exhibit a local cluster structure. Examples are social networks, software structures, bibliographic references and biological neural nets. Their high connectivity makes both finding a pleasing layout and a suitable clustering hard. In this paper we present a method to create scalable, interactive visualizations of small world graphs, allowing the user to inspect local clusters while maintaining a global overview of the entire structure. The visualization method uses a combination of both semantical and geometrical distortions, while the layout is generated by a spring embedder algorithm using a recently developed force model. We use a cross referenced database of 500 artists as a running example},
}
@misc{p1541,
year = 2004,
title = {Keynote Address: From Information Visualization to Sensemaking: Connecting the Mind's Eye to the Mind's Muscle},
doi = {10.1109/INFVIS.2004.44},
url = {http://dx.doi.org/10.1109/INFVIS.2004.44},
author = {Card, S.K.},
pages = {xii--xii},
keywords = {},
abstract = {},
}
@misc{p1542,
year = 2004,
title = {Major Information Visualization Authors, Papers and Topics in the ACM Library},
doi = {10.1109/INFVIS.2004.45},
url = {http://dx.doi.org/10.1109/INFVIS.2004.45},
author = {Ke, W. and Borner, K. and Viswanath, L.},
pages = {r1--r1},
keywords = {},
abstract = {The presented work aims to identify major research topics, co-authorships, and trends in the IV Contest 2004 dataset. Co-author, paper-citation, and burst analysis were used to analyze the dataset. The results are visually presented as graphs, static Pajek [1] visualizations and interactive network layouts using Pajek’s SVG output feature. A complementary web page with all the raw data, details of the analyses, and high resolution images of all figures is available online at http://iv.slis.indiana.edu/ref/iv04contest/.},
}
@inproceedings{p1543,
booktitle = {Proc. InfoVis},
year = 2004,
title = {Matrix Zoom: A Visual Interface to Semi-External Graphs},
doi = {10.1109/INFVIS.2004.46},
url = {http://dx.doi.org/10.1109/INFVIS.2004.46},
author = {Abello, J. and van Ham, F.},
pages = {183--190},
keywords = {Graph Visualization, Hierarchy Trees, Clustering, External Memory Algorithms, Cancer Data, Phone Traffic},
abstract = {In Web data, telecommunications traffic and in epidemiological studies, dense subgraphs correspond to subsets of subjects (i.e. users, patients) that share a collection of attributes values (i.e. accessed Web pages, email-calling patterns or disease diagnostic profiles). Visual and computational identification of these "clusters" becomes useful when domain experts desire to determine those factors of major influence in the formation of access and communication clusters or in the detection and contention of disease spread. With the current increases in graphic hardware capabilities and RAM sizes, it is more useful to relate graph sizes to the available screen real estate S and the amount of available RAM M, instead of the number of edges or nodes in the graph. We offer a visual interface that is parameterized by M and S and is particularly suited for navigation tasks that require the identification of subgraphs whose edge density is above certain threshold. This is achieved by providing a zoomable matrix view of the underlying data. This view is strongly coupled to a hierarchical view of the essential information elements present in the data domain. We illustrate the applicability of this work to the visual navigation of cancer incidence data and to an aggregated sample of phone call traffic},
}
@inproceedings{p1544,
booktitle = {Proc. InfoVis},
year = 2004,
title = {Metric-Based Network Exploration and Multiscale Scatterplot},
doi = {10.1109/INFVIS.2004.47},
url = {http://dx.doi.org/10.1109/INFVIS.2004.47},
author = {Chiricota, Y. and Jourdan, F. and Melancon, G.},
pages = {135--142},
keywords = {Graph navigation, exploration, scatterplot, multiscale perceptual organization, clustering, filtering, blurring},
abstract = {We describe an exploratory technique based on the direct interaction with a 2D modified scatterplot computed from two different metrics calculated over the elements of a network. The scatterplot is transformed into an image by applying standard image processing techniques, resulting in blurring effects. Segmentation of the image allows patches on the image to be easily selected as a way to extract subnetworks. We were inspired by the work of Wattenberg and Fisher [M. Wattenberg et al. (2003)] showing that the blurring process builds into a multiscale perceptual scheme, making this type of interaction intuitive to the user. We explain how the exploration of the network can be guided by the visual analysis of the blurred scatterplot and by its possible interpretations},
}
@misc{p1545,
year = 2004,
title = {MonkEllipse: Visualizing the History of Information Visualization},
doi = {10.1109/INFVIS.2004.48},
url = {http://dx.doi.org/10.1109/INFVIS.2004.48},
author = {Tzu-Wei Hsu and Inman, L. and McColgin, D. and Stamper, K.},
pages = {r9--r9},
keywords = {},
abstract = {In this paper, we describe the process and result of creating a visualization to capture the past 10 years of history in the field of Information Visualization, as part of the annual InfoVis Conference Contest. We began with an XML file containing data provided by the contest organizers, scrubbed and augmented the data, and created a database to hold the information. We designed a visualization and implemented it using Flash MX 2004 Professional with ActionScript 2.0, PHP, and PostgreSQL. The resulting visualization provides an overview of the field of Information Visualization, and allows users to see the connections between areas of the field, particular researchers, and documents.},
}
@inproceedings{p1546,
booktitle = {Proc. InfoVis},
year = 2004,
title = {Non-Euclidean Spring Embedders},
doi = {10.1109/INFVIS.2004.49},
url = {http://dx.doi.org/10.1109/INFVIS.2004.49},
author = {Kobourov, S. and Wampler, K.},
pages = {207--214},
keywords = {force-directed algorithms, spring embedders, non-Euclidean geometry, hyperbolic space, spherical space, graph drawing, information visualization},
abstract = {We present a method by which force-directed algorithms for graph layouts can be generalized to calculate the layout of a graph in an arbitrary Riemannian geometry. The method relies on extending the Euclidean notions of distance, angle, and force-interactions to smooth non-Euclidean geometries via projections to and from appropriately chosen tangent spaces. In particular, we formally describe the calculations needed to extend such algorithms to hyperbolic and spherical geometries},
}
@misc{p1547,
year = 2004,
title = {One-For-All: Visualization of the Information Visualization Symposia},
doi = {10.1109/INFVIS.2004.50},
url = {http://dx.doi.org/10.1109/INFVIS.2004.50},
author = {Soon Tee Teoh and Kwan-Liu Ma},
pages = {r12--r12},
keywords = {},
abstract = {},
}
@inproceedings{p1548,
booktitle = {Proc. InfoVis},
year = 2004,
title = {Paint Inspired Color Mixing and Compositing for Visualization},
doi = {10.1109/INFVIS.2004.52},
url = {http://dx.doi.org/10.1109/INFVIS.2004.52},
author = {Gossett, N. and Baoquan Chen},
pages = {113--118},
keywords = {RYB, Color Mixing, Perception},
abstract = {Color is often used to convey information, and color compositing is often required while visualizing multiattribute information. This paper proposes an alternative method for color compositing. In order to present understandable color blending to the general public, several techniques are proposed. First, a paint-inspired RYB color space is used. In addition, noise patterns are employed to produce subregions of pure color within an overlapped region. We show examples to demonstrate the effectiveness of our technique for visualization},
}
@misc{p1549,
year = 2004,
title = {PhylloTrees: Harnessing Nature’s Phyllotactic Patterns for Tree Layout},
doi = {10.1109/INFVIS.2004.53},
url = {http://dx.doi.org/10.1109/INFVIS.2004.53},
author = {Carpendale, S. and Agarawala, A.},
pages = {3--3},
keywords = {},
abstract = {We explore the use of nature’s phyllotactic patterns to inform the layout of hierarchical data. These naturally occurring patterns provide a non-overlapping, optimal packing when the total number of nodes is not known a priori. We present a family of expandable tree layouts based on these patterns.},
}
@misc{p1550,
year = 2004,
title = {RankSpiral: Toward Enhancing Search Results Visualizations},
doi = {10.1109/INFVIS.2004.56},
url = {http://dx.doi.org/10.1109/INFVIS.2004.56},
author = {Spoerri, A.},
pages = {18--18},
keywords = {},
abstract = {This paper addresses the problem of how to enable users to visually explore and compare large sets of documents that have been retrieved by different search engines or queries. The Rank-Spiral enables users to rapidly scan large numbers of documents and their titles in a single screen. It uses a spiral mapping that maximizes information density and minimizes occlusions. It solves the labeling problem by exploiting the structure of the special spiral mapping used. Focus+Context interactions enable users to examine document clusters or groupings in more detail.},
}
@inproceedings{p1551,
booktitle = {Proc. InfoVis},
year = 2004,
title = {RecMap: Rectangular Map Approximations},
doi = {10.1109/INFVIS.2004.57},
url = {http://dx.doi.org/10.1109/INFVIS.2004.57},
author = {Heilmann, R. and Keim, D.A. and Panse, C. and Sips, M.},
pages = {33--40},
keywords = {Geographic Visualization, Information Visualization, Database and Data Mining Visualization},
abstract = {In many application domains, data is collected and referenced by its geospatial location. Nowadays, different kinds of maps are used to emphasize the spatial distribution of one or more geospatial attributes. A characteristic of geospatial statistical data is its highly nonuniform distribution in real-world data sets. This has several impacts on the resulting map visualizations. Classical area maps tend to highlight patterns in large areas, which may, however, be of low importance. Cartographers and geographers used cartograms or value-by-area maps to address this problem long before computers were available. Although many automatic techniques have been developed, most of the value-by-area cartograms are generated manually via human interaction. In this paper, we propose a novel visualization technique for geospatial data sets called RecMap. Our technique approximates a rectangular partition of the (rectangular) display area into a number of map regions preserving important geospatial constraints. It is a fully automatic technique with explicit user control over all exploration constraints within the exploration process. Experiments show that our technique produces visualizations of geospatial data sets, which enhance the discovery of global and local correlations, and demonstrate its performance in a variety of applications},
}
@misc{p1552,
year = 2004,
title = {Resource Systems Reference Database},
doi = {10.1109/INFVIS.2004.58},
url = {http://dx.doi.org/10.1109/INFVIS.2004.58},
author = {Lu, D. and Dietrich, L.},
pages = {13--13},
keywords = {},
abstract = {This interactive poster proposes a novel, explorative way to browse a database containing links to resource systems-related information online. Our approach is an illustrative one, and draws on our combined backgrounds in computer science, graphic and interaction design, sustainability, community organization, and urban design. The data visualized in our prototype was collected by students in the course Sustainable Habits, which Lauren Dietrich taught at Stanford University during Winter 2004.},
}
@inproceedings{p1553,
booktitle = {Proc. InfoVis},
year = 2004,
title = {Rethinking Visualization: A High-Level Taxonomy},
doi = {10.1109/INFVIS.2004.59},
url = {http://dx.doi.org/10.1109/INFVIS.2004.59},
author = {Tory, M. and Moller, T.},
pages = {151--158},
keywords = {visualization, taxonomy, classification, design model, user model, conceptual model},
abstract = {We present a novel high-level visualization taxonomy. Our taxonomy classifies visualization algorithms rather than data. Algorithms are categorized based on the assumptions they make about the data being visualized; we call this set of assumptions the design model. Because our taxonomy is based on design models, it is more flexible than existing taxonomies and considers the user's conceptual model, emphasizing the human aspect of visualization. Design models are classified according to whether they are discrete or continuous and by how much the algorithm designer chooses display attributes such as spatialization, timing, colour, and transparency. This novel approach provides an alternative view of the visualization field that helps explain how traditional divisions (e.g., information and scientific visualization) relate and overlap, and that may inspire research ideas in hybrid visualization areas},
}
@inproceedings{p1554,
booktitle = {Proc. InfoVis},
year = 2004,
title = {Steerable, Progressive Multidimensional Scaling},
doi = {10.1109/INFVIS.2004.60},
url = {http://dx.doi.org/10.1109/INFVIS.2004.60},
author = {Williams, M. and Munzner, T.},
pages = {57--64},
keywords = {dimensionality reduction, multidimensional scaling},
abstract = {Current implementations of multidimensional scaling (MDS), an approach that attempts to best represent data point similarity in a low-dimensional representation, are not suited for many of today's large-scale datasets. We propose an extension to the spring model approach that allows the user to interactively explore datasets that are far beyond the scale of previous implementations of MDS. We present MDSteer, a steerable MDS computation engine and visualization tool that progressively computes an MDS layout and handles datasets of over one million points. Our technique employs hierarchical data structures and progressive layouts to allow the user to steer the computation of the algorithm to the interesting areas of the dataset. The algorithm iteratively alternates between a layout stage in which a subselection of points are added to the set of active points affected by the MDS iteration, and a binning stage which increases the depth of the bin hierarchy and organizes the currently unplaced points into separate spatial regions. This binning strategy allows the user to select onscreen regions of the layout to focus the MDS computation into the areas of the dataset that are assigned to the selected bins. We show both real and common synthetic benchmark datasets with dimensionalities ranging from 3 to 300 and cardinalities of over one million points},
}
@misc{p1555,
year = 2004,
title = {TextPool: Visualizing Live Text Streams},
doi = {10.1109/INFVIS.2004.63},
url = {http://dx.doi.org/10.1109/INFVIS.2004.63},
author = {Albrecht-Buehler, C. and Watson, B. and Shamma, D.A.},
pages = {1--1},
keywords = {},
abstract = {In today's fast-paced world it is becoming increasingly difficult to stay abreast of the public discourse. With the advent of hundreds of closed-captioned cable channels and internet-based channels such as news feeds, blogs, or email, knowing the "buzz" is a particular challenge. TextPool addresses this problem by quickly summarizing recent content in live text streams. The summarization is a dynamically changing textual collage that clusters related terms. We tested TextPool with the content of several RSS newswire feeds, which are updated roughly every five minutes. TextPool was able to handle this bandwidth well, and produced useful summarizations of feed content.},
}
@inproceedings{p1556,
booktitle = {Proc. InfoVis},
year = 2004,
title = {The InfoVis Toolkit},
doi = {10.1109/INFVIS.2004.64},
url = {http://dx.doi.org/10.1109/INFVIS.2004.64},
author = {Fekete, J.},
pages = {167--174},
keywords = {Information Visualization, Toolkit, Graphics, Integration},
abstract = {This article presents the InfoVis toolkit, designed to support the creation, extension and integration of advanced 2D information visualization components into interactive Java swing applications. The InfoVis toolkit provides specific data structures to achieve a fast action/feedback loop required by dynamic queries. It comes with a large set of components such as range sliders and tailored control panels required to control and configure the visualizations. These components are integrated into a coherent framework that simplifies the management of rich data structures and the design and extension of visualizations. Supported data structures currently include tables, trees and graphs. Supported visualizations include scatter plots, time series, parallel coordinates, treemaps, icicle trees, node-link diagrams for trees and graphs and adjacency matrices for graphs. All visualizations can use fisheye lenses and dynamic labeling. The InfoVis toolkit supports hardware acceleration when available through Agile2D, an implementation of the Java graphics API based on OpenGL, achieving speedups of 10 to 200 times. The article also shows how new visualizations can be added and extended to become components, enriching visualizations as well as general applications},
}
@inproceedings{p1557,
booktitle = {Proc. InfoVis},
year = 2004,
title = {Time-Varying Data Visualization Using Information Flocking Boids},
doi = {10.1109/INFVIS.2004.65},
url = {http://dx.doi.org/10.1109/INFVIS.2004.65},
author = {Moere, A.V.},
pages = {97--104},
keywords = {time-varying information visualization, artificial life, 3D information visualization, motion, boids},
abstract = {This research demonstrates how principles of self-organization and behavior simulation can be used to represent dynamic data evolutions by extending the concept of information flocking, originally introduced by Proctor & Winter (1998), to time-varying datasets. A rule-based behavior system continuously controls and updates the dynamic actions of individual, three-dimensional elements that represent the changing data values of reoccurring data objects. As a result, different distinguishable motion types emerge that are driven by local interactions between the spatial elements as well as the evolution of time-varying data values. Notably, this representation technique focuses on the representation of dynamic data alteration characteristics, or how reoccurring data objects change over time, instead of depicting the exact data values themselves. In addition, it demonstrates the potential of motion as a useful information visualization cue. The original information flocking approach is extended to incorporate time-varying datasets, live database querying, continuous data streaming, real-time data similarity evaluation, automatic shape generation and more stable flocking algorithms. Different experiments prove that information flocking is capable of representing short-term events as well as long-term temporal data evolutions of both individual and groups of time-dependent data objects. An historical stock market quote price dataset is used to demonstrate the algorithms and principles of time-varying information flocking},
}
@inproceedings{p1558,
booktitle = {Proc. InfoVis},
year = 2004,
title = {Topological Fisheye Views for Visualizing Large Graphs},
doi = {10.1109/INFVIS.2004.66},
url = {http://dx.doi.org/10.1109/INFVIS.2004.66},
author = {Gansner, E. and Koren, Y. and North, S.C.},
pages = {175--182},
keywords = {topological fisheye,large graph visualization},
abstract = {Graph drawing is a basic visualization tool. For graphs of up to hundreds of nodes and edges, there are many effective techniques available. At greater scale, data density and occlusion problems often negate its effectiveness. Conventional pan-and-zoom, and multiscale and geometric fisheye views are not fully satisfactory solutions to this problem. As an alternative, we describe a topological zooming method. It is based on the precomputation of a hierarchy of coarsened graphs, which are combined on the fly into renderings with the level of detail dependent on the distance from one or more foci. We also discuss a related distortion method that allows our technique to achieve constant information density displays},
}
@misc{p1559,
year = 2004,
title = {Tracking User Interactions Within Visualizations},
doi = {10.1109/INFVIS.2004.67},
url = {http://dx.doi.org/10.1109/INFVIS.2004.67},
author = {Groth, D.P. and Murphy, B.W.},
pages = {9--9},
keywords = {},
abstract = {We present a model and prototype system for tracking user interactions within a visualization. The history of the interactions is exposed to the user in a way that supports non-linear navigation of the visualization space. The interactions can be augmented with annotations, which, together with the interactions, can be shared with other users and applied to other data in a seamless way. The techniques constitute a novel approach for documenting information provenance.},
}
@inproceedings{p1560,
booktitle = {Proc. InfoVis},
year = 2004,
title = {Uncovering Clusters in Crowded Parallel Coordinates Visualizations},
doi = {10.1109/INFVIS.2004.68},
url = {http://dx.doi.org/10.1109/INFVIS.2004.68},
author = {Artero, A.O. and de Oliveira, M.C.F. and Levkowitz, H.},
pages = {81--88},
keywords = {information visualization, visual clustering, density-based visualization, visual data mining},
abstract = {The one-to-one strategy of mapping each single data item into a graphical marker adopted in many visualization techniques has limited usefulness when the number of records and/or the dimensionality of the data set are very high. In this situation, the strong overlapping of graphical markers severely hampers the user's ability to identify patterns in the data from its visual representation. We tackle this problem here with a strategy that computes frequency or density information from the data set, and uses such information in parallel coordinates visualizations to filter out the information to be presented to the user, thus reducing visual clutter and allowing the analyst to observe relevant patterns in the data. The algorithms to construct such visualizations, and the interaction mechanisms supported, inspired by traditional image processing techniques such as grayscale manipulation and thresholding are also presented. We also illustrate how such algorithms can assist users to effectively identify clusters in very noisy large data sets},
}
@misc{p1561,
year = 2004,
title = {Understanding Eight Years of InfoVis Conferences Using PaperLens},
doi = {10.1109/INFVIS.2004.69},
url = {http://dx.doi.org/10.1109/INFVIS.2004.69},
author = {Bongshin Lee and Czerwinski, M. and Robertson, G. and Bederson, B.B.},
pages = {r3--r3},
keywords = {},
abstract = {We present PaperLens, a visualization that reveals connections, trends, and activity throughout the InfoVis conference community for the last 8 years. It tightly couples views across papers, authors, and references. This paper describes how we analyzed the data, the strengths and weaknesses of PaperLens, and interesting patterns and relationships we have discovered using PaperLens.},
}
@inproceedings{p1562,
booktitle = {Proc. InfoVis},
year = 2004,
title = {User Experiments with Tree Visualization Systems},
doi = {10.1109/INFVIS.2004.70},
url = {http://dx.doi.org/10.1109/INFVIS.2004.70},
author = {Kobsa, A.},
pages = {9--16},
keywords = {information visualization, experimental comparison, task performance, accuracy, user satisfaction, user interaction, design recommendations},
abstract = {This paper describes a comparative experiment with five well-known tree visualization systems, and Windows Explorer as a baseline system. Subjects performed tasks relating to the structure of a directory hierarchy, and to attributes of files and directories. Task completion times, correctness and user satisfaction were measured, and video recordings of subjects' interaction with the systems were made. Significant system and task type effects and an interaction between system and task type were found. Qualitative analyses of the video recordings were thereupon conducted to determine reasons for the observed differences, resulting in several findings and design recommendations as well as implications for future experiments with tree visualization systems},
}
@inproceedings{p1563,
booktitle = {Proc. InfoVis},
year = 2004,
title = {Value and Relation Display for Interactive Exploration of High Dimensional Datasets},
doi = {10.1109/INFVIS.2004.71},
url = {http://dx.doi.org/10.1109/INFVIS.2004.71},
author = {Jing Yang and Anilkumar Patro and Huang Shiping and Nishant Mehta and Ward, M.O. and Rundensteiner, E.A.},
pages = {73--80},
keywords = {Multi-dimensional visualization, pixel-oriented, multi-dimensional scaling, high dimensional datasets},
abstract = {Traditional multidimensional visualization techniques, such as glyphs, parallel coordinates and scatterplot matrices, suffer from clutter at the display level and difficult user navigation among dimensions when visualizing high dimensional datasets. In this paper, we propose a new multidimensional visualization technique named a value and relation (VaR) display, together with a rich set of navigation and selection tools, for interactive exploration of datasets with up to hundreds of dimensions. By explicitly conveying the relationships among the dimensions of a high dimensional dataset, the VaR display helps users grasp the associations among dimensions. By using pixel-oriented techniques to present values of the data items in a condensed manner, the VaR display reveals data patterns in the dataset using as little screen space as possible. The navigation and selection tools enable users to interactively reduce clutter, navigate within the dimension space, and examine data value details within context effectively and efficiently. The VaR display scales well to datasets with large numbers of data items by employing sampling and texture mapping. A case study on a real dataset, as well as the VaR displays of multiple real datasets throughout the paper, reveals how our proposed approach helps users interactively explore high dimensional datasets with large numbers of data items},
}
@misc{p1564,
year = 2004,
title = {VIM: A Framework for Intelligence Analysis},
doi = {10.1109/INFVIS.2004.72},
url = {http://dx.doi.org/10.1109/INFVIS.2004.72},
author = {Keahey, T.A. and Cox, K.C.},
pages = {22--22},
keywords = {},
abstract = {Intelligence analysts receive thousands of facts from a variety of sources. In addition to the bare details of the fact - a particular person, for example - each fact may have provenance, reliability, weight, and other attributes. Each fact may also be associated with other facts, e.g. that one person met another at a particular location. The analyst's task is to examine a huge collection of such loosely-structured facts, and try to "connect the dots" to perceive the underlying and unknown causes - and their possible future courses. We have designed and implemented a Java platform called VIM to support intelligence analysts in their work.},
}
@misc{p1565,
year = 2004,
title = {Visual Browsing of Remote and Distributed Data},
doi = {10.1109/INFVIS.2004.73},
url = {http://dx.doi.org/10.1109/INFVIS.2004.73},
author = {Krishnaswamy, P. and Eick, S.G. and Grossman, R.},
pages = {12--12},
keywords = {},
abstract = {Data repositories around the world hold many thousands of data sets. Finding information from these data sets is greatly facilitated by being able to quickly and efficiently browse remote data sets. In this note, we introduce the Iconic Remote Visual Data Exploration tool (IRVDX), which is a visual data mining tool used for exploring the features of remote and distributed data without the necessity of downloading the entire data set. IRVDX employs three kinds of visualizations: one provides a reduced representation of the data sets, which we call Dataset Icons. These icons show the important statistical characteristics of data sets and help to identify relevant data sets from distributed repositories. Another one is called the Remote Dataset Visual Browser that provides visualizations to browse remote data without downloading the complete data set to identify its content. The final one provides visualizations to show the degree of similarity between two data sets and to visually determine whether a join of two remote data sets will be meaningful.},
}
@misc{p1566,
year = 2004,
title = {Visualizing and Interacting with Multi-Tree Hierarchical Data},
doi = {10.1109/INFVIS.2004.74},
url = {http://dx.doi.org/10.1109/INFVIS.2004.74},
author = {Mohammadi-Aragh, M.J. and Jankun-Kelly, T.J.},
pages = {15--15},
keywords = {},
abstract = {This work focuses on visualizing highly cyclic hierarchical data. A user interface is discussed and its interaction is illustrated using a recipe database example. This example showcases a database with multiple categories for each recipe (database entry).},
}
@misc{p1567,
year = 2004,
title = {Visualizing E-mail with a Semantically Zoomable Interface},
doi = {10.1109/INFVIS.2004.75},
url = {http://dx.doi.org/10.1109/INFVIS.2004.75},
author = {Diep, E. and Jacob, R.},
pages = {6--6},
keywords = {},
abstract = {We introduce a semantically zoomable interface that displays emails as interactive objects rather than files containing lines of text, as in traditional e-mail interfaces. In this system, e-mails are displayed as node objects called e-mail nodes within a 2.5-dimensional world. The e-mail nodes are semantically zoomable and each may be rearranged to different locations within the plane to organize threads, topics, or projects. The prototype for this system was built using the Piccolo toolkit, the successor of Pad++ and Jazz [2, 3].},
}
@misc{p1568,
year = 2004,
title = {Visualizing High Dimensional Datasets Using Partiview},
doi = {10.1109/INFVIS.2004.76},
url = {http://dx.doi.org/10.1109/INFVIS.2004.76},
author = {Surendran, D. and Levy, S.},
pages = {20--20},
keywords = {},
abstract = {A standard method of visualizing high-dimensional data is reducing its dimensionality to two or three using some algorithm, and then creating a scatterplot with data represented by labelled and/or colored dots. Two problems with this approach are (1) dots do not represent data well, (2) reducing to just three dimensions does not make full use of several dimensionality-reduction algorithms. We demonstrate how Partiview can be used to solve these problems, in the context of handwriting recognition and image retrieval.},
}
@misc{p1569,
year = 2004,
title = {WilmaScope Graph Visualisation},
doi = {10.1109/INFVIS.2004.77},
url = {http://dx.doi.org/10.1109/INFVIS.2004.77},
author = {Ahmed, A. and Dwyer, T. and Murray, C. and Le Song and Ying Xin Wu},
pages = {r4--r4},
keywords = {},
abstract = {Our visualisation of the IEEE InfoVis citation network is based on 3D graph visualisation techniques. To make effective use of the third dimension we use a layered approach, constraining nodes to lie on parallel planes depending on parameters such as year of publication or link degree. Within the parallel planes nodes are arranged using a fast force-directed layout method. A number of clusters representing different research areas were identified using a self organising map approach.},
}
@inproceedings{p1681,
booktitle = {Proc. InfoVis},
year = 2003,
title = {A model of multi-scale perceptual organization in information graphics},
doi = {10.1109/INFVIS.2003.1249005},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249005},
author = {Wattenberg, M. and Fisher, D.},
pages = {23--30},
keywords = {Visualization, Perceptual Organization, Scale Space, Design Methodology},
abstract = {We propose a new method for assessing the perceptual organization of information graphics, based on the premise that the visual structure of an image should match the structure of the data it is intended to convey. The core of our method is a new formal model of one type of perceptual structure, based on classical machine vision techniques for analyzing an image at multiple resolutions. The model takes as input an arbitrary grayscale image and returns a lattice structure describing the visual organization of the image. We show how this model captures several aspects of traditional design aesthetics, and we describe a software tool that implements the model to help designers analyze and refine visual displays. Our emphasis here is on demonstrating the model's potential as a design aid rather than as a description of human perception, but given its initial promise we propose a variety of ways in which the model could be extended and validated.},
}
@inproceedings{p1682,
booktitle = {Proc. InfoVis},
year = 2003,
title = {A virtual workspace for hybrid multidimensional scaling algorithms},
doi = {10.1109/INFVIS.2003.1249013},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249013},
author = {Ross, G. and Chalmers, M.},
pages = {91--96},
keywords = {Data-flow, visual programming, multidimensional scaling, multiple views, hybrid algorithms, complexity},
abstract = {In visualising multidimensional data, it is well known that different types of data require different types of algorithms to process them. Data sets might be distinguished according to volume, variable types and distribution, and each of these characteristics imposes constraints upon the choice of applicable algorithms for their visualization. Previous work has shown that a hybrid algorithmic approach can be successful in addressing the impact of data volume on the feasibility of multidimensional scaling (MDS). This suggests that hybrid combinations of appropriate algorithms might also successfully address other characteristics of data. This paper presents a system and framework in which a user can easily explore hybrid algorithms and the data flowing through them. Visual programming and a novel algorithmic architecture let the user semi-automatically define data flows and the co-ordination of multiple views.},
}
@inproceedings{p1683,
booktitle = {Proc. InfoVis},
year = 2003,
title = {An experimental evaluation of continuous semantic zooming in program visualization},
doi = {10.1109/INFVIS.2003.1249021},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249021},
author = {Summers, K.L. and Goldsmith, T.E. and Kubica, S. and Caudell, T.P.},
pages = {155--162},
keywords = {Program visualization, Human subjects testing, Visual program languages},
abstract = {This paper presents the results of an experiment aimed at investigating how different methods of viewing visual programs affect users' understanding. The first two methods used traditional flat and semantic zooming models of program representation; the third is a new representation that uses semantic zooming combined with blending and proximity. The results of several search tasks performed by approximately 80 participants showed that the new method resulted in both faster and more accurate searches than the other methods.},
}
@inproceedings{p1684,
booktitle = {Proc. InfoVis},
year = 2003,
title = {BARD: A visualization tool for biological sequence analysis},
doi = {10.1109/INFVIS.2003.1249029},
url = {http://doi.ieeecomputersociety.org/10.1109/INFVIS.2003.1249029},
author = {Spell, R. and Brady, R. and Dietrich, F.},
pages = {219--225},
keywords = {sequence analysis, comparative genomics, visualization, arc diagram, BARD},
abstract = {We present BARD (biological arc diagrams), a visualization tool for biological sequence analysis. The development of BARD began with the application of Wattenberg's arc diagrams [Wattenberg 2002] to results from sequence analysis programs, such as BLAST [Altschul et al. 1990]. In this paper, we extend the initial arc diagram concept in two ways: 1) by separating the visualization method from the underlying matching algorithm and 2) by expanding the types of matches to include inexact matches, complemented palindrome matches, and inter-sequence matches. BARD renders each type of match distinctly, resulting in a powerful tool to quickly understand sequence similarities and differences. We illustrate the power of BARD by applying the technique to a comparative sequence analysis of the human pathogenic fungi Cryptococcus neoformans.},
}
@inproceedings{p1685,
booktitle = {Proc. InfoVis},
year = 2003,
title = {Between aesthetics and utility: designing ambient information visualizations},
doi = {10.1109/INFVIS.2003.1249031},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249031},
author = {Skog, T. and Ljungblad, S. and Holmquist, L.E.},
pages = {233--240},
keywords = {Ambient information visualization, informative art, ambient displays, calm technology},
abstract = {Unlike traditional information visualization, ambient information visualizations reside in the environment of the user rather than on the screen of a desktop computer. Currently, most dynamic information that is displayed in public places consists of text and numbers. We argue that information visualization can be employed to make such dynamic data more useful and appealing. However, visualizations intended for non-desktop spaces will have to both provide valuable information and present an attractive addition to the environment - they must strike a balance between aesthetical appeal and usefulness. To explore this, we designed a real-time visualization of bus departure times and deployed it in a public space, with about 300 potential users. To make the presentation more visually appealing, we took inspiration from a modern abstract artist. The visualization was designed in two passes. First, we did a preliminary version that was presented to and discussed with prospective users. Based on their input, we did a final design. We discuss the lessons learned in designing this and previous ambient information visualizations, including how visual art can be used as a design constraint, and how the choice of information and the placement of the display affect the visualization.},
}
@inproceedings{p1686,
booktitle = {Proc. InfoVis},
year = 2003,
title = {Causality visualization using animated growing polygons},
doi = {10.1109/INFVIS.2003.1249025},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249025},
author = {Elmqvist, N. and Tsigas, P.},
pages = {189--196},
keywords = {causal relations, information visualization, interactive animation},
abstract = {We present Growing Polygons, a novel visualization technique for the graphical representation of causal relations and information flow in a system of interacting processes. Using this method, individual processes are displayed as partitioned polygons with color-coded segments showing dependencies to other processes. The entire visualization is also animated to communicate the dynamic execution of the system to the user. The results from a comparative user study of the method show that the Growing Polygons technique is significantly more efficient than the traditional Hasse diagram visualization for analysis tasks related to deducing information flow in a system for both small and large executions. Furthermore, our findings indicate that the correctness when solving causality tasks is significantly improved using our method. In addition, the subjective ratings of the users rank the method as superior in all regards, including usability, efficiency, and enjoyability.},
}
@inproceedings{p1687,
booktitle = {Proc. InfoVis},
year = 2003,
title = {Compound brushing},
doi = {10.1109/INFVIS.2003.1249024},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249024},
author = {Hong Chen},
pages = {181--188},
keywords = {brushing, selection, dynamic graphics, data visualization, higraph, visual programming, dynamic query},
abstract = {This paper proposes a conceptual model called compound brushing for modeling the brushing techniques used in dynamic data visualization. In this approach, brushing techniques are modeled as higraphs with five types of basic entities: data, selection, device, renderer, and transformation. Using this model, a flexible visual programming tool is designed not only to configure/control various common types of brushing techniques currently used in dynamic data visualization, but also to investigate new brushing techniques.},
}
@inproceedings{p1688,
booktitle = {Proc. InfoVis},
year = 2003,
title = {Constant density displays using diversity sampling},
doi = {10.1109/INFVIS.2003.1249019},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249019},
author = {Derthick, M. and Christel, M.G. and Hauptmann, A.G. and Wactlar, H.D.},
pages = {137--144},
keywords = {Information Visualization, Collage},
abstract = {The Informedia Digital Video Library user interface summarizes query results with a collage of representative keyframes. We present a user study in which keyframe occlusion caused difficulties. To use the screen space most efficiently to display images, both occlusion and wasted whitespace should be minimized. Thus optimal choices will tend toward constant density displays. However, previous constant density algorithms are based on global density, which leads to occlusion and empty space if the density is not uniform. We introduce an algorithm that considers the layout of individual objects and avoids occlusion altogether. Efficiency concerns are important for dynamic summaries of the Informedia Digital Video Library, which has hundreds of thousands of shots. Posting multiple queries that take into account parameters of the visualization as well as the original query reduces the amount of work required. This greedy algorithm is then compared to an optimal one. The approach is also applicable to visualizations containing complex graphical objects other than images, such as text, icons, or trees.},
}
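The greedy, occlusion-free placement idea described in the abstract above can be illustrated with a small sketch. This is not the Informedia implementation; the candidate list, priorities, and rectangle sizes are invented for illustration. Each candidate keyframe rectangle is kept only if it does not intersect any rectangle already placed.

# Minimal sketch of greedy, occlusion-free placement of rectangles
# (illustrative only; not the Informedia algorithm).

def overlaps(a, b):
    """Axis-aligned rectangles as (x, y, w, h); True if they intersect."""
    ax, ay, aw, ah = a
    bx, by, bw, bh = b
    return not (ax + aw <= bx or bx + bw <= ax or
                ay + ah <= by or by + bh <= ay)

def greedy_place(candidates):
    """Keep each candidate rectangle only if it occludes nothing placed so far."""
    placed = []
    for rect in candidates:                 # candidates assumed sorted by priority
        if all(not overlaps(rect, p) for p in placed):
            placed.append(rect)
    return placed

# Hypothetical candidates: (x, y, width, height), highest priority first.
print(greedy_place([(0, 0, 4, 3), (2, 1, 4, 3), (5, 0, 3, 3)]))
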
@inproceedings{p1689,
booktitle = {Proc. InfoVis},
year = 2003,
title = {Conveying shape with texture: an experimental investigation of the impact of texture type on shape categorization judgments},
doi = {10.1109/INFVIS.2003.1249022},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249022},
author = {Kim, S. and Hagh-Shenas, H. and Interrante, V.},
pages = {163--170},
keywords = {shape perception, texture, principal directions},
abstract = {As visualization researchers, we are interested in gaining a better understanding of how to effectively use texture to facilitate shape perception. If we could design the ideal texture pattern to apply to an arbitrary smoothly curving shape to be most accurately and effectively perceived, what would the characteristics of that texture pattern be? In this paper we describe the results of a comprehensive controlled observer experiment intended to yield insight into that question. Here, we report the results of a new study comparing the relative accuracy of observers' judgments of shape type (elliptical, cylindrical, hyperbolic or flat) and shape orientation (convex, concave, both, or neither) for local views of boundary masked quadric surface patches under six different principal direction texture pattern conditions plus two texture conditions (an isotropic pattern and a non-principal direction oriented anisotropic pattern), under both perspective and orthographic projection conditions and from both head-on and oblique viewpoints. Our results confirm the hypothesis that accurate shape perception is facilitated to a statistically significantly greater extent by some principal direction texture patterns than by others. Specifically, we found that, for both views, under conditions of perspective projection, participants more often correctly identified the shape category and the shape orientation when the surface was textured with the pattern that contained oriented energy along both the first and second principal directions only than in the case of any other texture condition. Patterns containing markings following only one of the principal directions, or containing information along other directions in addition to the principal directions yielded poorer performance overall.},
}
@inproceedings{p1690,
booktitle = {Proc. InfoVis},
year = 2003,
title = {Coordinated graph and scatter-plot views for the visual exploration of microarray time-series data},
doi = {10.1109/INFVIS.2003.1249023},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249023},
author = {Craig, P. and Kennedy, J.},
pages = {173--180},
keywords = {Bioinformatics, Microarrays, Information Visualization, Time-Series, Multiple-views},
abstract = {Microarrays are a relatively new, high-throughput data acquisition technology for investigating biological phenomena at the micro-level. One of the more common procedures for microarray experimentation is that of the microarray time-course experiment. The product of a microarray time-course experiment is time-series data, which subject to proper analysis has the potential to have significant impact on the diagnosis, treatment, and prevention of diseases. While existing information visualization techniques go some way to making microarray time-series data more manageable, requirements analysis has revealed significant limitations. The main finding was that users were unable to uncover and quantify common changes in value over a specified time-period. This paper describes a novel technique that provides this functionality by allowing the user to visually formulate and modify measurable queries with separate time-period and condition components. These visual queries are supported by the combination of a traditional value against time graph representation of the data with a complementary scatter-plot representation of a specified time-period. The multiple views of the visualization are coordinated so that the user can formulate and modify queries with rapid reversible display of query results in the traditional value against time graph format.},
}
@inproceedings{p1691,
booktitle = {Proc. InfoVis},
year = 2003,
title = {Design choices when architecting visualizations},
doi = {10.1109/INFVIS.2003.1249007},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249007},
author = {Tang, D. and Stolte, C. and Bosch, R.},
pages = {41--48},
keywords = {information visualization, system architecture, semantic meta-data, data transformations, design tradeoffs},
abstract = {In this paper, we focus on some of the key design decisions we faced during the process of architecting a visualization system and present some possible choices, with their associated advantages and disadvantages. We frame this discussion within the context of Rivet, our general visualization environment designed for rapidly prototyping interactive, exploratory visualization tools for analysis. As we designed increasingly sophisticated visualizations, we needed to refine Rivet in order to be able to create these richer displays for larger and more complex data sets. The design decisions we discuss in this paper include: the internal data model, data access, semantic meta-data information the visualization can use to create effective visual encodings, the need for data transformations in a visualization tool, modular objects for flexibility, and the tradeoff between simplicity and expressiveness when providing methods for creating visualizations.},
}
@inproceedings{p1692,
booktitle = {Proc. InfoVis},
year = 2003,
title = {Developing architectural lighting representations},
doi = {10.1109/INFVIS.2003.1249032},
url = {http://doi.ieeecomputersociety.org/10.1109/INFVIS.2003.1249032},
author = {Glaser, D. C. and Tan, R. and Canny, J. and Do, E.Y.},
pages = {241--248},
keywords = {information visualization, qualitative analysis, ethnographic fieldwork, architectural lighting design, energy efficiency},
abstract = {This paper reports on the development of a visualization system for architectural lighting designers. It starts by motivating the problem as both complex in its physics and social organization. Three iterations of prototypes for displaying time and space varying phenomena are discussed. Fieldwork is presented to identify where in practice they will be most effective. A set of user studies, one of which is analyzed in fine-grained detail, show how building designers incorporate visualization on hypothetical design problems. This has positive implications for both energy efficiency and lighting quality in buildings.},
}
@inproceedings{p1693,
booktitle = {Proc. InfoVis},
year = 2003,
title = {Dynamic visualization of transient data streams},
doi = {10.1109/INFVIS.2003.1249014},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249014},
author = {Pak Chung Wong and Foote, H. and Adams, D. and Cowley, W. and Thomas, J.},
pages = {97--104},
keywords = {Dynamic Visualization, Text Visualization, Remote Sensing Imagery, Transient Data Stream},
abstract = {We introduce two dynamic visualization techniques using multidimensional scaling to analyze transient data streams such as newswires and remote sensing imagery. While the time-sensitive nature of these data streams requires immediate attention in many applications, the unpredictable and unbounded characteristics of this information can potentially overwhelm many scaling algorithms that require a full re-computation for every update. We present an adaptive visualization technique based on data stratification to ingest stream information adaptively when influx rate exceeds processing rate. We also describe an incremental visualization technique based on data fusion to project new information directly onto a visualization subspace spanned by the singular vectors of the previously processed neighboring data. The ultimate goal is to leverage the value of legacy and new information and minimize re-processing of the entire dataset in full resolution. We demonstrate these dynamic visualization results using a newswire corpus and a remote sensing imagery sequence.},
}
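The incremental-visualization idea in the abstract above can be sketched in a few lines, assuming a simple item-by-feature matrix (the data shapes and column counts are invented, and this is not the authors' full stratification/fusion pipeline): take the singular vectors of the already-processed data and project newly arriving rows onto that subspace instead of recomputing the decomposition.

import numpy as np

# Sketch of projecting new data onto a subspace spanned by the singular
# vectors of previously processed data (illustrative dimensions).
rng = np.random.default_rng(0)
old = rng.normal(size=(200, 50))      # previously processed items x features
new = rng.normal(size=(10, 50))       # newly arriving items

k = 2                                  # target (visualization) dimensionality
_, _, vt = np.linalg.svd(old, full_matrices=False)
basis = vt[:k].T                       # top-k right singular vectors, shape (50, k)

old_coords = old @ basis               # existing layout coordinates
new_coords = new @ basis               # new items dropped into the same subspace
print(old_coords.shape, new_coords.shape)   # (200, 2) (10, 2)
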
@inproceedings{p1694,
booktitle = {Proc. InfoVis},
year = 2003,
title = {Edgelens: an interactive method for managing edge congestion in graphs},
doi = {10.1109/INFVIS.2003.1249008},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249008},
author = {Wong, N. and Carpendale, S. and Greenberg, S.},
pages = {51--58},
keywords = { Navigation, graph layout, distortion lens, information visualization, edge congestion, interactive visualization},
abstract = {An increasing number of tasks require people to explore, navigate and search extremely complex data sets visualized as graphs. Examples include electrical and telecommunication networks, Web structures, and airline routes. The problem is that graphs of these real world data sets have many interconnected nodes, ultimately leading to edge congestion: the density of edges is so great that they obscure nodes, individual edges, and even the visual information beneath the graph. To address this problem we developed an interactive technique called EdgeLens. An EdgeLens interactively curves graph edges away from a person's focus of attention without changing the node positions. This opens up sufficient space to disambiguate node and edge relationships and to see underlying information while still preserving node layout. Initially two methods of creating this interaction were developed and compared in a user study. The results of this study were used in the selection of a basic approach and the subsequent development of the EdgeLens. We then improved the EdgeLens through use of transparency and colour and by allowing multiple lenses to appear on the graph.},
}
@inproceedings{p1695,
booktitle = {Proc. InfoVis},
year = 2003,
title = {Empirical comparison of dynamic query sliders and brushing histograms},
doi = {10.1109/INFVIS.2003.1249020},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249020},
author = {Qing Li and North, C.},
pages = {147--153},
keywords = {Dynamic query, slider, histogram, usability study, information visualization, multidimensional visualization},
abstract = {Dynamic queries facilitate rapid exploration of information by real-time visual display of both query formulation and results. Dynamic query sliders are linked to the main visualization to filter data. A common alternative to dynamic queries is to link several simple visualizations, such as histograms, to the main visualization with a brushing interaction strategy. Selecting data in the histograms highlights that data in the main visualization. We compare these two approaches in an empirical experiment on DataMaps, a geographic data visualization tool. Dynamic query sliders resulted in better performance for simple range tasks, while brushing histograms was better for complex trend evaluation and attribute relation tasks. Participants preferred brushing histograms for understanding relationships between attributes and the rich information they provided.},
}
@misc{p1696,
year = 2003,
title = {Exploding the frame: designing for wall-size computer displays},
doi = {10.1109/INFVIS.2003.1249002},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249002},
author = {Shedd, B.},
keywords = {},
abstract = {High-resolution wall-size digital displays present significant new and different visual space to show and see imagery. The author has been working with two wall-size digital displays at Princeton University for five years and directing and producing IMAX films for a decade, and he has noted some unique design considerations for creating effective visual images when they are spread across entire walls. The author suggests these "frameless" screens - where images are so large we need to look around to see the entire field - need different ways of thinking about image design and visualization. Presenting such things as scale and detail take on new meaning when they can be displayed life-size and not shown in the context of one or many small frames such as we see everywhere. These design ideas will be of use for pervasive computing, interface research and design, interactive design, control design, representations of massive data sets, and creating effective displays of data for research and education.},
}
@inproceedings{p1697,
booktitle = {Proc. InfoVis},
year = 2003,
title = {Exploring high-D spaces with multiform matrices and small multiples},
doi = {10.1109/INFVIS.2003.1249006},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249006},
author = {MacEachren, A.M. and Xiping, D. and Hardisty, F. and Diansheng Guo and Lengerich, G.},
pages = {31--38},
keywords = {geovisualization, EDA, scatterplot matrix, bivariate map, space-filling visualization, conditional entropy, small multiples, conditioning, GeoVISTA Studio},
abstract = {We introduce an approach to visual analysis of multivariate data that integrates several methods from information visualization, exploratory data analysis (EDA), and geovisualization. The approach leverages the component-based architecture implemented in GeoVISTA Studio to construct a flexible, multiview, tightly (but generically) coordinated, EDA toolkit. This toolkit builds upon traditional ideas behind both small multiples and scatterplot matrices in three fundamental ways. First, we develop a general, multiform, bivariate matrix and a complementary multiform, bivariate small multiple plot in which different bivariate representation forms can be used in combination. We demonstrate the flexibility of this approach with matrices and small multiples that depict multivariate data through combinations of: scatterplots, bivariate maps, and space-filling displays. Second, we apply a measure of conditional entropy to (a) identify variables from a high-dimensional data set that are likely to display interesting relationships and (b) generate a default order of these variables in the matrix or small multiple display. Third, we add conditioning, a kind of dynamic query/filtering in which supplementary (undisplayed) variables are used to constrain the view onto variables that are displayed. Conditioning allows the effects of one or more well understood variables to be removed from the analysis, making relationships among remaining variables easier to explore. We illustrate the individual and combined functionality enabled by this approach through application to analysis of cancer diagnosis and mortality data and their associated covariates and risk factors.},
}
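The conditional-entropy ranking step mentioned in the abstract above is easy to sketch with a histogram-based estimate (bin counts and the toy variables below are placeholders, not the GeoVISTA Studio implementation): using H(Y|X) = H(X,Y) - H(X), variable pairs with low conditional entropy are flagged as potentially interesting.

import numpy as np

def entropy(p):
    """Shannon entropy of a (flattened) probability array, ignoring zero bins."""
    p = p[p > 0]
    return -np.sum(p * np.log2(p))

def conditional_entropy(x, y, bins=10):
    """Estimate H(Y|X) = H(X,Y) - H(X) from a 2D histogram of two variables."""
    joint, _, _ = np.histogram2d(x, y, bins=bins)
    joint = joint / joint.sum()
    px = joint.sum(axis=1)
    return entropy(joint.ravel()) - entropy(px)

rng = np.random.default_rng(1)
x = rng.normal(size=1000)
y_dependent = 2 * x + rng.normal(scale=0.1, size=1000)
y_independent = rng.normal(size=1000)
print(conditional_entropy(x, y_dependent))    # low: y is nearly determined by x
print(conditional_entropy(x, y_independent))  # higher: x explains little about y
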
@inproceedings{p1698,
booktitle = {Proc. InfoVis},
year = 2003,
title = {FundExplorer: supporting the diversification of mutual fund portfolios using context treemaps},
doi = {10.1109/INFVIS.2003.1249027},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249027},
author = {Csallner, C. and Handte, M. and Lehmann, O. and Stasko, J.},
pages = {203--208},
keywords = {information visualization, context, treemap, distortion, query, financial data, stock market, FundExplore},
abstract = {An equity mutual fund is a financial instrument that invests in a set of stocks. Any two different funds may partially invest in some of the same stocks, thus overlap is common. Portfolio diversification aims at spreading an investment over many different stocks in search of greater returns. Helping people with portfolio diversification is challenging because it requires informing them about both their current portfolio of stocks held through funds and the other stocks in the market not invested in yet. Current stock/fund visualization systems either waste screen real estate or fail to provide a visualization of all data points. We have developed a system called FundExplorer that implements a distorted treemap to visualize both the amount of money invested in a person's fund portfolio and the context of remaining market stocks. The FundExplorer system enables people to interactively explore diversification possibilities with their portfolios.},
}
@misc{p1699,
year = 2003,
title = {IEEE Symposium on Information Visualization 2003 (IEEE Cat. No.03TH8714)},
doi = {10.1109/INFVIS.2003.1249000},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249000},
author = {Munzner, T. and North, S.C.},
keywords = {},
abstract = {The following topics are dealt with: computer displays; multiscaling; graphs; high dimensionality; occlusion; visualization evaluation; linking and design studies.},
}
@inproceedings{p1700,
booktitle = {Proc. InfoVis},
year = 2003,
title = {Improving Hybrid MDS with Pivot-Based Searching},
doi = {10.1109/INFVIS.2003.1249012},
url = {http://doi.ieeecomputersociety.org/10.1109/INFVIS.2003.1249012},
author = {Morrison, A. and Chalmers, M.},
pages = {85--90},
keywords = {Multidimensional scaling, MDS, spring models, hybrid algorithms, pivots, near-neighbour search, force directed placement},
abstract = {An algorithm is presented for the visualisation of multidimensional abstract data, building on a hybrid model introduced at InfoVis 2002. The most computationally complex stage of the original model involved performing a nearest-neighbour search for every data item. The complexity of this phase has been reduced by treating all high-dimensional relationships as a set of discretised distances to a constant number of randomly selected pivot items. In improving this computational bottleneck, the complexity is reduced from O(N√N) to O(N^(5/4)). As well as documenting this improvement, the paper describes evaluation with a data set of 108000 14-dimensional items; a considerable increase on the size of data previously tested. Results illustrate that the reduction in complexity is reflected in significantly improved run times and that no negative impact is made upon the quality of layout produced.},
}
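The pivot idea above can be illustrated with a small sketch using random pivots and the triangle-inequality lower bound; the paper additionally discretises the pivot distances, which is omitted here, and the data sizes are invented. Each item is summarised by its distances to a constant number of pivots, and those summaries are compared instead of searching all items directly.

import numpy as np

rng = np.random.default_rng(2)
data = rng.normal(size=(1000, 14))        # items x dimensions (illustrative sizes)

n_pivots = 8
pivots = data[rng.choice(len(data), n_pivots, replace=False)]

# Each item is summarised by its distances to the pivots.
to_pivots = np.linalg.norm(data[:, None, :] - pivots[None, :, :], axis=2)

def lower_bound(i, j):
    """Triangle-inequality bound: max_p |d(i,p) - d(j,p)| <= d(i,j)."""
    return np.max(np.abs(to_pivots[i] - to_pivots[j]))

# Candidate near neighbours of item 0: items whose bound is small
# (item 0 itself comes first with a bound of zero).
bounds = np.max(np.abs(to_pivots - to_pivots[0]), axis=1)
candidates = np.argsort(bounds)[:10]
print(candidates)
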
@misc{p1701,
year = 2003,
title = {Information esthetics: from MoMa to wall street},
doi = {10.1109/INFVIS.2003.1249003},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249003},
author = {Paley, W.B.},
pages = {11--11},
keywords = {},
abstract = {},
}
@inproceedings{p1702,
booktitle = {Proc. InfoVis},
year = 2003,
title = {Intelligently resolving point occlusion},
doi = {10.1109/INFVIS.2003.1249018},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249018},
author = {Trutschl, M. and Grinstein, G. and Cvek, U.},
pages = {131--136},
keywords = {data visualization, information visualization, design, data points, data density, occlusion, identifiable points, jitter, neural networks},
abstract = {Large and high-dimensional data sets mapped to low-dimensional visualizations often result in perceptual ambiguities. One such ambiguity is overlap or occlusion that occurs when the number of records exceeds the number of unique locations in the presentation or when there exist two or more records that map to the same location. To lessen the effect of occlusion, non-standard visual attributes (i.e. shading and/or transparency) are applied, or such records may be remapped to a corresponding jittered location. The resulting mapping efficiently portrays the crowding of records but fails to provide the insight into the relationship between the neighboring records. We introduce a new interactive technique that intelligently organizes overlapped points, a neural network-based smart jittering algorithm. We demonstrate this technique on a scatter plot, the most widely used visualization. The algorithm can be applied to other one, two, and multi-dimensional visualizations which represent data as points, including 3-dimensional scatter plots, RadViz, polar coordinates.},
}
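As a much simpler, deterministic stand-in for the neural-network "smart jittering" described above (purely illustrative; the paper places neighbours according to learned similarity, which is not reproduced here), records that share a screen location can be spread evenly around a small circle:

import numpy as np
from collections import defaultdict

def spread_duplicates(points, radius=0.05):
    """Place records that map to identical coordinates on a small circle.

    A deterministic stand-in for smarter, similarity-aware jittering.
    """
    groups = defaultdict(list)
    for idx, p in enumerate(points):
        groups[tuple(p)].append(idx)

    out = np.asarray(points, dtype=float).copy()
    for center, members in groups.items():
        if len(members) == 1:
            continue
        angles = np.linspace(0, 2 * np.pi, len(members), endpoint=False)
        for angle, idx in zip(angles, members):
            out[idx] = [center[0] + radius * np.cos(angle),
                        center[1] + radius * np.sin(angle)]
    return out

print(spread_duplicates([(1.0, 1.0), (1.0, 1.0), (1.0, 1.0), (2.0, 3.0)]))
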
@inproceedings{p1703,
booktitle = {Proc. InfoVis},
year = 2003,
title = {Interactive hierarchical dimension ordering, spacing and filtering for exploration of high dimensional datasets},
doi = {10.1109/INFVIS.2003.1249015},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249015},
author = {Jing Yang and Wei Peng and Ward, M.O. and Rundensteiner, E.A.},
pages = {105--112},
keywords = {Dimension ordering, dimension spacing, dimension filtering, multidimensional visualization, high dimensional datasets},
abstract = {Large numbers of dimensions not only cause clutter in multi-dimensional visualizations, but also make it difficult for users to navigate the data space. Effective dimension management, such as dimension ordering, spacing and filtering, is critical for visual exploration of such datasets. Dimension ordering and spacing explicitly reveal dimension relationships in arrangement-sensitive multidimensional visualization techniques, such as parallel coordinates, star glyphs, and pixel-oriented techniques. They facilitate the visual discovery of patterns within the data. Dimension filtering hides some of the dimensions to reduce clutter while preserving the major information of the dataset. In this paper, we propose an interactive hierarchical dimension ordering, spacing and filtering approach, called DOSFA. DOSFA is based on dimension hierarchies derived from similarities among dimensions. It is a scalable multi-resolution approach that makes dimension management a tractable task. On the one hand, it automatically generates default settings for dimension ordering, spacing and filtering. On the other hand, it allows users to efficiently control all aspects of this dimension management process via visual interaction tools for dimension hierarchy manipulation. A case study visualizing a dataset containing over 200 dimensions reveals how this approach improves the effectiveness of high dimensional visualization techniques.},
}
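One common stand-in for the similarity-driven dimension ordering described above is to cluster the dimensions by correlation and read off the dendrogram's leaf order so that similar axes end up adjacent. This sketch uses invented data and ordinary hierarchical clustering; DOSFA's own dimension hierarchy and its spacing/filtering steps are not reproduced.

import numpy as np
from scipy.cluster.hierarchy import linkage, leaves_list
from scipy.spatial.distance import squareform

rng = np.random.default_rng(3)
base = rng.normal(size=(500, 1))
# Illustrative data: dimensions 0-2 are correlated, 3-5 are independent noise.
data = np.hstack([base + 0.1 * rng.normal(size=(500, 3)),
                  rng.normal(size=(500, 3))])

# Dissimilarity between dimensions: 1 - |correlation|.
corr = np.corrcoef(data, rowvar=False)
dissim = 1 - np.abs(corr)
np.fill_diagonal(dissim, 0)

order = leaves_list(linkage(squareform(dissim, checks=False), method="average"))
print(order)   # similar dimensions appear next to each other in the axis order
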
@inproceedings{p1704,
booktitle = {Proc. InfoVis},
year = 2003,
title = {Mapping nominal values to numbers for effective visualization},
doi = {10.1109/INFVIS.2003.1249016},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249016},
author = {Rosario, G.E. and Rundensteiner, E.A. and Brown, D.C. and Ward, M.O.},
pages = {113--120},
keywords = { nominal data, visualization, dimension reduction, correspondence analysis, quantification, clustering, classing},
abstract = {Data sets with a large number of nominal variables, some with high cardinality, are becoming increasingly common and need to be explored. Unfortunately, most existing visual exploration displays are designed to handle numeric variables only. When importing data sets with nominal values into such visualization tools, most solutions to date are rather simplistic. Often, techniques that map nominal values to numbers do not assign order or spacing among the values in a manner that conveys semantic relationships. Moreover, displays designed for nominal variables usually cannot handle high cardinality variables well. This paper addresses the problem of how to display nominal variables in general-purpose visual exploration tools designed for numeric variables. Specifically, we investigate (1) how to assign order and spacing among the nominal values, and (2) how to reduce the number of distinct values to display. We propose that nominal variables be pre-processed using a distance-quantification-classing (DQC) approach before being imported into a visual exploration tool. In the distance step, we identify a set of independent dimensions that can be used to calculate the distance between nominal values. In the quantification step, we use the independent dimensions and the distance information to assign order and spacing among the nominal values. In the classing step, we use results from the previous steps to determine which values within a variable are similar to each other and thus can be grouped together. Each step in the DQC approach can be accomplished by a variety of techniques. We extended the XmdvTool package to incorporate this approach. We evaluated our approach on several data sets using a variety of evaluation measures.},
}
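The quantification step of the DQC approach above can be sketched with classical multidimensional scaling on a pairwise distance matrix between nominal values. This is a generic stand-in (the paper's distance and classing steps are not reproduced, and the example distances are invented): the first principal coordinate supplies both an order and a spacing for the values.

import numpy as np

def quantify(dist):
    """Classical MDS: first principal coordinate of a distance matrix.

    Returns one number per nominal value, giving order and spacing.
    """
    d = np.asarray(dist, dtype=float)
    n = len(d)
    j = np.eye(n) - np.ones((n, n)) / n          # centering matrix
    b = -0.5 * j @ (d ** 2) @ j                  # double-centered Gram matrix
    vals, vecs = np.linalg.eigh(b)
    top = np.argmax(vals)
    return vecs[:, top] * np.sqrt(max(vals[top], 0.0))

# Invented pairwise distances between four nominal values A, B, C, D.
dist = np.array([[0, 1, 4, 5],
                 [1, 0, 3, 4],
                 [4, 3, 0, 2],
                 [5, 4, 2, 0]], dtype=float)
print(dict(zip("ABCD", np.round(quantify(dist), 2))))   # A,B end up near each other
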
@inproceedings{p1705,
booktitle = {Proc. InfoVis},
year = 2003,
title = {MoireGraphs: radial focus+context visualization and interaction for graphs with visual nodes},
doi = {10.1109/INFVIS.2003.1249009},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249009},
author = {Jankun-Kelly, T.J. and Kwan-Liu Ma},
pages = {59--66},
keywords = {information visualization, focus+context, radial graph layout, graph drawing},
abstract = {Graph and tree visualization techniques enable interactive exploration of complex relations while communicating topology. However, most existing techniques have not been designed for situations where visual information such as images is also present at each node and must be displayed. This paper presents MoireGraphs to address this need. MoireGraphs combine a new focus+context radial graph layout with a suite of interaction techniques (focus strength changing, radial rotation, level highlighting, secondary foci, animated transitions and node information) to assist in the exploration of graphs with visual nodes. The method is scalable to hundreds of displayed visual nodes.},
}
@inproceedings{p1706,
booktitle = {Proc. InfoVis},
year = 2003,
title = {Multiscale Visualization of Small World Networks},
doi = {10.1109/INFVIS.2003.1249011},
url = {http://doi.ieeecomputersociety.org/10.1109/INFVIS.2003.1249011},
author = {Aubert, D. and Chiricota, Y. and Jourdan, F. and Melançon, G.},
pages = {75--84},
keywords = {Small world networks, multiscale graphs,clustering metric, semantic zooming},
abstract = {Many networks under study in Information Visualization are "small world" networks. These networks first appeared in the study of social networks and were shown to be relevant models in other application domains such as software reverse engineering and biology. Furthermore, many of these networks actually have a multiscale nature: they can be viewed as a network of groups that are themselves small world networks. We describe a metric that has been designed in order to identify the weakest edges in a small world network, leading to an easy and low cost filtering procedure that breaks up a graph into smaller and highly connected components. We show how this metric can be exploited through an interactive navigation of the network based on semantic zooming. Once the network is decomposed into a hierarchy of sub-networks, a user can easily find groups and subgroups of actors and understand their dynamics.},
}
@inproceedings{p1707,
booktitle = {Proc. InfoVis},
year = 2003,
title = {Smooth and efficient zooming and panning},
doi = {10.1109/INFVIS.2003.1249004},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249004},
author = {van Wijk, J.J. and Nuij, W.A.A.},
pages = {15--23},
keywords = {Navigation, zooming, panning, scrolling, scale space},
abstract = {Large 2D information spaces, such as maps, images, or abstract visualizations, require views at various levels of detail: close ups to inspect details, overviews to maintain (literally) an overview. Users often switch between these views. We discuss how smooth animations from one view to another can be defined. To this end, a metric on the effect of simultaneous zooming and panning is defined, based on an estimate of the perceived velocity. Optimal is defined as smooth and efficient. Given the metric, these terms can be translated into a computational model, which is used to calculate an analytic solution for optimal animations. The model has two free parameters: animation speed and zoom/pan trade off. A user experiment to find good values for these is described.},
}
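As a rough illustration of animating zoom and pan simultaneously, the sketch below interpolates the viewport center linearly and its width geometrically. This is a naive baseline, not the paper's perceptually optimal path; the start and end viewports are invented.

import numpy as np

def naive_zoom_pan(c0, w0, c1, w1, steps=5):
    """Interpolate viewport center linearly and width geometrically.

    c0, c1: start/end centers (x, y); w0, w1: start/end viewport widths.
    A naive baseline, not an optimal smooth path.
    """
    frames = []
    for t in np.linspace(0.0, 1.0, steps):
        center = (1 - t) * np.asarray(c0, float) + t * np.asarray(c1, float)
        width = w0 * (w1 / w0) ** t           # geometric interpolation of scale
        frames.append((tuple(center), width))
    return frames

for center, width in naive_zoom_pan((0, 0), 1.0, (100, 40), 50.0):
    print(center, round(width, 2))
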
@misc{p1708,
year = 2003,
title = {Thinking with visualization},
doi = {10.1109/INFVIS.2003.1249001},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249001},
author = {Ware, C.},
pages = {3--3},
keywords = {},
abstract = {},
}
@inproceedings{p1709,
booktitle = {Proc. InfoVis},
year = 2003,
title = {Thread Arcs: an email thread visualization},
doi = {10.1109/INFVIS.2003.1249028},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249028},
author = {Kerr, B.},
pages = {211--218},
keywords = {conversations, discussions, electronic mail, email, information visualization, threads, tree structures, user interfaces},
abstract = {This paper describes Thread Arcs, a novel interactive visualization technique designed to help people use threads found in email. Thread Arcs combine the chronology of messages with the branching tree structure of a conversational thread in a mixed-model visualization by Venolia and Neustaedter (2003) that is stable and compact. By quickly scanning and interacting with Thread Arcs, people can see various attributes of conversations and find relevant messages in them easily. We tested this technique against other visualization techniques with users' own email in a functional prototype email client. Thread Arcs proved an excellent match for the types of threads found in users' email for the qualities users wanted in small-scale visualizations.},
}
@inproceedings{p1710,
booktitle = {Proc. InfoVis},
year = 2003,
title = {Using multilevel call matrices in large software projects},
doi = {10.1109/INFVIS.2003.1249030},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249030},
author = {van Ham, F.},
pages = {227--232},
keywords = {software visualization, multilevel visualization, call matrix},
abstract = {Traditionally, node link diagrams are the prime choice when it comes to visualizing software architectures. However, node link diagrams often fall short when used to visualize large graph structures. In this paper we investigate the use of call matrices as visual aids in the management of large software projects. We argue that call matrices have a number of advantages over traditional node link diagrams when the main object of interest is the link instead of the node. Matrix visualizations can provide stable and crisp layouts of large graphs and are inherently well suited for large multilevel visualizations because of their recursive structure. We discuss a number of visualization issues, using a very large software project currently under development at Philips Medical Systems as a running example.},
}
@inproceedings{p1711,
booktitle = {Proc. InfoVis},
year = 2003,
title = {Visualization of Labeled Data Using Linear Transformation},
doi = {10.1109/INFVIS.2003.1249017},
url = {http://doi.ieeecomputersociety.org/10.1109/INFVIS.2003.1249017},
author = {Koren, Y. and Carmel, L.},
pages = {121--128},
keywords = {visualization, dimensionality-reduction, projection, principal component analysis, Fisher's linear discriminant analysis, eigenprojection, classification},
abstract = {We present a novel family of data-driven linear transformations, aimed at visualizing multivariate data in a low-dimensional space in a way that optimally preserves the structure of the data. The well-studied PCA and Fisher's LDA are shown to be special members in this family of transformations, and we demonstrate how to generalize these two methods such as to enhance their performance. Furthermore, our technique is the only one, to the best of our knowledge, that reflects in the resulting embedding both the data coordinates and pairwise similarities and/or dissimilarities between the data elements. Even more so, when information on the clustering (labeling) decomposition of the data is known, this information can be integrated in the linear transformation, resulting in embeddings that clearly show the separation between the clusters, as well as their intra-structure. All this makes our technique very flexible and powerful, and lets us cope with kinds of data that other techniques fail to describe properly.},
}
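The two classical special cases named in the abstract above can be sketched directly with standard PCA and LDA on toy labeled data (scikit-learn, invented clusters); the paper's generalized family of transformations is not reproduced here.

import numpy as np
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

rng = np.random.default_rng(4)
# Toy labeled data: three Gaussian clusters in 10 dimensions.
centers = rng.normal(scale=4, size=(3, 10))
X = np.vstack([c + rng.normal(size=(100, 10)) for c in centers])
y = np.repeat([0, 1, 2], 100)

pca_2d = PCA(n_components=2).fit_transform(X)                            # unsupervised
lda_2d = LinearDiscriminantAnalysis(n_components=2).fit_transform(X, y)  # uses labels
print(pca_2d.shape, lda_2d.shape)   # (300, 2) (300, 2)
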
@inproceedings{p1712,
booktitle = {Proc. InfoVis},
year = 2003,
title = {Visualization of large-scale customer satisfaction surveys using a parallel coordinate tree},
doi = {10.1109/INFVIS.2003.1249026},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249026},
author = {Brodbeck, D. and Girardin, L.},
pages = {197--201},
keywords = {parallel coordinates, focus+context, hierarchical data, satisfaction survey},
abstract = {Satisfaction surveys are an important measurement tool in fields such as market research or human resources management. Serious studies consist of numerous questions and contain answers from large population samples. Aggregation on both sides, the questions asked as well as the answers received, turns the multidimensional problem into a complex system of interleaved hierarchies. Traditional ways of presenting the results are limited to one-dimensional charts and cross-tables. We developed a visualization method called the Parallel Coordinate Tree that combines multidimensional analysis with a tree structure representation. Distortion-oriented focus+context techniques are used to facilitate interaction with the visualization. In this paper we present a design study of a commercial application that we built, using this method to analyze and communicate results from large-scale customer satisfaction surveys.},
}
@inproceedings{p1713,
booktitle = {Proc. InfoVis},
year = 2003,
title = {Visualizing evolving networks: minimum spanning trees versus pathfinder networks},
doi = {10.1109/INFVIS.2003.1249010},
url = {http://dx.doi.org/10.1109/INFVIS.2003.1249010},
author = {Chen, C. and Morris, S.},
pages = {67--74},
keywords = {Network evolution, network visualization, co-citation networks, Pathfinder networks, minimum spanning trees},
abstract = {Network evolution is an ubiquitous phenomenon in a wide variety of complex systems. There is an increasing interest in statistically modeling the evolution of complex networks such as small-world networks and scale-free networks. In this article, we address a practical issue concerning the visualizations of co-citation networks of scientific publications derived by two widely known link reduction algorithms, namely minimum spanning trees (MSTs) and pathfinder networks (PFNETs). Our primary goal is to identify the strengths and weaknesses of the two methods in fulfilling the need for visualizing evolving networks. Two criteria are derived for assessing visualizations of evolving networks in terms of topological properties and dynamical properties. We examine the animated visualization models of the evolution of botulinum toxin research in terms of its co-citation structure across a 58-year span (1945-2002). The results suggest that although high-degree nodes dominate the structure of MST models, such structures can be inadequate in depicting the essence of how the network evolves because MST removes potentially significant links from high-order shortest paths. In contrast, PFNET models clearly demonstrate their superiority in maintaining the cohesiveness of some of the most pivotal paths, which in turn make the growth animation more predictable and interpretable. We suggest that the design of visualization and modeling tools for network evolution should take the cohesiveness of critical paths into account.},
}
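The MST side of the comparison above is easy to sketch with SciPy on an invented co-citation similarity matrix (Pathfinder network scaling is more involved and omitted): convert similarities to distance-like weights and keep only the minimum spanning tree's edges.

import numpy as np
from scipy.sparse.csgraph import minimum_spanning_tree

# Invented symmetric co-citation similarity matrix for 5 documents.
sim = np.array([[0, 8, 1, 0, 2],
                [8, 0, 6, 1, 0],
                [1, 6, 0, 7, 3],
                [0, 1, 7, 0, 5],
                [2, 0, 3, 5, 0]], dtype=float)

# Turn similarity into a distance-like weight (larger similarity -> smaller weight).
with np.errstate(divide="ignore"):
    dist = np.where(sim > 0, 1.0 / sim, 0.0)   # 0 means "no edge" for csgraph

mst = minimum_spanning_tree(dist).toarray()
edges = [(i, j) for i, j in zip(*np.nonzero(mst))]
print(edges)   # the link-reduced network to visualize
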
@inproceedings{p1795,
booktitle = {Proc. InfoVis},
year = 2002,
title = {A hybrid layout algorithm for sub-quadratic multidimensional scaling},
doi = {10.1109/INFVIS.2002.1173161},
url = {http://dx.doi.org/10.1109/INFVIS.2002.1173161},
author = {Morrison, A. and Ross, G. and Chalmers, M.},
pages = {152--158},
keywords = {},
abstract = {Many clustering and layout techniques have been used for structuring and visualising complex data. This paper is inspired by a number of such contemporary techniques and presents a novel hybrid approach based upon stochastic sampling, interpolation and spring models. We use Chalmers' 1996 O(N²) spring model as a benchmark when evaluating our technique, comparing layout quality and run times using data sets of synthetic and real data. Our algorithm runs in O(N√N) and executes significantly faster than Chalmers' 1996 algorithm, whilst producing superior layouts. In reducing complexity and run time, we allow the visualisation of data sets of previously infeasible size. Our results indicate that our method is a solid foundation for interactive and visual exploration of data.},
}
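The spring-model core that the hybrid above builds on can be sketched as a plain stochastic-neighbour spring iteration on random data (the hybrid's sampling-plus-interpolation pipeline is not reproduced, and the data sizes and step size are invented): each item is repeatedly pulled toward or pushed away from a few randomly chosen others so that layout distances approach the high-dimensional distances.

import numpy as np

rng = np.random.default_rng(5)
data = rng.normal(size=(300, 10))          # high-dimensional items (illustrative)
layout = rng.normal(size=(300, 2))         # 2D positions being refined

def spring_iteration(data, layout, samples=10, step=0.05):
    n = len(data)
    for i in range(n):
        others = rng.choice(n, samples, replace=False)
        for j in others:
            if j == i:
                continue
            d_high = np.linalg.norm(data[i] - data[j])
            delta = layout[i] - layout[j]
            d_low = np.linalg.norm(delta) + 1e-9
            # Move i along the connecting direction to reduce the stress |d_low - d_high|.
            layout[i] -= step * (d_low - d_high) * delta / d_low
    return layout

for _ in range(20):
    layout = spring_iteration(data, layout)
print(layout[:3])
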
@inproceedings{p1796,
booktitle = {Proc. InfoVis},
year = 2002,
title = {A space-optimized tree visualization},
doi = {10.1109/INFVIS.2002.1173152},
url = {http://dx.doi.org/10.1109/INFVIS.2002.1173152},
author = {Quang Vinh Nguyen and Mao Lin Huang},
pages = {85--92},
keywords = {},
abstract = {We describe a new method for the visualization of tree structured relational data. It can be used especially for the display of very large hierarchies in a 2-dimensional space. We discuss the advantages and limitations of current techniques of tree visualization. Our strategy is to optimize the drawing of trees in a geometrical plane and maximize the utilization of display space by allowing more nodes and links to be displayed at a limited screen resolution. We use the concept of enclosure to partition the entire display space into a collection of local regions that are assigned to all nodes in tree T for the display of their sub-trees and themselves. To enable the exploration of large hierarchies, we use a modified semantic zooming technique to view the detail of a particular part of the hierarchy at a time based on the user's interest. Layout animation is also provided to preserve the mental map while the user is exploring the hierarchy by changing zoomed views.},
}
@inproceedings{p1797,
booktitle = {Proc. InfoVis},
year = 2002,
title = {ACE: a fast multiscale eigenvectors computation for drawing huge graphs},
doi = {10.1109/INFVIS.2002.1173159},
url = {http://dx.doi.org/10.1109/INFVIS.2002.1173159},
author = {Koren, Y. and Carmel, L. and Harel, D.},
pages = {137--144},
keywords = {algebraic multigrid, multiscale/multilevel optimization, graph drawing, generalized eigenvalue problem, Fiedler vector, force directed layout, the Hall energy},
abstract = {We present an extremely fast graph drawing algorithm for very large graphs, which we term ACE (for Algebraic multigrid Computation of Eigenvectors). ACE exhibits an improvement of something like two orders of magnitude over the fastest algorithms we are aware of; it draws graphs of millions of nodes in less than a minute. ACE finds an optimal drawing by minimizing a quadratic energy function. The minimization problem is expressed as a generalized eigenvalue problem, which is rapidly solved using a novel algebraic multigrid technique. The same generalized eigenvalue problem seems to come up also in other fields, hence ACE appears to be applicable outside of graph drawing too.},
}
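The quadratic (Hall) energy that ACE minimizes has a well-known direct solution for small graphs: the eigenvectors of the graph Laplacian with the smallest non-zero eigenvalues serve as coordinates. The dense-matrix sketch below is fine for a toy graph; ACE's whole point is a multigrid scheme that avoids this dense computation for graphs with millions of nodes.

import numpy as np

# Adjacency matrix of a small example graph (a 6-cycle).
n = 6
adj = np.zeros((n, n))
for i in range(n):
    adj[i, (i + 1) % n] = adj[(i + 1) % n, i] = 1

lap = np.diag(adj.sum(axis=1)) - adj        # graph Laplacian L = D - A

vals, vecs = np.linalg.eigh(lap)            # eigenvalues in ascending order
coords = vecs[:, 1:3]                       # skip the constant eigenvector
print(np.round(coords, 3))                  # 2D positions: the cycle becomes a hexagon
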
@inproceedings{p1798,
booktitle = {Proc. InfoVis},
year = 2002,
title = {Angular brushing of extended parallel coordinates},
doi = {10.1109/INFVIS.2002.1173157},
url = {http://dx.doi.org/10.1109/INFVIS.2002.1173157},
author = {Hauser, H. and Ledermann, F. and Doleisch, H.},
pages = {127--130},
keywords = {information visualization, parallel coordinates, brushing, linear correlations, focus+context visualization},
abstract = {In this paper we present angular brushing for parallel coordinates (PC) as a new approach to highlighting rational data-properties, i.e., features which - in a non-separable way - depend on two data dimensions. We also demonstrate smooth brushing as an intuitive tool for specifying nonbinary degree-of-interest functions (for focus+context visualization). We also briefly describe our implementation as well as its application to the visualization of CFD data.},
}
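The angular-brushing selection itself reduces to a one-line filter once the data are normalized to axis positions. In the small sketch below, the parallel-coordinates segments, axis spacing, and brushed angle range are all invented: lines whose slope between two adjacent axes falls inside the brushed angular interval are selected.

import numpy as np

rng = np.random.default_rng(6)
# Normalized values on two adjacent parallel-coordinates axes (0..1), 200 records.
left = rng.random(200)
right = rng.random(200)
axis_gap = 1.0                                 # horizontal distance between the axes

# Angle of each line segment between the two axes.
angles = np.degrees(np.arctan2(right - left, axis_gap))

# Angular brush: select segments rising between +20 and +60 degrees.
selected = np.flatnonzero((angles >= 20) & (angles <= 60))
print(len(selected), "of", len(left), "records selected")
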
@inproceedings{p1799,
booktitle = {Proc. InfoVis},
year = 2002,
title = {Arc diagrams: visualizing structure in strings},
doi = {10.1109/INFVIS.2002.1173155},
url = {http://dx.doi.org/10.1109/INFVIS.2002.1173155},
author = {Wattenberg, M.},
pages = {110--116},
keywords = {string, sequence, visualization, arc diagram, music, text, code},
abstract = {This paper introduces a new visualization method, the arc diagram, which is capable of representing complex patterns of repetition in string data. Arc diagrams improve over previous methods such as dotplots because they scale efficiently for strings that contain many instances of the same subsequence. This paper describes design and implementation issues related to arc diagrams and shows how they may be applied to visualize such diverse data as music, text, and compiled code.},
}
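The core data behind an arc diagram is a list of (position, position, length) matches between repeated substrings. The toy sketch below handles only exact repeats of a fixed block length on an invented string, nothing like the paper's maximal-substring machinery: consecutive occurrences of each repeated block become the endpoints of one arc.

from collections import defaultdict

def repeat_arcs(s, block=4):
    """Pair up consecutive occurrences of each repeated fixed-length block.

    Returns (start_a, start_b, block) tuples; each tuple is one arc.
    """
    positions = defaultdict(list)
    for i in range(len(s) - block + 1):
        positions[s[i:i + block]].append(i)

    arcs = []
    for occ in positions.values():
        if len(occ) > 1:
            arcs.extend((a, b, block) for a, b in zip(occ, occ[1:]))
    return arcs

melody = "abcdxyzabcdqrstabcd"      # invented string with a repeated motif "abcd"
print(repeat_arcs(melody))          # arcs (0, 7, 4) and (7, 15, 4)
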
@inproceedings{p1800,
booktitle = {Proc. InfoVis},
year = 2002,
title = {Beamtrees: compact visualization of large hierarchies},
doi = {10.1109/INFVIS.2002.1173153},
url = {http://dx.doi.org/10.1109/INFVIS.2002.1173153},
author = {van Ham, F. and van Wijk, J.J.},
pages = {93--100},
keywords = {},
abstract = {Beamtrees are a new method for the visualization of large hierarchical data sets. Nodes are shown as stacked circular beams, such that both the hierarchical structure as well as the size of nodes are depicted. The dimensions of beams are calculated using a variation of the treemap algorithm. A small user study indicated that beamtrees are significantly more effective than nested treemaps and cushion treemaps for the extraction of global hierarchical information.},
}
@inproceedings{p1801,
booktitle = {Proc. InfoVis},
year = 2002,
title = {Building a visual database for example-based graphics generation},
doi = {10.1109/INFVIS.2002.1173143},
url = {http://dx.doi.org/10.1109/INFVIS.2002.1173143},
author = {Zhou, M.X. and Chen, M. and Ying Feng},
pages = {23--30},
keywords = {},
abstract = {Example-based graphics generation systems automatically create new information visualizations by learning from existing graphic examples. As part of the effort on developing a general-purpose example-based generation system, we are building a visual database of graphic examples. In this paper, we address two main issues involved in constructing such a database: example selection and example modeling. As a result, our work offers three unique contributions: First, we build a visual database that contains a diverse collection of well-designed examples. Second, we develop a feature-based scheme to model all examples uniformly and accurately. Third, our visual database brings several important implications to the area of information visualization.},
}
@inproceedings{p1802,
booktitle = {Proc. InfoVis},
year = 2002,
title = {Case study: visualizing sets of evolutionary trees},
doi = {10.1109/INFVIS.2002.1173150},
url = {http://dx.doi.org/10.1109/INFVIS.2002.1173150},
author = {Amenta, N. and Klingner, J.},
pages = {71--74},
keywords = {},
abstract = {We describe a visualization tool which allows a biologist to explore a large set of hypothetical evolutionary trees. Interacting with such a dataset allows the biologist to identify distinct hypotheses about how different species or organisms evolved, which would not have been clear from traditional analyses. Our system integrates a point-set visualization of the distribution of hypothetical trees with detail views of an individual tree, or of a consensus tree summarizing a subset of trees. Efficient algorithms were required for the key tasks of computing distances between trees, finding consensus trees, and laying out the point-set visualization.},
}
@inproceedings{p1803,
booktitle = {Proc. InfoVis},
year = 2002,
title = {Demystifying venture capital investing},
doi = {10.1109/INFVIS.2002.1173162},
url = {http://dx.doi.org/10.1109/INFVIS.2002.1173162},
author = {Chuah, M.},
pages = {161--164},
keywords = {},
abstract = {Since the crash of the dot.coms, investors have gotten a lot more careful with where they place their money. Now more than ever it becomes really important for venture capitalists (VCs) to monitor the state of the startups market and continually update their investment strategy to suit the rapidly changing market conditions. This paper presents three new visualization metaphors (Spiral Map, TimeTicker, and Double Histogram) for monitoring the startups market. While we are focusing on the VC domain, the visual metaphors developed are general and can be easily applied to other domains.},
}
@misc{p1804,
year = 2002,
title = {Display design for the eye and mind},
doi = {10.1109/INFVIS.2002.1173164},
url = {http://dx.doi.org/10.1109/INFVIS.2002.1173164},
author = {Kosslyn, S.M.},
pages = {171--171},
keywords = {},
abstract = {},
}
@inproceedings{p1805,
booktitle = {Proc. InfoVis},
year = 2002,
title = {Efficient cartogram generation: a comparison},
doi = {10.1109/INFVIS.2002.1173144},
url = {http://dx.doi.org/10.1109/INFVIS.2002.1173144},
author = {Keim, D.A. and North, S.C. and Panse, C. and Schneidewind, J.},
pages = {33--36},
keywords = {},
abstract = {Cartograms are a well-known technique for showing geography-related statistical information, such as population demographics and epidemiological data. The basic idea is to distort a map by resizing its regions according to a statistical parameter, but in a way that keeps the map recognizable. We deal with the problem of making continuous cartograms that strictly retain the topology of the input mesh. We compare two algorithms to solve the continuous cartogram problem. The first one uses an iterative relocation of the vertices based on scanlines. The second one is based on the Gridfit technique, which uses pixel-based distortion based on a quadtree-like data structure.},
}
@inproceedings{p1806,
booktitle = {Proc. InfoVis},
year = 2002,
title = {Graphical encoding for information visualization: an empirical study},
doi = {10.1109/INFVIS.2002.1173146},
url = {http://dx.doi.org/10.1109/INFVIS.2002.1173146},
author = {Nowell, L. and Schulman, R. and Hix, D.},
pages = {43--50},
keywords = {},
abstract = {Research in several areas provides scientific guidance for use of graphical encoding to convey information in an information visualization display. By graphical encoding we mean the use of visual display elements such as icon color, shape, size, or position to convey information about objects represented by the icons. Literature offers inconclusive and often conflicting viewpoints, including the suggestion that the effectiveness of a graphical encoding depends on the type of data represented. Our empirical study suggests that the nature of the users' perceptual task is more indicative of the effectiveness of a graphical encoding than the type of data represented.},
}
@inproceedings{p1807,
booktitle = {Proc. InfoVis},
year = 2002,
title = {Interactive information visualization of a million items},
doi = {10.1109/INFVIS.2002.1173156},
url = {http://dx.doi.org/10.1109/INFVIS.2002.1173156},
author = {Fekete, J. and Plaisant, C.},
pages = {117--124},
keywords = {},
abstract = {Existing information visualization techniques are usually limited to the display of a few thousand items. This article describes new interactive techniques capable of handling a million items (effectively visible and manageable on screen). We evaluate the use of hardware-based techniques available with newer graphics cards, as well as new animation techniques and non-standard graphical features such as stereovision and overlap count. These techniques have been applied to two popular information visualizations: treemaps and scatter plot diagrams; but are generic enough to be applied to other 2D representations as well.},
}
@misc{p1808,
year = 2002,
title = {Internet traffic: visualization, discovery, and very large displays},
doi = {10.1109/INFVIS.2002.1173140},
url = {http://dx.doi.org/10.1109/INFVIS.2002.1173140},
author = {Cleveland, W.S.},
pages = {3--4},
keywords = {},
abstract = {For a decade, the ruling common wisdom for Internet traffic held that it was everywhere bursty: over periods lasting tens of milliseconds to hundreds, the traffic was either much below its average rate or much above. In other words, the traffic was not smooth, not staying at all times close to its average. It was bursty on the cable running down a street, carrying the merged traffic of a small number of cable modem users in one section of a town. It was bursty on the core fiber of an Internet service provider, carrying the merged traffic of thousands of users from all over the country. The Internet was designed to accommodate the bursty traffic. The routers and switches that forward traffic from one place to the next were designed for burstiness, and Internet service providers allocated traffic loads on the devices based on an assumption of burstiness. Recently, it was discovered that the old common wisdom is not true. Visualization played a fundamental role in the discovery. The old wisdom held up for links with a small number of users. But as the number of users increases, the burstiness dissipates, and the traffic becomes smooth. Design of the high-load part of the Internet needs to be rethought. The old wisdom had persisted for high-load links because the databases of traffic measurements from them are immense, and the traffic measurements had not been studied in their fullest detail, which is necessary to see the smoothing. Visualization tools allowed the detail to be seen, and allowed the verification of a mathematical theory that predicts the smoothing. To see the detail, individual visual displays were created that take up an amount of virtual screen real estate measured in hundreds of pages. It is a simple idea: if you have a lot of data, and you want to see it in detail, you need a lot of space. What is needed now is a rich set of ideas and methods for navigating such very large displays.},
}
@inproceedings{p1809,
booktitle = {Proc. InfoVis},
year = 2002,
title = {InterRing: an interactive tool for visually navigating and manipulating hierarchical structures},
doi = {10.1109/INFVIS.2002.1173151},
url = {http://dx.doi.org/10.1109/INFVIS.2002.1173151},
author = {Yang, J. and Ward, M.O. and Rundensteiner, E.A.},
pages = {77--84},
keywords = {radial space-filling hierarchy visualizations, multi-focus distortion, structure-based brushing},
abstract = {Radial, space-filling (RSF) techniques for hierarchy visualization have several advantages over traditional node-link diagrams, including the ability to efficiently use the display space while effectively conveying the hierarchy structure. Several RSF systems and tools have been developed to date, each with varying degrees of support for interactive operations such as selection and navigation. We describe what we believe to be a complete set of desirable operations on hierarchical structures. We then present InterRing, an RSF hierarchy visualization system that supports a significantly more extensive set of these operations than prior systems. In particular, InterRing supports multi-focus distortions, interactive hierarchy reconfiguration, and both semi-automated and manual selection. We show the power and utility of these and other operations, and describe our on-going efforts to evaluate their effectiveness and usability.},
}
@inproceedings{p1810,
booktitle = {Proc. InfoVis},
year = 2002,
title = {Multiple foci drill-down through tuple and attribute aggregation polyarchies in tabular data},
doi = {10.1109/INFVIS.2002.1173158},
url = {http://dx.doi.org/10.1109/INFVIS.2002.1173158},
author = {Conklin, N. and Prabhakar, S. and North, C.},
pages = {131--134},
keywords = {},
abstract = {Information analysis often involves decomposing data into sub-groups to allow for comparison and identification of relationships. Breakdown Visualization provides a mechanism to support this analysis through user guided drill-down of polyarchical metadata. This metadata describes multiple hierarchical structures for organizing tuple aggregations and table attributes. This structure is seen in financial data, organizational structures, sport statistics, and other domains. A spreadsheet format enables comparison of visualizations at any level of the hierarchy. Breakdown Visualization allows users to drill-down a single hierarchy then pivot into another hierarchy within the same view. It utilizes a fix and move technique that allows users to select multiple foci for drill-down.},
}
@inproceedings{p1811,
booktitle = {Proc. InfoVis},
year = 2002,
title = {Multiscale visualization using data cubes},
doi = {10.1109/INFVIS.2002.1173141},
url = {http://dx.doi.org/10.1109/INFVIS.2002.1173141},
author = {Stolte, C. and Tang, D. and Hanrahan, P.},
pages = {7--14},
keywords = {},
abstract = {Most analysts start with an overview of the data before gradually refining their view to be more focused and detailed. Multiscale pan-and-zoom systems are effective because they directly support this approach. However generating abstract overviews of large data sets is difficult, and most systems take advantage of only one type of abstraction: visual abstraction. Furthermore, these existing systems limit the analyst to a single zooming path on their data and thus a single set of abstract views. This paper presents: (1) a formalism for describing multiscale visualizations of data cubes with both data and visual abstraction, and (2) a method for independently zooming along one or more dimensions by traversing a zoom graph with nodes at different levels of detail. As an example of how to design multiscale visualizations using our system, we describe four design patterns using our formalism. These design patterns show the effectiveness of multiscale visualization of general relational databases.},
}
@inproceedings{p1812,
booktitle = {Proc. InfoVis},
year = 2002,
title = {Process visualization with levels of detail},
doi = {10.1109/INFVIS.2002.1173149},
url = {http://dx.doi.org/10.1109/INFVIS.2002.1173149},
author = {Matkovic, K. and Hauser, H. and Sainitzer, R. and Groller, E.},
pages = {67--70},
keywords = {process visualization, information visualization, levels of detail, focus+context visualization},
abstract = {We demonstrate how we apply information visualization techniques to process monitoring. Virtual instruments are enhanced using history encoding: instruments are capable of displaying the current value and the value from the near past. Multi-instruments are capable of displaying several data sources simultaneously. Levels of detail for virtual instruments are introduced where the screen area is inversely proportional to the amount of information displayed. Furthermore, the monitoring system is enhanced by using: 3D anchoring (attachment of instruments to positions on a 3D model), collision avoidance (a physically based spring model prevents instruments from overlapping), and focus+context rendering (giving the user the possibility to examine particular instruments in detail without losing the context information).},
}
@inproceedings{p1813,
booktitle = {Proc. InfoVis},
year = 2002,
title = {SpaceTree: supporting exploration in large node link tree, design evolution and empirical evaluation},
doi = {10.1109/INFVIS.2002.1173148},
url = {http://dx.doi.org/10.1109/INFVIS.2002.1173148},
author = {Plaisant, C. and Grosjean, J. and Bederson, B.B.},
pages = {57--64},
keywords = {},
abstract = {We present a novel tree browser that builds on the conventional node link tree diagrams. It adds dynamic rescaling of branches of the tree to best fit the available screen space, optimized camera movement, and the use of preview icons summarizing the topology of the branches that cannot be expanded. In addition, it includes integrated search and filter functions. This paper reflects on the evolution of the design and highlights the principles that emerged from it. A controlled experiment showed benefits for navigation to already previously visited nodes and estimation of overall tree topology.},
}
@inproceedings{p1814,
booktitle = {Proc. InfoVis},
year = 2002,
title = {The illusion of perceived metric 3D structure},
doi = {10.1109/INFVIS.2002.1173147},
url = {http://dx.doi.org/10.1109/INFVIS.2002.1173147},
author = {Lind, M. and Bingham, G.P. and Forser, C.},
pages = {51--56},
keywords = {},
abstract = {A large body of results on the characteristics of human spatial vision suggests that space perception is distorted. Recent studies indicate that the geometry of visual space is best understood as affine. If this is the case, it has far-reaching implications on how 3D visualizations can be successfully employed. For instance, all attempts to build visualization systems where users are expected to discover relations based on Euclidean distances or shapes will be ineffective. Because visualization can, and sometimes does, employ all possible types of depth information, and because the results from vision research usually concentrate on one or two such types, three experiments were performed under near optimal viewing conditions. The aim of the experiments was twofold: to test whether the earlier findings generalize to optimal viewing conditions and to get a sense of the size of the error under such conditions. The results show that the findings do generalize and that the errors are large. The implications of these results for successful visualizations are discussed.},
}
@inproceedings{p1815,
booktitle = {Proc. InfoVis},
year = 2002,
title = {Visual path analysis},
doi = {10.1109/INFVIS.2002.1173163},
url = {http://dx.doi.org/10.1109/INFVIS.2002.1173163},
author = {Keahey, T.A. and Eick, S.G.},
pages = {165--168},
keywords = {},
abstract = {We describe a system for analyzing the flow of traffic through Web sites. We decomposed the general path analysis problem into a set of distinct subproblems, and created a visual metaphor for analyzing each of them. Our system works off of multiple representations of the clickstream, and exposes the path extraction algorithms and data to the visual metaphors as Web services. We have combined the visual metaphors into a Web-based "path analysis portal" that lets the user easily switch between the different modes of analysis.},
}
@inproceedings{p1816,
booktitle = {Proc. InfoVis},
year = 2002,
title = {Visual unrolling of network evolution and the analysis of dynamic discourse},
doi = {10.1109/INFVIS.2002.1173160},
url = {http://dx.doi.org/10.1109/INFVIS.2002.1173160},
author = {Brandes, U. and Corman, S.R.},
pages = {145--151},
keywords = {},
abstract = {A new method for visualizing the class of incrementally evolving networks is presented. In addition to the intermediate states of the network it conveys the nature of the change between them by unrolling the dynamics of the network. Each modification is shown in a separate layer of a three-dimensional representation, where the stack of layers corresponds to a time line of the evolution. We focus on discourse networks as the driving application, but our method extends to any type of network evolving in similar ways.},
}
@inproceedings{p1817,
booktitle = {Proc. InfoVis},
year = 2002,
title = {Visualization schemas for flexible information visualization},
doi = {10.1109/INFVIS.2002.1173142},
url = {http://dx.doi.org/10.1109/INFVIS.2002.1173142},
author = {North, C. and Conklin, N. and Saini, V.},
pages = {15--22},
keywords = {},
abstract = {Relational databases provide significant flexibility to organize, store, and manipulate an infinite variety of complex data collections. This flexibility is enabled by the concept of relational data schemas, which allow data owners to easily design custom databases according to their unique needs. However, user interfaces and information visualizations for accessing and utilizing databases have not kept pace with this level of flexibility. This paper introduces the concept of visualization schemas, based on the Snap-Together Visualization model, which are analogous to relational data schemas. Visualization schemas enable users to rapidly construct customized multiple-view visualizations for databases in a similarly flexible fashion without programming. Since the design of appropriate visualizations for a given database depends on the data schema, visualization schemas are a natural analogy to the data schema concept.},
}
@inproceedings{p1818,
booktitle = {Proc. InfoVis},
year = 2002,
title = {Visualizing biosequence data using texture mapping},
doi = {10.1109/INFVIS.2002.1173154},
url = {http://dx.doi.org/10.1109/INFVIS.2002.1173154},
author = {Thiagarajan, P.R. and Gao, G.R.},
pages = {103--109},
keywords = {},
abstract = {Data-mining of information by the process of pattern discovery in protein sequences has been predominantly algorithm-based. We discuss a visualization approach, which uses texture mapping and blending techniques to perform visual data-mining on text data obtained from discovering patterns in protein sequences. This visual approach investigates the possibilities of representing text data in three dimensions and provides new possibilities of representing more dimensions of information in text data visualization and analysis. We also present a generic framework derived from this visualization approach to visualize text in biosequence data.},
}
@inproceedings{p1819,
booktitle = {Proc. InfoVis},
year = 2002,
title = {Visualizing data with bounded uncertainty},
doi = {10.1109/INFVIS.2002.1173145},
url = {http://dx.doi.org/10.1109/INFVIS.2002.1173145},
author = {Olston, C. and Mackinlay, J.},
pages = {37--40},
keywords = {uncertainty visualization, bounded uncertainty},
abstract = {Visualization is a powerful way to facilitate data analysis, but it is crucial that visualization systems explicitly convey the presence, nature, and degree of uncertainty to users. Otherwise, there is a danger that data will be falsely interpreted, potentially leading to inaccurate conclusions. A common method for denoting uncertainty is to use error bars or similar techniques designed to convey the degree of statistical uncertainty. While uncertainty can often be modeled statistically, a second form of uncertainty, bounded uncertainty, can also arise that has very different properties than statistical uncertainty. Error bars should not be used for bounded uncertainty because they do not convey the correct properties, so a different technique should be used instead. We describe a technique for conveying bounded uncertainty in visualizations and show how it can be applied systematically to common displays of abstract charts and graphs. Interestingly, it is not always possible to show the exact degree of uncertainty, and in some cases it can only be displayed approximately.},
}
@inproceedings{p1898,
booktitle = {Proc. InfoVis},
year = 2001,
title = {2D vs 3D, implications on spatial memory},
doi = {10.1109/INFVIS.2001.963291},
url = {http://dx.doi.org/10.1109/INFVIS.2001.963291},
author = {Tavanti, M. and Lind, M.},
pages = {139--145},
keywords = {},
abstract = {Since the introduction of graphical user interfaces (GUI) and two-dimensional (2D) displays, the concept of space has entered the information technology (IT) domain. Interactions with computers were re-encoded in terms of fidelity to the interactions with the real environment and consequently in terms of fitness to cognitive and spatial abilities. A further step in this direction was the creation of three-dimensional (3D) displays which have amplified the fidelity of digital representations. However, there are no systematic results evaluating the extent to which 3D displays better support cognitive spatial abilities. The aim of this research is to empirically investigate spatial memory performance across different instances of 2D and 3D displays. Two experiments were performed. The displays used in the experimental situation represented hierarchical information structures. The results of the test show that the 3D display does improve performances in the designed spatial memory task.},
}
@inproceedings{p1899,
booktitle = {Proc. InfoVis},
year = 2001,
title = {A comparison of 2-D visualizations of hierarchies},
doi = {10.1109/INFVIS.2001.963290},
url = {http://dx.doi.org/10.1109/INFVIS.2001.963290},
author = {Barlow, T. and Neville, P.},
pages = {131--138},
keywords = {},
abstract = {This paper describes two experiments that compare four two-dimensional visualizations of hierarchies: organization chart, icicle plot, treemap, and tree ring. The visualizations are evaluated in the context of decision tree analyses prevalent in data mining applications. The results suggest that either the tree ring or icicle plot is equivalent to the organization chart.},
}
@inproceedings{p1900,
booktitle = {Proc. InfoVis},
year = 2001,
title = {An empirical comparison of three commercial information visualization systems},
doi = {10.1109/INFVIS.2001.963289},
url = {http://dx.doi.org/10.1109/INFVIS.2001.963289},
author = {Kobsa, A.},
pages = {123--130},
keywords = {},
abstract = {An empirical comparison of three commercial information visualization systems on three different databases is presented. The systems use different paradigms for visualizing data. Tasks were selected to be "ecologically relevant", i.e. meaningful and interesting in the respective domains. Users of one system turned out to solve problems significantly faster than users of the other two, while users of another system would supply significantly more correct answers. Reasons for these results and general observations about the studied systems are discussed.},
}
@inproceedings{p1901,
booktitle = {Proc. InfoVis},
year = 2001,
title = {Animated exploration of dynamic graphs with radial layout},
doi = {10.1109/INFVIS.2001.963279},
url = {http://dx.doi.org/10.1109/INFVIS.2001.963279},
author = {Yee, K.-P. and Fisher, D. and Dhamija, R. and Hearst, M.},
pages = {43--50},
keywords = {graph drawing, animation, interaction},
abstract = {We describe a new animation technique for supporting interactive exploration of a graph. We use the well-known radial tree layout method, in which the view is determined by the selection of a focus node. Our main contribution is a method for animating the transition to a new layout when a new focus node is selected. In order to keep the transition easy to follow, the animation linearly interpolates the polar coordinates of the nodes, while enforcing ordering and orientation constraints. We apply this technique to visualizations of social networks and of the Gnutella file-sharing network, and discuss the results from our informal usability tests.},
}
@misc{p1902,
year = 2001,
title = {Battlespace visualization: a grand challenge},
doi = {10.1109/INFVIS.2001.963296},
url = {http://dx.doi.org/10.1109/INFVIS.2001.963296},
author = {Posdamer, J.L. and Dantone, J. and Gershon, N. and Dale, J. and Hamburger, T. and Page, W.},
pages = {169--170},
keywords = {},
abstract = {},
}
@inproceedings{p1903,
booktitle = {Proc. InfoVis},
year = 2001,
title = {Botanical visualization of huge hierarchies},
doi = {10.1109/INFVIS.2001.963285},
url = {http://dx.doi.org/10.1109/INFVIS.2001.963285},
author = {Kleiberg, E. and van de Wetering, H. and van Wijk, J.J.},
pages = {87--94},
keywords = {botanical tree, logical tree, huge hierarchy, strands, tree visualization, directory tree, phyllotaxis},
abstract = {A new method for the visualization of huge hierarchical data structures is presented. The method is based on the observation that we can easily see the branches, leaves, and their arrangement in a botanical tree, despite the large number of elements. The strand model of Holton is used to convert an abstract tree into a geometric model. Nonleaf nodes are mapped to branches and child nodes to subbranches. A naive application of this model leads to unsatisfactory results, hence it is tailored to suit our purposes better. Continuing branches are emphasized, long branches are contracted, and sets of leaves are shown as fruit. The method is applied to the visualization of directory structures. The elements, directories and files, as well as their relations can easily be extracted, thereby showing that the use of methods from botanical modeling can be effective for information visualization.},
}
@inproceedings{p1904,
booktitle = {Proc. InfoVis},
year = 2001,
title = {Case study: design and assessment of an enhanced geographic information system for exploration of multivariate health statistics},
doi = {10.1109/INFVIS.2001.963294},
url = {http://dx.doi.org/10.1109/INFVIS.2001.963294},
author = {Edsall, R.M. and MacEachren, A.M. and Pickle, L.},
pages = {159--162},
keywords = {},
abstract = {An implementation of an interactive parallel coordinate plot linked with the ArcView® geographic information system (GIS) is presented. The integrated geographic visualization system was created for the exploratory analysis of mortality data from specific cancers as they relate, specifically spatially, to other mortality causes and to demographic and socioeconomic risk factors. The linked and interactive parallel coordinate plot was tested with and compared to a similarly interactive and linked scatterplot in usability assessments designed to assess each representation’s relative effectiveness for exploration of these data sets. Evidence from these studies suggests that multivariate, spatial, and/or time series exploration is enhanced through the use of the parallel coordinate plot linked to maps.},
}
@inproceedings{p1905,
booktitle = {Proc. InfoVis},
year = 2001,
title = {Case study: e-commerce clickstream visualization},
doi = {10.1109/INFVIS.2001.963293},
url = {http://dx.doi.org/10.1109/INFVIS.2001.963293},
author = {Brainerd, J. and Becker, B.},
pages = {153--156},
keywords = {},
abstract = {We have developed an interactive, scalable visualization tool for analyzing the behavior of users of a web site. Our system not only shows site topology and traffic flow, but by segmenting site traffic data based on user attributes, including demographic data and purchase history, we can present a more complete picture of web site usage. This can lead to a more focussed analysis that allows direct comparison between user segments, and ultimately a deeper understanding of how users interact with a site. The tool is designed for real world use, and we present a usage study of the tool by analyzing the data of a failed “dot-com”.},
}
@inproceedings{p1906,
booktitle = {Proc. InfoVis},
year = 2001,
title = {Case study: visualization for decision tree analysis in data mining},
doi = {10.1109/INFVIS.2001.963292},
url = {http://dx.doi.org/10.1109/INFVIS.2001.963292},
author = {Barlow, T. and Neville, P.},
pages = {149--152},
keywords = {},
abstract = {Decision trees are one of the most popular methods of data mining. Decision trees partition large amounts of data into smaller segments by applying a series of rules. Creating and evaluating decision trees benefits greatly from visualization of the trees and diagnostic measures of their effectiveness. This paper describes an application, EMTree Results Viewer, that supports decision tree analysis through the visualization of model results and diagnosis. The functionality of the application and the visualization techniques are revealed through an example of churn analysis in the telecommunications industry.},
}
@inproceedings{p1907,
booktitle = {Proc. InfoVis},
year = 2001,
title = {Change blindness in information visualization: a case study},
doi = {10.1109/INFVIS.2001.963274},
url = {http://dx.doi.org/10.1109/INFVIS.2001.963274},
author = {Nowell, L. and Hetzler, E. and Tanasse, T.},
pages = {15--22},
keywords = {},
abstract = {Change blindness occurs when people do not notice changes in visible elements of a scene. If people use an information visualization system to compare document collection subsets partitioned by their time-stamps, change blindness makes it impossible for them to recognize even very major changes, let alone minor ones. We describe theories from cognitive science that account for the change blindness phenomenon, as well as solutions developed for two visual analysis tools, a dot plot (SPIRE Galaxies) and landscape (ThemeView™) visualizations.},
}
@inproceedings{p1908,
booktitle = {Proc. InfoVis},
year = 2001,
title = {Cluster stability and the use of noise in interpretation of clustering},
doi = {10.1109/INFVIS.2001.963275},
url = {http://dx.doi.org/10.1109/INFVIS.2001.963275},
author = {Davidson, G. and Wylie, B. and Boyack, K.W.},
pages = {23--30},
keywords = {},
abstract = {A clustering and ordination algorithm suitable for mining extremely large databases, including those produced by microarray expression studies, is described and analyzed for stability. Data from a yeast cell cycle experiment with 6000 genes and 18 experimental measurements per gene are used to test this algorithm under practical conditions. The process of assigning database objects to an X,Y coordinate, ordination, is shown to be stable with respect to random starting conditions, and with respect to minor perturbations in the starting similarity estimates. Careful analysis of the way clusters typically co-locate, versus the occasional large displacements under different starting conditions, is shown to be useful in interpreting the data. This extra stability information is lost when only a single cluster is reported, which is currently the accepted practice. However, it is believed that the approaches presented here should become a standard part of best practices in analyzing computer clustering of large data collections.},
}
@inproceedings{p1909,
booktitle = {Proc. InfoVis},
year = 2001,
title = {Collapsible cylindrical trees: a fast hierarchical navigation technique},
doi = {10.1109/INFVIS.2001.963284},
url = {http://dx.doi.org/10.1109/INFVIS.2001.963284},
author = {Dachselt, R. and Ebert, J.},
pages = {79--86},
keywords = {visualization, web navigation, hierarchy, interactive tree, sitemap, 3D graphics, VRML, XML},
abstract = {This paper proposes a new visualization and interaction technique for medium-sized trees, called Collapsible Cylindrical Trees (CCT). Child nodes are mapped on rotating cylinders, which will be dynamically displayed or hidden to achieve a useful balance of detail and context. Besides a comprehensible three-dimensional visualization of trees, the main feature of CCT is a very fast and intuitive interaction with the displayed nodes. Only a single click is needed to reach every node and perform an action on it, such as displaying a web page. The CCT browsing technique was developed for interaction with web hierarchies but is not limited to this domain. We also present sample implementations of CCT using VRML, which show the usefulness of this intuitive tree navigation technique.},
}
@inproceedings{p1910,
booktitle = {Proc. InfoVis},
year = 2001,
title = {Effective graph visualization via node grouping},
doi = {10.1109/INFVIS.2001.963280},
url = {http://dx.doi.org/10.1109/INFVIS.2001.963280},
author = {Six, J.M. and Tollis, I.G.},
pages = {51--58},
keywords = {Graph Drawing, Graph Visualization, Force-Directed Drawing, Orthogonal Drawing, Node Grouping, Experimental Studies},
abstract = {We discuss four methodologies for the application of node grouping in graph visualization. In addition, we introduce techniques for force-directed and orthogonal drawing which use node grouping information and have been shown in experiments to perform better than previous techniques. Not only do these techniques have significantly improved performance with respect to standard aesthetic measures, but they also attain qualitative improvement.},
}
@inproceedings{p1911,
booktitle = {Proc. InfoVis},
year = 2001,
title = {Getting along: composition of visualization paradigms},
doi = {10.1109/INFVIS.2001.963278},
url = {http://dx.doi.org/10.1109/INFVIS.2001.963278},
author = {Keahey, T.A.},
pages = {37--40},
keywords = {},
abstract = {This paper describes how focus+context techniques can be composed with other high-level visualization paradigms to mutual advantage. Examples are given showing composition both with a pan&zoom system, and with a treemap implementation. The examples illustrate how focus+context can be used as an exploration and navigation tool within those paradigms.},
}
@inproceedings{p1912,
booktitle = {Proc. InfoVis},
year = 2001,
title = {Graph sketches},
doi = {10.1109/INFVIS.2001.963282},
url = {http://dx.doi.org/10.1109/INFVIS.2001.963282},
author = {Abello, J. and Finocchi, I. and Korn, J.},
pages = {67--70},
keywords = {visualization, massive data sets, graphs, hierarchies},
abstract = {We introduce the notion of Graph Sketches. They can be thought of as visual indices that guide the navigation of a multi-graph too large to fit on the available display. We adhere to the Visual Information-Seeking Mantra: Overview first, zoom and filter, then details on demand. Graph Sketches are incorporated into MGV, an integrated visualization and exploration system for massive multi-digraph navigation. We highlight the main algorithmic and visualization tasks behind the computation of Graph Sketches and illustrate several application scenarios. Graph Sketches will be used to guide the navigation of multi-digraphs defined on vertex sets with sizes ranging from 100 to 250 million vertices.},
}
@inproceedings{p1913,
booktitle = {Proc. InfoVis},
year = 2001,
title = {Graphic data display for cardiovascular system},
doi = {10.1109/INFVIS.2001.963295},
url = {http://dx.doi.org/10.1109/INFVIS.2001.963295},
author = {Agutter, J. and Syroid, N. and Drews, F. and Westenskow, D. and Bermudez, J. and Strayer, D.},
pages = {163--166},
keywords = {},
abstract = {Our multi-disciplinary group has developed a visual representation for cardiovascular physiological variables. This enhances a clinician’s ability to detect and rapidly respond to critical events. The integrated and intuitive display communicates a patient’s cardiovascular state so that it is easily and quickly understood without prior training. The display is designed to show patterns of functional relationships that aid in the detection, diagnosis, and treatment of a critical event.},
}
@inproceedings{p1914,
booktitle = {Proc. InfoVis},
year = 2001,
title = {Interactive visualization of multiple query results},
doi = {10.1109/INFVIS.2001.963287},
url = {http://dx.doi.org/10.1109/INFVIS.2001.963287},
author = {Havre, S. and Hetzler, E. and Perrine, K. and Jurrus, E. and Miller, N.},
pages = {105--112},
keywords = {},
abstract = {This paper introduces a graphical method for visually presenting and exploring the results of multiple queries simultaneously. This method allows a user to visually compare multiple query result sets, explore various combinations among the query result sets, and identify the "best" matches for combinations of multiple independent queries. This approach might also help users explore methods for progressively improving queries by visually comparing the improvement in result sets.},
}
@inproceedings{p1915,
booktitle = {Proc. InfoVis},
year = 2001,
title = {Ordered treemap layouts},
doi = {10.1109/INFVIS.2001.963283},
url = {http://dx.doi.org/10.1109/INFVIS.2001.963283},
author = {Shneiderman, B. and Wattenberg, M.},
pages = {73--78},
keywords = {treemaps, ordered treemaps, trees, hierarchies, information visualization },
abstract = {Treemaps, a space-filling method of visualizing large hierarchical data sets, are receiving increasing attention. Several algorithms have been proposed to create more useful displays by controlling the aspect ratios of the rectangles that make up a treemap. While these algorithms do improve visibility of small items in a single layout, they introduce instability over time in the display of dynamically changing data, and fail to preserve an ordering of the underlying data. This paper introduces the ordered treemap, which addresses these two shortcomings. The ordered treemap algorithm ensures that items near each other in the given order will be near each other in the treemap layout. Using experimental evidence from Monte Carlo trials, we show that compared to other layout algorithms ordered treemaps are more stable while maintaining relatively favorable aspect ratios of the constituent rectangles. A second test set uses stock market data.},
}
@inproceedings{p1916,
booktitle = {Proc. InfoVis},
year = 2001,
title = {Pixel bar charts: a new technique for visualizing large multi-attribute data sets without aggregation},
doi = {10.1109/INFVIS.2001.963288},
url = {http://dx.doi.org/10.1109/INFVIS.2001.963288},
author = {Keim, D.A. and Hao, M.C. and Ladisch, J. and Hsu, M. and Dayal, U.},
pages = {113--120},
keywords = {},
abstract = {Simple presentation graphics are intuitive and easy-to-use, but show only highly aggregated data and present only a very limited number of data values (as in the case of bar charts). In addition, these graphics may have a high degree of overlap which may occlude a significant portion of the data values (as in the case of the x-y plots). In this paper, we therefore propose a generalization of traditional bar charts and x-y-plots which allows the visualization of large amounts of data. The basic idea is to use the pixels within the bars to present the detailed information of the data records. Our so-called pixel bar charts retain the intuitiveness of traditional bar charts while allowing very large data sets to be visualized in an effective way. We show that, for an effective pixel placement, we have to solve complex optimization problems, and present an algorithm which efficiently solves the problem. Our application using real-world e-commerce data shows the wide applicability and usefulness of our new idea.},
}
@inproceedings{p1917,
booktitle = {Proc. InfoVis},
year = 2001,
title = {Semantic depth of field},
doi = {10.1109/INFVIS.2001.963286},
url = {http://dx.doi.org/10.1109/INFVIS.2001.963286},
author = {Kosara, R. and Miksch, S. and Hauser, H.},
pages = {97--104},
keywords = {Depth of Field, Focus and Context, Information Visualization},
abstract = {We present a new technique called Semantic Depth of Field (SDOF) as an alternative approach to focus-and-context displays of information. We utilize a well-known method from photography and cinematography (depth-of-field effect) for information visualization, which is to blur different parts of the depicted scene in dependence of their relevance. Independent of their spatial locations, objects of interest are depicted sharply in SDOF, whereas the context of the visualization is blurred. In this paper, we present a flexible model of SDOF which can be easily adopted to various types of applications. We discuss pros and cons of the new technique, give examples of application, and describe a fast prototype implementation of SDOF.},
}
@inproceedings{p1918,
booktitle = {Proc. InfoVis},
year = 2001,
title = {Technical note: visually encoding program test information to find faults in software},
doi = {10.1109/INFVIS.2001.963277},
url = {http://dx.doi.org/10.1109/INFVIS.2001.963277},
author = {Eagan, J. and Harrold, M.J. and Jones, J.A. and Stasko, J.},
pages = {33--36},
keywords = {},
abstract = {Large test suites are frequently used to evaluate software systems and to locate errors. Unfortunately, this process can generate a huge amount of data that is difficult to interpret manually. We have created a system, TARANTULA, that visually encodes test data to help find program errors. The system uses a principled color mapping to represent source lines in passed and failed tests. It also provides a flexible user interface for examining different perspectives that show the behavior of the source code on test sets, ranging from individual tests, to important subsets such as the set of failed tests, to the entire test suite.},
}
@misc{p1919,
year = 2001,
title = {To draw a tree},
doi = {10.1109/INFVIS.2001.963272},
url = {http://dx.doi.org/10.1109/INFVIS.2001.963272},
author = {Hanrahan, P.},
pages = {3--3},
keywords = {},
abstract = {},
}
@inproceedings{p1920,
booktitle = {Proc. InfoVis},
year = 2001,
title = {Visualization of state transition graphs},
doi = {10.1109/INFVIS.2001.963281},
url = {http://dx.doi.org/10.1109/INFVIS.2001.963281},
author = {van Ham, F. and van de Wetering, H. and van Wijk, J.J.},
pages = {59--66},
keywords = {},
abstract = {A new method for the visualization of state transition graphs is presented. Visual information is reduced by clustering nodes, forming a tree structure of related clusters. This structure is visualized in three dimensions with concepts from cone trees and emphasis on symmetry. The resulting visualization makes it easier to relate features in the visualization of the state transition graph to semantic concepts in the corresponding process and vice versa.},
}
@inproceedings{p1921,
booktitle = {Proc. InfoVis},
year = 2001,
title = {Visualizing time-series on spirals},
doi = {10.1109/INFVIS.2001.963273},
url = {http://dx.doi.org/10.1109/INFVIS.2001.963273},
author = {Weber, M. and Alexa, M. and Muller, W.},
pages = {7--13},
keywords = {Information Visualization, Graph Drawing, Visualization of Time-Series Data, Data Mining },
abstract = {In this paper, we present a new approach for the visualization of time-series data based on spirals. Different to classical bar charts and line graphs, the spiral is suited to visualize large data sets and supports much better the identification of periodic structures in the data. Moreover, it supports both the visualization of nominal and quantitative data based on a similar visualization metaphor. The extension of the spiral visualization to 3D gives access to concepts for zooming and focusing and linking in the data set. As such, spirals complement other visualization techniques for time series and specifically enhance the identification of periodic patterns.},
}
@inproceedings{p1996,
booktitle = {Proc. InfoVis},
year = 2000,
title = {A scalable framework for information visualization},
doi = {10.1109/INFVIS.2000.885088},
url = {http://dx.doi.org/10.1109/INFVIS.2000.885088},
author = {Kreuseler, M. and Lopez, N. and Schumann, H.},
pages = {27--36},
keywords = {},
abstract = {The paper describes major concepts of a scalable information visualization framework. We assume that the exploration of heterogeneous information spaces at arbitrary levels of detail requires a suitable preprocessing of information quantities, the combination of different graphical interfaces and the illustration of the frame of reference of given information sets. The innovative features of our system include: dynamic hierarchy computation and user controlled refinement of those hierarchies for preprocessing unstructured information spaces; a new Focus+Context technique for visualizing complex hierarchy graphs; a new paradigm for visualizing information structures within their frame of reference; and a new graphical interface that utilizes textual similarities to arrange objects of high dimensional information space in 3-dimensional visualization space},
}
@inproceedings{p1997,
booktitle = {Proc. InfoVis},
year = 2000,
title = {A taxonomy of visualization techniques using the data state reference model},
doi = {10.1109/INFVIS.2000.885092},
url = {http://dx.doi.org/10.1109/INFVIS.2000.885092},
author = {Chi, E.H.},
pages = {69--75},
keywords = {Information Visualization, Data State Model,Reference Model, Taxonomy, Techniques, Operators},
abstract = {In previous work, researchers have attempted to construct taxonomies of information visualization techniques by examining the data domains that are compatible with these techniques. This is useful because implementers can quickly identify various techniques that can be applied to their domain of interest. However, these taxonomies do not help the implementers understand how to apply and implement these techniques. The author extends and proposes a new way to taxonomize information visualization techniques by using the Data State Model (E.H. Chi and J.T. Riedl, 1998). In fact, as the taxonomic analysis in the paper will show, many of the techniques share similar operating steps that can easily be reused. The paper shows that the Data State Model not only helps researchers understand the space of design, but also helps implementers understand how information visualization techniques can be applied more broadly},
}
@inproceedings{p1998,
booktitle = {Proc. InfoVis},
year = 2000,
title = {Collaborative geographic visualization: enabling shared understanding of environmental processes},
doi = {10.1109/INFVIS.2000.885102},
url = {http://dx.doi.org/10.1109/INFVIS.2000.885102},
author = {Brewer, I. and MacEachren, A.M. and Abdo, H. and Gundrum, J. and Otto, G.},
pages = {137--141},
keywords = {},
abstract = {We describe a prototype same-time/different-place collaborative geovisualization environment. We outline an approach to understanding use and usability and present results of interviews with domain experts about the ways in which collaborative visualization might enable groups to work at a distance. One goal for our research is to design an effective and flexible system that can support group work on environmental science research mediated through dynamic geovisualization displays. We are addressing this goal using a four-step human-centered system design process, modeled on that proposed by (Gabbard et al., 1999) for development and evaluation of virtual environments. The steps they delineate are: user task analysis; expert guideline-based evaluation; formative user-centered evaluation; and summative comparative evaluation},
}
@inproceedings{p2000,
booktitle = {Proc. InfoVis},
year = 2000,
title = {Creativity, complexity, and precision: information visualization for (landscape) architecture},
doi = {10.1109/INFVIS.2000.885105},
url = {http://dx.doi.org/10.1109/INFVIS.2000.885105},
author = {Buscher, M. and Shapiro, D. and Christensen, M. and Mogensen, P. and Orbaek, P.},
pages = {167--171},
keywords = {Information visualization, architecture, work materials, context, spatio-temporal order, electronic workspace},
abstract = {Drawing on ethnographic studies of (landscape) architects at work, this paper presents a human-centered approach to information visualization. A 3D collaborative electronic workspace allows people to configure, save and browse arrangements of heterogeneous work materials. Spatial arrangements and links are created and maintained as an integral part of ongoing work with `live' documents and objects. The result is an extension of the physical information space of the architects' studio that utilizes the potential of electronic data storage, visualization and network technologies to support work with information in context},
}
@inproceedings{p2001,
booktitle = {Proc. InfoVis},
year = 2000,
title = {Density functions for visual attributes and effective partitioning in graph visualization},
doi = {10.1109/INFVIS.2000.885090},
url = {http://dx.doi.org/10.1109/INFVIS.2000.885090},
author = {Herman, I. and Marshall, M.S. and Melancon, G.},
pages = {49--56},
keywords = {graph visualization, graph navigation, metrics, clustering},
abstract = {Two tasks in graph visualization require partitioning: the assignment of visual attributes and divisive clustering. Often, we would like to assign a color or other visual attributes to a node or edge that indicates an associated value. In an application involving divisive clustering, we would like to partition the graph into subsets of graph elements based on metric values in such a way that all subsets are evenly populated. Assuming a uniform distribution of metric values during either partitioning or coloring can have undesired effects such as empty clusters or only one level of emphasis for the entire graph. Probability density functions derived from statistics about a metric can help systems succeed at these tasks},
}
@inproceedings{p2002,
booktitle = {Proc. InfoVis},
year = 2000,
title = {Focus+context display and navigation techniques for enhancing radial, space-filling hierarchy visualizations},
doi = {10.1109/INFVIS.2000.885091},
url = {http://dx.doi.org/10.1109/INFVIS.2000.885091},
author = {Stasko, J. and Zhang, E.},
pages = {57--65},
keywords = {},
abstract = {Radial, space-filling visualizations can be useful for depicting information hierarchies, but they suffer from one major problem. As the hierarchy grows in size, many items become small, peripheral slices that are difficult to distinguish. We have developed three visualization/interaction techniques that provide flexible browsing of the display. The techniques allow viewers to examine the small items in detail while providing context within the entire information hierarchy. Additionally, smooth transitions between views help users maintain orientation within the complete information space},
}
@inproceedings{p2003,
booktitle = {Proc. InfoVis},
year = 2000,
title = {From metaphor to method: cartographic perspectives on information visualization},
doi = {10.1109/INFVIS.2000.885095},
url = {http://dx.doi.org/10.1109/INFVIS.2000.885095},
author = {Skupin, A.},
pages = {91--97},
keywords = {},
abstract = {By virtue of their spatio-cognitive abilities, humans are able to navigate through geographic space as well as meaningfully communicate geographic information represented in cartographic form. The current dominance of spatial metaphors in information visualization research is the result of the realization that those cognitive skills also have value in the exploration and analysis of non-geographic information. While mapping or landscape metaphors are routinely used in this field, there is a noticeable lack of consideration for existing cartographic expertise. This is especially apparent whenever problematic issues are encountered, such as graphic complexity or feature labeling. There are a number of areas in which a cartographic outlook could provide a valuable perspective. This paper discusses how geographic and cartographic notions may influence the design of visualizations for textual information spaces. Map projections, generalization, feature labeling and map design issues are discussed},
}
@inproceedings{p2004,
booktitle = {Proc. InfoVis},
year = 2000,
title = {GADGET/IV: a taxonomic approach to semi-automatic design of information visualization applications using modular visualization environment},
doi = {10.1109/INFVIS.2000.885093},
url = {http://dx.doi.org/10.1109/INFVIS.2000.885093},
author = {Fujishiro, I. and Ichikawa, Y. and Furuhata, R. and Takeshima, Y.},
pages = {77--83},
keywords = {},
abstract = {Since novice users of visualization systems lack knowledge and expertise in data visualization, it is a tough task for them to generate efficient and effective visualizations that allow them to comprehend information that is embedded in the data. Therefore, systems supporting the users to design appropriate visualizations are of great importance. The GADGET (Goal-oriented Application Design Guidance for modular visualization EnvironmenTs) system, which has been developed by the authors (1997), interactively helps users to design scientific visualization applications by presenting appropriate MVE (Modular Visualization Environment) prototypes according to the specification of the visualization goals expressed mainly with the Wehrend matrix (S. Wehrend & C. Lewis, 1990). This paper extends this approach in order to develop a system named GADGET/IV, which is intended to provide the users with an environment for semi-automatic design of information visualization (IV) applications. To this end, a novel goal-oriented taxonomy of IV techniques is presented. Also, an initial design of the system architecture and user assistance flow is described. The usefulness of the GADGET/IV system is illustrated with example problems of Web site access frequency analysis},
}
@inproceedings{p2005,
booktitle = {Proc. InfoVis},
year = 2000,
title = {Getting portals to behave},
doi = {10.1109/INFVIS.2000.885087},
url = {http://dx.doi.org/10.1109/INFVIS.2000.885087},
author = {Olston, C. and Woodruff, A.},
pages = {15--25},
keywords = {Portals, Multiple Views, Data Visualization},
abstract = {Data visualization environments help users understand and analyze their data by permitting interactive browsing of graphical representations of the data. To further facilitate understanding and analysis, many visualization environments have special features known as portals, which are sub-windows of a data canvas. Portals provide a way to display multiple graphical representations simultaneously, in a nested fashion. This makes portals an extremely powerful and flexible paradigm for data visualization. Unfortunately, with this flexibility comes complexity. There are over a hundred possible ways each portal can be configured to exhibit different behaviors. Many of these behaviors are confusing and certain behaviors can be inappropriate for a particular setting. It is desirable to eliminate confusing and inappropriate behaviors. The authors construct a taxonomy of portal behaviors and give recommendations to help designers of visualization systems decide which behaviors are intuitive and appropriate for a particular setting. They apply these recommendations to an example setting that is fully visually programmable and analyze the resulting reduced set of behaviors. Finally, the authors consider a real visualization environment and demonstrate some problems associated with behaviors that do not follow their recommendations},
}
@inproceedings{p2006,
booktitle = {Proc. InfoVis},
year = 2000,
title = {Information content measures of visual displays},
doi = {10.1109/INFVIS.2000.885096},
url = {http://dx.doi.org/10.1109/INFVIS.2000.885096},
author = {Yang-Pelaez, J. and Flowers, W.C.},
pages = {99--103},
keywords = {},
abstract = {With an increase in the number of different visualization techniques, it becomes necessary to develop a measure for evaluating the effectiveness of visualizations. Metrics to evaluate visual displays were developed based on measures of information content developed by Shannon and used in communication theory. These measures of information content can be used to quantify the relative effectiveness of displays},
}
@inproceedings{p2007,
booktitle = {Proc. InfoVis},
year = 2000,
title = {Interactive problem solving via algorithm visualization},
doi = {10.1109/INFVIS.2000.885103},
url = {http://dx.doi.org/10.1109/INFVIS.2000.885103},
author = {Pu, P. and Lalanne, D.},
pages = {145--153},
keywords = {},
abstract = {COMIND is a tool for conceptual design of industrial products. It helps designers define and evaluate the initial design space by using search algorithms to generate sets of feasible solutions. Two algorithm visualization techniques, Kaleidoscope and Lattice, and one visualization of n-dimensional data, MAP, are used to externalize the machine's problem solving strategies and the tradeoffs as a result of using these strategies. After a short training period, users are able to discover tactics to explore design space effectively, evaluate new design solutions, and learn important relationships among design criteria, search speed and solution quality. We thus propose that visualization can serve as a tool for interactive intelligence, i.e., human-machine collaboration for solving complex problems},
}
@inproceedings{p2008,
booktitle = {Proc. InfoVis},
year = 2000,
title = {Lighthouse: showing the way to relevant information},
doi = {10.1109/INFVIS.2000.885099},
url = {http://dx.doi.org/10.1109/INFVIS.2000.885099},
author = {Leuski, A. and Allan, J.},
pages = {125--129},
keywords = {},
abstract = {Lighthouse is an on-line interface for a Web-based information retrieval system. It accepts queries from a user, collects the retrieved documents from the search engine, organizes and presents them to the user. The system integrates two known presentations of the retrieved results, the ranked list and clustering visualization, in a novel and effective way. It accepts the user's input and adjusts the document visualization accordingly. We give a brief overview of the system},
}
@inproceedings{p2009,
booktitle = {Proc. InfoVis},
year = 2000,
title = {Metaphor-aware 3D navigation},
doi = {10.1109/INFVIS.2000.885104},
url = {http://dx.doi.org/10.1109/INFVIS.2000.885104},
author = {Russo dos Santos, C. and Gros, P. and Abel, P. and Loisel, D. and Trichaud, N. and Paris, J.P.},
pages = {155--165},
keywords = {},
abstract = {Anyone who has ever experienced three-dimensional (3D) interfaces will agree that navigating in a 3D world is not a trivial task. The user interface of traditional 3D browsers provides simple navigation tools that allow the user to modify the camera parameters such as orientation, position and focal. Using these tools, it is frequent that, after some movements, the user is lost in the virtual 3D space and usually tries to restart from the beginning. We present how the 3D navigation problem is addressed in the context of the CyberNet project (Abel et al., 2000). Our underlying principle is to help the user navigate by adapting the navigation tool to the virtual world. We feel that the navigation schemes provided by the 3D browsers are too generic for some specific 3D tools and we have developed adaptive navigation features that are dependent on the 3D metaphor used for visualizing the information and on the user's task},
}
@inproceedings{p2010,
booktitle = {Proc. InfoVis},
year = 2000,
title = {New methods for the visualization of electric power system information},
doi = {10.1109/INFVIS.2000.885101},
url = {http://dx.doi.org/10.1109/INFVIS.2000.885101},
author = {Overbye, T.J. and Weber, J.D.},
pages = {131--16c},
keywords = {},
abstract = {One area in need of new research in information visualization is the operation and analysis of large-scale electric power systems. In analyzing power systems, one is usually confronted with a large amount of multivariate data. With systems containing tens of thousands of electrical nodes (buses), a key challenge is to present this data in a form so the user can assess the state of the system in an intuitive and quick manner. This is particularly true when trying to analyze relationships between actual network power flows, the scheduled power flows, and the capacity of the transmission system. With electric industry restructuring and the move towards having a single entity, such as an independent system operator or pool, operate a much larger system, this need has become more acute. This paper presents several power system visualization techniques to help in this task. These techniques include animation of power system flow values, contouring of bus and transmission line flow values, data aggregation techniques and interactive 3D data visualization},
}
@inproceedings{p2011,
booktitle = {Proc. InfoVis},
year = 2000,
title = {Polaris: a system for query, analysis and visualization of multi-dimensional relational databases},
doi = {10.1109/INFVIS.2000.885086},
url = {http://dx.doi.org/10.1109/INFVIS.2000.885086},
author = {Stolte, C. and Hanrahan, P.},
pages = {5--14},
keywords = {},
abstract = {In the last several years, large multi-dimensional databases have become common in a variety of applications such as data warehousing and scientific computing. Analysis and exploration tasks place significant demands on the interfaces to these databases. Because of the size of the data sets, dense graphical representations are more effective for exploration than spreadsheets and charts. Furthermore, because of the exploratory nature of the analysis, it must be possible for the analysts to change visualizations rapidly as they pursue a cycle involving first hypothesis and then experimentation. The authors present Polaris, an interface for exploring large multi-dimensional databases that extends the well-known Pivot Table interface. The novel features of Polaris include an interface for constructing visual specifications of table based graphical displays and the ability to generate a precise set of relational queries from the visual specifications. The visual specifications can be rapidly and incrementally developed, giving the analyst visual feedback as they construct complex queries and visualizations},
}
@inproceedings{p2012,
booktitle = {Proc. InfoVis},
year = 2000,
title = {Redefining the focus and context of focus+context visualization},
doi = {10.1109/INFVIS.2000.885094},
url = {http://dx.doi.org/10.1109/INFVIS.2000.885094},
author = {Bjork, S. and Redstrom, J.},
pages = {85--89},
keywords = {},
abstract = {The increasing diversity of computers, especially among small mobile devices such as mobile phones and PDAs, raises new questions about information visualization techniques developed for the desktop computer. Using a series of examples ranging from applications for ordinary desktop displays to web-browsers and other applications for PDAs, we describe how a focus+context technique, Flip Zooming, is changed due to the situation it is used in. Based on these examples, we discuss how the use of “focus” and “context” in focus+context techniques changes in order to fit new areas of use for information visualization},
}
@inproceedings{p2013,
booktitle = {Proc. InfoVis},
year = 2000,
title = {ThemeRiver: visualizing theme changes over time},
doi = {10.1109/INFVIS.2000.885098},
url = {http://dx.doi.org/10.1109/INFVIS.2000.885098},
author = {Havre, S. and Hetzler, E. and Nowell, L.},
pages = {115--123},
keywords = {},
abstract = {ThemeRiver™ is a prototype system that visualizes thematic variations over time within a large collection of documents. The “river” flows from left to right through time, changing width to depict changes in thematic strength of temporally associated documents. Colored “currents” flowing within the river narrow or widen to indicate decreases or increases in the strength of an individual topic or a group of topics in the associated documents. The river is shown within the context of a timeline and a corresponding textual presentation of external events},
}
@inproceedings{p2014,
booktitle = {Proc. InfoVis},
year = 2000,
title = {Using Visualization to Detect Plagiarism in Computer Science Classes},
doi = {10.0000/00000001},
url = {http://dl.acm.org/citation.cfm?id=857699},
author = {Ribler, R. L. and Abrams, M.},
keywords = {},
abstract = {This paper introduces a number of general methods for visualizing commonality in sets of text files. Each visualization simultaneously compares one file in the set to all other files in the set. These visualizations, which can be computed in O(n) time and space, are explained and then applied to the problem of detecting plagiarism in large computer science classes. A case study is presented and sample visualizations are provided. Finally, a new interactive tool that can be used to produce and manipulate these visualizations is presented.},
}
@inproceedings{p2016,
booktitle = {Proc. InfoVis},
year = 2000,
title = {Visualizing massive multi-digraphs},
doi = {10.1109/INFVIS.2000.885089},
url = {http://dx.doi.org/10.1109/INFVIS.2000.885089},
author = {Abello, J. and Korn, J.},
pages = {39--47},
keywords = {visualization, massive data sets, graphs, hierarchies, out-of-core algorithms},
abstract = {We describe MGV, an integrated visualization and exploration system for massive multi-digraph navigation. MGV's only assumption is that the vertex set of the underlying digraph corresponds to the set of leaves of a predetermined tree T. MGV builds an out-of-core graph hierarchy and provides mechanisms to plug in arbitrary visual representations for each graph hierarchy slice. Navigation from one level to another of the hierarchy corresponds to the implementation of a drill-down interface. In order to provide the user with navigation control and interactive response, MGV incorporates a number of visualization techniques like interactive pixel-oriented 2D and 3D maps, statistical displays, multi-linked views, and a zoomable label based interface. This makes the association of geographic information and graph data very natural. MGV follows the client-server paradigm and it is implemented in C and Java-3D. We highlight the main algorithmic and visualization techniques behind the tools and point out along the way several possible application scenarios. Our techniques are being applied to multi-graphs defined on vertex sets with sizes ranging from 100 million to 250 million vertices},
}
@inproceedings{p2017,
booktitle = {Proc. InfoVis},
year = 2000,
title = {Visualizing sequential patterns for text mining},
doi = {10.1109/INFVIS.2000.885097},
url = {http://dx.doi.org/10.1109/INFVIS.2000.885097},
author = {Pak Chung Wong and Cowley, W. and Foote, H. and Jurrus, E. and Thomas, J.},
pages = {105--111},
keywords = {},
abstract = {A sequential pattern in data mining is a finite series of elements such as ABCD where A, B, C, and D are elements of the same domain. The mining of sequential patterns is designed to find patterns of discrete events that frequently happen in the same arrangement along a timeline. Like association and clustering, the mining of sequential patterns is among the most popular knowledge discovery techniques that apply statistical measures to extract useful information from large datasets. As our computers become more powerful, we are able to mine bigger datasets and obtain hundreds of thousands of sequential patterns in full detail. With this vast amount of data, we argue that neither data mining nor visualization by itself can manage the information and reflect the knowledge effectively. Subsequently, we apply visualization to augment data mining in a study of sequential patterns in large text corpora. The result shows that we can learn more and more quickly in an integrated visual data-mining environment},
}
@inproceedings{p2104,
booktitle = {Proc. InfoVis},
year = 1999,
title = {3D interactive visualization for inter-cell dependencies of spreadsheets},
doi = {10.1109/INFVIS.1999.801861},
url = {http://dx.doi.org/10.1109/INFVIS.1999.801861},
author = {Shiozawa, H. and Okada, K. and Matsushita, Y.},
pages = {79--82, 148},
keywords = {information visualization, 3D user interfaces, spreadsheets, inter-cell dependencies, lifting-up operation, Natto View},
abstract = {This paper proposes a new technique to visualize dependencies among cells in a spreadsheet. In this way, the system firstly visualizes a spreadsheet on a plane in three-dimensional space, and draws arcs between interrelated cells. By allowing a user to select an arbitrary cell and lift it up with direct manipulation, the system utilizes the third dimension to ameliorate visual occlusion of crossing arcs. As the user lifts a focused cell up, the interrelated cells are lifted up together; thus hidden dataflow networks can be visually intelligible interactively. Because spreadsheets are aimed at calculation itself rather than appearances of outputs, their mechanism is relatively invisible and not obvious for ordinary users. Our visualization helps such users to understand structures and mechanism of spreadsheets},
}
@inproceedings{p2105,
booktitle = {Proc. InfoVis},
year = 1999,
title = {A framework for focus+context visualization},
doi = {10.1109/INFVIS.1999.801857},
url = {http://dx.doi.org/10.1109/INFVIS.1999.801857},
author = {Bjork, S. and Holmquist, L.E. and Redstrom, J.},
pages = {53--56, 145},
keywords = {Focus+context visualization, information visualization, fisheye views, formal methods, theory},
abstract = {This paper aims to give a systematic account of focus+context visualization techniques, i.e. visualizations which aim to give users integrated visual access to details and context in a data set. We introduce the notion that there are different orders of information visualization with focus+context being a second-order visualization and provide a formal framework for describing and constructing focus+context visualizations},
}
@inproceedings{p2106,
booktitle = {Proc. InfoVis},
year = 1999,
title = {A Java-based visual mining infrastructure and applications},
doi = {10.1109/INFVIS.1999.801867},
url = {http://dx.doi.org/10.1109/INFVIS.1999.801867},
author = {Hao, M.C. and Dayal, U. and Hsu, M. and Baker, J. and D'Eletto, R.},
pages = {124--127, 153},
keywords = {},
abstract = {Many real-world KDD (Knowledge Discovery & Data Mining) applications involve the navigation of large volumes of information on the web, such as, Internet resources, hot topics, and telecom phone switches. Quite often users feel lost, confused and overwhelmed with displays that contain too much information. This paper discusses a new content-driven visual mining infrastructure called VisMine, that uses several innovative techniques: (1) hidden visual structure and relationships for uncluttering displays; (2) simultaneous visual presentations for high-dimensional knowledge discovery; and (3) a new visual interface to plug in existing graphic toolkits for expanding its use in a wide variety of visual applications. We have applied this infrastructure to three data mining visualization applications-topic hierarchy for document navigation, web-based trouble shooting, and telecom switch mining},
}
@inproceedings{p2107,
booktitle = {Proc. InfoVis},
year = 1999,
title = {Aggregate Towers: scale sensitive visualization and decluttering of geospatial data},
doi = {10.1109/INFVIS.1999.801863},
url = {http://dx.doi.org/10.1109/INFVIS.1999.801863},
author = {Rayson, J.K.},
pages = {92--99, 149},
keywords = {data visualization, information visualization, aggregation, zoom, cartography},
abstract = {We have developed a technique, Aggregate Towers, that allows geospatial data to be visualized across a range of map scales. We use a combination of data aggregation algorithms and dynamically aggregating data markers (e.g., icons or symbols) to accommodate interactive zooming by a user while maintaining a representation that remains intuitive, consistent across multiple scales and uncluttered. This approach implicitly generates multiple levels of overview displays from a single set of underlying data},
}
@inproceedings{p2108,
booktitle = {Proc. InfoVis},
year = 1999,
title = {Cluster and calendar based visualization of time series data},
doi = {10.1109/INFVIS.1999.801851},
url = {http://dx.doi.org/10.1109/INFVIS.1999.801851},
author = {van Wijk, J.J. and Van Selow, E.R.},
pages = {4--9, 140},
keywords = {},
abstract = {A new method is presented to get an insight into univariate time series data. The problem addressed is how to identify patterns and trends on multiple time scales (days, weeks, seasons) simultaneously. The solution presented is to cluster similar daily data patterns, and to visualize the average patterns as graphs and the corresponding days on a calendar. This presentation provides a quick insight into both standard and exceptional patterns. Furthermore, it is well suited to interactive exploration. Two applications, numbers of employees present and energy consumption, are presented},
}
@inproceedings{p2109,
booktitle = {Proc. InfoVis},
year = 1999,
title = {Constellation: a visualization tool for linguistic queries from MindNet},
doi = {10.1109/INFVIS.1999.801869},
url = {http://dx.doi.org/10.1109/INFVIS.1999.801869},
author = {Munzner, T. and Guimbretiere, F. and Robertson, G.},
pages = {132--135, 154},
keywords = {},
abstract = {Constellation is a visualization system for the results of queries from the MindNet natural language semantic network. Constellation is targeted at helping MindNet's creators and users refine their algorithms, as opposed to understanding the structure of language. We designed a special-purpose graph layout algorithm which exploits higher-level structure in addition to the basic node and edge connectivity. Our layout prioritizes the creation of a semantic space to encode plausibility instead of traditional graph drawing metrics like minimizing edge crossings. We make careful use of several perceptual channels both to minimize the visual impact of edge crossings and to emphasize highlighted constellations of nodes and edges},
}
@inproceedings{p2110,
booktitle = {Proc. InfoVis},
year = 1999,
title = {Cushion treemaps: visualization of hierarchical information},
doi = {10.1109/INFVIS.1999.801860},
url = {http://dx.doi.org/10.1109/INFVIS.1999.801860},
author = {van Wijk, J.J. and van de Wetering, H.},
pages = {73--78, 147},
keywords = {Information Visualization, Tree Visualization, Treemaps},
abstract = {A new method is presented for the visualization of hierarchical information, such as directory structures and organization structures. Cushion treemaps inherit the elegance of standard treemaps: compact, space-filling displays of hierarchical information, based on recursive subdivision of a rectangular image space. Intuitive shading is used to provide insight in the hierarchical structure. During the subdivision, ridges are added per rectangle, which are rendered with a simple shading model. The result is a surface that consists of recursive cushions. The method is efficient, effective, easy to use and implement, and has a wide applicability},
}
@inproceedings{p2111,
booktitle = {Proc. InfoVis},
year = 1999,
title = {Does animation help users build mental maps of spatial information?},
doi = {10.1109/INFVIS.1999.801854},
url = {http://dx.doi.org/10.1109/INFVIS.1999.801854},
author = {Bederson, B.B. and Boltman, A.},
pages = {28--35},
keywords = {Evaluation, animation, real-time computer graphics, Zoomable User Interfaces (ZUIs), multiscale interfaces, Pad++},
abstract = {We examine how animating a viewpoint change in a spatial information system affects a user's ability to build a mental map of the information in the space. We found that animation improves users' ability to reconstruct the information space, with no penalty on task performance time. We believe that this study provides strong evidence for adding animated transitions in many applications with fixed spatial data where the user navigates around the data space},
}
@inproceedings{p2112,
booktitle = {Proc. InfoVis},
year = 1999,
title = {Domain analysis: a technique to design a user-centered visualization framework},
doi = {10.1109/INFVIS.1999.801856},
url = {http://dx.doi.org/10.1109/INFVIS.1999.801856},
author = {Espinosa, O.J. and Hendrickson, C. and Garrett, J.H.},
pages = {44--52, 144},
keywords = {Visualization framework, Life-Cycle Assessment, user tasks, computer-human interaction, domain analysis, economic input-output},
abstract = {Domain Analysis for Data Visualization (DADV) is a technique to use when investigating a domain where data visualizations are going to be designed and added to existing software systems. DADV was used to design the data visualization in VisEIO-LCA, which is a framework to visualize environmental data about products. Most of the visualizations are designed using the following stages: formatting data in tables, selecting visual structures, and rendering the data on the screen. Although many visualization authors perform implicit domain analysis, in this paper domain analysis is added explicitly to the process of designing visualizations with the goal of producing more usable software tools. Environmental Life-Cycle Assessment (LCA) is used as a test bed for this technique},
}
@inproceedings{p2113,
booktitle = {Proc. InfoVis},
year = 1999,
title = {Dynamic hierarchy specification and visualization},
doi = {10.1109/INFVIS.1999.801859},
url = {http://dx.doi.org/10.1109/INFVIS.1999.801859},
author = {Wilson, R.M. and Bergeron, R.D.},
pages = {65--72},
keywords = {},
abstract = {This paper describes concepts that underlie the design and implementation of an information exploration system that allows users to impose arbitrary hierarchical organizations on their data. Such hierarchies allow a user to embed important semantic information into the hierarchy definition. Our goal is to recognize the significance of this implicit information and to utilize it in the hierarchy visualization. The innovative features of our system include the dynamic modification of the hierarchy definitions and the definition and implementation of a set of layout algorithms that utilize semantic information implicit in the tree construction},
}
@inproceedings{p2114,
booktitle = {Proc. InfoVis},
year = 1999,
title = {Efficient multi-object dynamic query histograms},
doi = {10.1109/INFVIS.1999.801862},
url = {http://dx.doi.org/10.1109/INFVIS.1999.801862},
author = {Derthick, M. and Harrison, J. and Moore, A. and Roth, S.F.},
pages = {84--91},
keywords = {Dynamic Query, Database, Probabilistic Algorithms},
abstract = {Dynamic queries offer continuous feedback during range queries, and have been shown to be effective and satisfying. Recent work has extended them to datasets of 100,000 objects and, separately, to queries involving relations among multiple objects. The latter work enables filtering houses by properties of their owners, for instance. Our primary concern is providing feedback from histograms during dynamic query. The height of each histogram bar shows the count of selected objects whose attribute value falls into a given range. Unfortunately, previous efficient algorithms for single object queries overcount in the case of multiple objects if, for instance, a house has multiple owners. This paper presents an efficient algorithm that with high probability closely approximates the true counts},
}
@inproceedings{p2115,
booktitle = {Proc. InfoVis},
year = 1999,
title = {Evaluating a visualisation of image similarity as a tool for image browsing},
doi = {10.1109/INFVIS.1999.801855},
url = {http://dx.doi.org/10.1109/INFVIS.1999.801855},
author = {Rodden, K. and Basalaj, W. and Sinclair, D. and Wood, K.},
pages = {36--43, 143},
keywords = {},
abstract = {A similarity metric based on the low-level content of images can be used to create a visualisation in which visually similar images are displayed close to each other. We are carrying out a series of experiments to evaluate the usefulness of this type of visualisation as an image browsing aid. The initial experiment, described, considered whether people would find a given photograph more quickly in a visualisation than in a randomly arranged grid of images. The results show that the subjects were faster with the visualisation, although in post-experiment interviews many of them said that they preferred the clarity and regularity of the grid. We describe an algorithm with which the best aspects of the two layout types can be combined},
}
@inproceedings{p2116,
booktitle = {Proc. InfoVis},
year = 1999,
title = {Navigating hierarchies with structure-based brushes},
doi = {10.1109/INFVIS.1999.801858},
url = {http://dx.doi.org/10.1109/INFVIS.1999.801858},
author = {Ying-Huey Fua and Ward, M.O. and Rundensteiner, E.A.},
pages = {58--64, 146},
keywords = {Brushing, hierarchical representation, interactive selection, exploratory data analysis},
abstract = {Interactive selection is a critical component in exploratory visualization, allowing users to isolate subsets of the displayed information for highlighting, deleting, analysis, or focussed investigation. Brushing, a popular method for implementing the selection process, has traditionally been performed in either screen space or data space. We introduce the concept of a structure-based brush, which can be used to perform selection in hierarchically structured data sets. Our structure-based brush allows users to navigate hierarchies by specifying focal extents and level-of-detail on a visual representation of the structure. Proximity-based coloring, which maps similar colors to data that are closely related within the structure, helps convey both structural relationships and anomalies. We describe the design and implementation of our structure-based brushing tool. We also validate its usefulness using two distinct hierarchical visualization techniques, namely hierarchical parallel coordinates and tree-maps},
}
@inproceedings{p2117,
booktitle = {Proc. InfoVis},
year = 1999,
title = {Sensemaking of evolving Web sites using visualization spreadsheets},
doi = {10.1109/INFVIS.1999.801853},
url = {http://dx.doi.org/10.1109/INFVIS.1999.801853},
author = {Chi, E.H. and Card, S.K.},
pages = {18--25, 142},
keywords = {Information visualization, Spreadsheet, Sensemaking, World Wide Web, Information ecologies, Log file analysis},
abstract = {In the process of knowledge discovery, workers examine available information in order to make sense of it. By sensemaking, we mean interacting with and operating on the information with a variety of information processing mechanisms. Previously, we introduced a concept that uses the spreadsheet metaphor with cells containing visualizations of complex data. We extend and apply a cognitive model called “visual sensemaking” to the visualization spreadsheet. We use the task of making sense of a large Web site as a concrete example throughout the paper for demonstration. Using a variety of visualization techniques, such as the Disk Tree and Cone Tree, we show that the interactions of the visualization spreadsheet help users draw conclusions from the overall relationships of the entire information set},
}
@inproceedings{p2118,
booktitle = {Proc. InfoVis},
year = 1999,
title = {The automated multidimensional detective},
doi = {10.1109/INFVIS.1999.801865},
url = {http://dx.doi.org/10.1109/INFVIS.1999.801865},
author = {Inselberg, A. and Avidan, T.},
pages = {112--119, 151},
keywords = {},
abstract = {Automation has arrived to parallel coordinates. A geometrically motivated classifier is presented and applied, with both training and testing stages, to 3 real datasets. Our results compared to those from 33 other classifiers have the least error. The algorithm is based on parallel coordinates and (1) has very low computational complexity in the number of variables and the size of the dataset, contrasted with the very high or unknown (often unstated) complexity of other classifiers; the low complexity enables the rule derivation to be done in near real-time, hence making the classification adaptive to changing conditions; (2) provides comprehensible and explicit rules, contrasted to neural networks, which are “black boxes”; (3) does dimensionality selection, where the minimal set of original variables (not transformed new variables as in Principal Component Analysis) required to state the rule is found; and (4) orders these variables so as to optimize the clarity of separation between the designated set and its complement, which solves the pesky “ordering problem” in parallel coordinates. The algorithm is display independent, hence it can be applied to datasets that are very large in size and number of variables. Though it is instructive to present the results visually, the input size is no longer display-limited as for visual data mining},
}
@inproceedings{p2119,
booktitle = {Proc. InfoVis},
year = 1999,
title = {The sunflower visual metaphor, a new paradigm for dimensional compression},
doi = {10.1109/INFVIS.1999.801868},
url = {http://dx.doi.org/10.1109/INFVIS.1999.801868},
author = {Rose, S.},
pages = {128--131},
keywords = {information visualization, text visualization, visualization, knowledge management, information retrieval},
abstract = {This paper introduces the Sunflower visual metaphor for information visualization. The visual metaphor is presented as an alternative to current techniques of dimensional compression and the visualization tools that employ them. The paper discusses the motivation for the Sunflower paradigm, its implementation and critical factors for producing an effective visualization. A primary driver in this research effort has been to develop a visualization tool that facilitates browsing, knowledge discovery, and that supports learning through sense making and integration of new information},
}
@inproceedings{p2120,
booktitle = {Proc. InfoVis},
year = 1999,
title = {VisageWeb: visualizing WWW data in Visage},
doi = {10.1109/INFVIS.1999.801864},
url = {http://dx.doi.org/10.1109/INFVIS.1999.801864},
author = {Higgins, M. and Lucas, P. and Sean, J.},
pages = {100--107, 150},
keywords = {World Wide Web, Information Visualization, User Interface},
abstract = {VisageWeb is an information-centric user interface to the World Wide Web built within the Visage data visualization environment. This paper traces the development of the VisageWeb project, using it to motivate an exploration of how an information-centric architecture copes with new visualization challenges. We conclude with a presentation of the VisageWeb prototype itself},
}
@inproceedings{p2121,
booktitle = {Proc. InfoVis},
year = 1999,
title = {Visualizing application behavior on superscalar processors},
doi = {10.1109/INFVIS.1999.801852},
url = {http://dx.doi.org/10.1109/INFVIS.1999.801852},
author = {Stolte, C. and Bosche, R. and Hanrahan, P. and Rosenblum, M.},
pages = {10--17, 141},
keywords = {Computer systems visualization, visualization systems, superscalar processors},
abstract = {The advent of superscalar processors with out-of-order execution makes it increasingly difficult to determine how well an application is utilizing the processor and how to adapt the application to improve its performance. We describe a visualization system for the analysis of application behavior on superscalar processors. Our system provides an overview-plus-detail display of the application's execution. A timeline view of pipeline performance data shows the overall utilization of the pipeline. This information is displayed using multiple time scales, enabling the user to drill down from a high-level application overview to a focus region of hundreds of cycles. This region of interest is displayed in detail using an animated cycle-by-cycle view of the execution. This view shows how instructions are reordered and executed and how functional units are being utilized. Additional context views correlate instructions in this detailed view with the relevant source code for the application. This allows the user to discover the root cause of the poor pipeline utilization and make changes to the application to improve its performance. This visualization system can be easily configured to display a variety of processor models and configurations. We demonstrate it for both the MXS and MMIX processor models},
}
@inproceedings{p2122,
booktitle = {Proc. InfoVis},
year = 1999,
title = {Visualizing association rules for text mining},
doi = {10.1109/INFVIS.1999.801866},
url = {http://dx.doi.org/10.1109/INFVIS.1999.801866},
author = {Pak Chung Wong and Whitney, P. and Thomas, J.},
pages = {120--123, 152},
keywords = {},
abstract = {An association rule in data mining is an implication of the form X→Y where X is a set of antecedent items and Y is the consequent item. For years researchers have developed many tools to visualize association rules. However, few of these tools can handle more than dozens of rules, and none of them can effectively manage rules with multiple antecedents. Thus, it is extremely difficult to visualize and understand the association information of a large data set even when all the rules are available. This paper presents a novel visualization technique to tackle many of these problems. We apply the technology to a text mining study on large corpora. The results indicate that our design can easily handle hundreds of multiple antecedent association rules in a three-dimensional display with minimum human interaction, low occlusion percentage, and no screen swapping},
}
@inproceedings{p2196,
booktitle = {Proc. InfoVis},
year = 1998,
title = {Algorithm visualization for distributed environments},
doi = {10.1109/INFVIS.1998.729561},
url = {http://dx.doi.org/10.1109/INFVIS.1998.729561},
author = {Moses, Y. and Polunsky, Z. and Tal, A. and Ulitsky, L.},
pages = {71--78, 154},
keywords = {},
abstract = {The paper investigates the visualization of distributed algorithms. We present a conceptual model and a system, VADE, that realizes this model. Since in asynchronous distributed systems there is no way of knowing (let alone, visualizing) the “real” execution, we show how to generate a visualization which is consistent with the execution of the distributed algorithm. We also present the design and implementation of our system. VADE is designed so that the algorithm runs on the server's machines while the visualization is executed on a Web page on the client's machine. Programmers can write animations quickly and easily with the assistance of VADE's libraries},
}
@inproceedings{p2197,
booktitle = {Proc. InfoVis},
year = 1998,
title = {An interactive view for hierarchical clustering},
doi = {10.1109/INFVIS.1998.729556},
url = {http://dx.doi.org/10.1109/INFVIS.1998.729556},
author = {Wills, G.J.},
pages = {26--31, 150},
keywords = {},
abstract = {The paper describes a visualization of a general hierarchical clustering algorithm that allows the user to manipulate the number of classes produced by the clustering method without requiring a radical re-drawing of the clustering tree. The visual method used, a space filling recursive division of a rectangular area, keeps the items under consideration at the same screen position, even while the number of classes is under interactive control. As well as presenting a compact representation of the clustering with different cluster numbers, this method is particularly useful in a linked views environment where additional information can be added to a display to encode other information, without this added level of detail being perturbed when changes are made to the number of clusters},
}
@inproceedings{p2198,
booktitle = {Proc. InfoVis},
year = 1998,
title = {An operator interaction framework for visualization systems},
doi = {10.1109/INFVIS.1998.729560},
url = {http://dx.doi.org/10.1109/INFVIS.1998.729560},
author = {Ed Huai-Hsin Chi and Riedl, J.},
pages = {63--70},
keywords = {information visualization, operators, user interactions, view/value, framework, spreadsheet, design, extensibility, visualization systems},
abstract = {Information visualization encounters a wide variety of different data domains. The visualization community has developed representation methods and interactive techniques. As a community, we have realized that the requirements in each domain are often dramatically different. In order to easily apply existing methods, researchers have developed a semiology of graphic representations. We have extended this research into a framework that includes operators and interactions in visualization systems, such as a visualization spreadsheet. We discuss properties of this framework and use it to characterize operations spanning a variety of different visualization techniques. The framework developed in the paper enables a new way of exploring and evaluating the design space of visualization operators, and helps end users in their analysis tasks},
}
@inproceedings{p2199,
booktitle = {Proc. InfoVis},
year = 1998,
title = {BiblioMapper: a cluster-based information visualization technique},
doi = {10.1109/INFVIS.1998.729569},
url = {http://dx.doi.org/10.1109/INFVIS.1998.729569},
author = {Min Song},
pages = {130--136},
keywords = {Visualization, Information Retrieval, Clustering Algorithms, Textual Information},
abstract = {The purpose of the paper is to develop a visualization system of a document space, called BiblioMapper, for CISI collections, one of the bibliographic databases available on the Internet. The major function of BiblioMapper is to visualize the document space with a cluster-based visualization technique. The cluster-based visualization technique assembles a set of documents according to semantic similarities. One advantage of this technique is that users are able to focus on and assess each cluster and the documents which the cluster comprises according to their information needs},
}
@inproceedings{p2200,
booktitle = {Proc. InfoVis},
year = 1998,
title = {Comparative visualization of protein structure-sequence alignments},
doi = {10.1109/INFVIS.1998.729566},
url = {http://dx.doi.org/10.1109/INFVIS.1998.729566},
author = {Hansen, M. and Meads, D. and Pang, A.},
pages = {106--110, 158},
keywords = {proteins, structure, alignment, fold recognition, threading, similarity, glyphs, streamlines, ribbons, amino acids},
abstract = {Protein fold recognition (threading) involves the prediction of a protein's three-dimensional shape based on its similarity to a protein whose structure is known. Fold predictions are low resolution; no effort is made to rotate the protein's component amino acid side chains into their correct spatial orientations. Rather, the goal is to recognize the protein family member that most closely resembles the target sequence of unknown structure and to create a sensible alignment of the target to the structure (i.e., a structure-sequence alignment). To complement this structure prediction method the authors have implemented a low resolution molecular graphics tool. Since amino acid side chain orientation is not relevant in fold recognition, amino acid residues are represented by abstract shapes or glyphs much like Lego™ blocks. They also borrow techniques from comparative streamline visualization to provide clean depictions of the entire protein structure model. By creating a low resolution representation of protein structure, they are able to approximately double the amount of information on the screen. This implementation also possesses the advantage of eliminating distracting and possibly misleading visual clutter resulting from the mapping of protein alignment information onto a high resolution display of a known structure.},
}
@inproceedings{p2201,
booktitle = {Proc. InfoVis},
year = 1998,
title = {Dynamic aggregation with circular visual designs},
doi = {10.1109/INFVIS.1998.729557},
url = {http://dx.doi.org/10.1109/INFVIS.1998.729557},
author = {Chuah, M.C.},
pages = {35--43, 151},
keywords = {},
abstract = {One very effective method for managing large data sets is aggregation or binning. We consider two aggregation methods that are tightly coupled with interactive manipulation and the visual representation of the data. Through this integration we hope to provide effective support for the aggregation process, specifically by enabling: 1) automatic aggregation, 2) continuous change and control of the aggregation level, 3) spatially based aggregates, 4) context maintenance across different aggregate levels, and 5) feedback on the level of aggregation},
}
@inproceedings{p2202,
booktitle = {Proc. InfoVis},
year = 1998,
title = {Geographic visualization: designing manipulable maps for exploring temporally varying georeferenced statistics},
doi = {10.1109/INFVIS.1998.729563},
url = {http://dx.doi.org/10.1109/INFVIS.1998.729563},
author = {MacEachren, A.M. and Boscoe, F.P. and Haug, D. and Pickle, L.},
pages = {87--94, 156},
keywords = {},
abstract = {Geographic visualization, sometimes called cartographic visualization, is a form of information visualization in which principles from cartography, geographic information systems (GIS), exploratory data analysis (EDA), and information visualization more generally are integrated in the development and assessment of visual methods that facilitate the exploration, analysis, synthesis, and presentation of georeferenced information. The authors report on development and use of one component of a prototype GVis environment designed to facilitate exploration, by domain experts, of time series multivariate georeferenced health statistics. Emphasis is on how manipulable dynamic GVis tools may facilitate visual thinking, pattern noticing, and hypothesis generation. The prototype facilitates the highlighting of data extremes, examination of change in geographic patterns over time, and exploration of similarity among georeferenced variables. A qualitative exploratory analysis of verbal protocols and transaction logs is used to characterize system use. Evidence produced through the characterization highlights differences among experts in data analysis strategies (particularly in relation to the use of attribute “focusing” combined with time series animation) and corresponding differences in success at noticing spatiotemporal patterns},
}
@inproceedings{p2203,
booktitle = {Proc. InfoVis},
year = 1998,
title = {IVORY-an object-oriented framework for physics-based information visualization in Java},
doi = {10.1109/INFVIS.1998.729562},
url = {http://dx.doi.org/10.1109/INFVIS.1998.729562},
author = {Sprenger, T.C. and Gross, M. and Bielser, D. and Strasser, T.},
pages = {79--86, 155},
keywords = {three-dimensional information visualization, physics-based graph layout, object-oriented visualization toolkit, multidimensional information modeling, time varying data},
abstract = {We present IVORY, a newly developed, platform-independent framework for physics-based visualization. IVORY is especially designed for information visualization applications and multidimensional graph layout. It is fully implemented in Java 1.1 and its architecture features a client-server setup, which allows us to run the visualization even on thin clients. In addition, VRML 2.0 exports can be viewed by any WWW browser with a VRML plug-in. Individual visual metaphors are invoked into IVORY via an advanced plug-in mechanism, where plug-ins can be implemented by any experienced user. The configuration of IVORY is accomplished using a script language, called IVML. Some interactive visualization examples, such as the integration of a haptic interface, illustrate the performance and versatility of our system. Our current implementation supports NT 4.0},
}
@inproceedings{p2204,
booktitle = {Proc. InfoVis},
year = 1998,
title = {LensBar-visualization for browsing and filtering large lists of data},
doi = {10.1109/INFVIS.1998.729567},
url = {http://dx.doi.org/10.1109/INFVIS.1998.729567},
author = {Masui, T.},
pages = {113--120, 159},
keywords = {},
abstract = {The author proposes a simple and powerful graphical interface tool called the LensBar for filtering and visualizing large lists of data. Browsing and querying are the most important tasks in retrieving information and LensBar integrates the two techniques into a simple scroll window with slider. While it looks familiar to users of conventional graphical interface tools, its filtering and zooming features offer sophisticated handling of large lists of textual data},
}
@inproceedings{p2205,
booktitle = {Proc. InfoVis},
year = 1998,
title = {Multi-faceted insight through interoperable visual information analysis paradigms},
doi = {10.1109/INFVIS.1998.729570},
url = {http://dx.doi.org/10.1109/INFVIS.1998.729570},
author = {Hetzler, E. and Whitney, P. and Martucci, L. and Thomas, J.},
pages = {137--144, 161},
keywords = {information visualization, user scenario, information analysis, document analysis},
abstract = {To gain insight and understanding of complex information collections, users must be able to visualize and explore many facets of the information. The paper presents several novel visual methods from an information analyst's perspective. The authors present a sample scenario, using the various methods to gain a variety of insights from a large information collection. They conclude that no single paradigm or visual method is sufficient for many analytical tasks. Often a suite of integrated methods offers a better analytic environment in today's emerging culture of information overload and rapidly changing issues. They also conclude that the interactions among these visual paradigms are equally as important as, if not more important than, the paradigms themselves},
}
@inproceedings{p2206,
booktitle = {Proc. InfoVis},
year = 1998,
title = {Reconfigurable disc trees for visualizing large hierarchical information space},
doi = {10.1109/INFVIS.1998.729555},
url = {http://dx.doi.org/10.1109/INFVIS.1998.729555},
author = {Chang-Sung Jeong and Pang, A.},
pages = {19--25, 149},
keywords = {Information visualization, disc tree, compact disc tree, plane disc tree, hierarchy},
abstract = {We present a new visualization technique, called RDT (Reconfigurable Disc Tree) which can alleviate the disadvantages of cone trees significantly for large hierarchies while maintaining its context of using 3D depth. In RDT, each node is associated with a disc, around which its children are placed. Using discs instead of cones as the basic shape in RDT has several advantages: significant reduction of occluded region, sharp increase in number of displayed nodes, and easy projection onto plane without visual overlapping. We show that RDT can greatly enhance user perception by transforming its shapes dynamically in several ways: (1) disc tree which can significantly reduce the occluded region by the foreground objects; (2) compact disc tree which can increase the number of nodes displayed on the screen; and (3) plane disc tree which can be mapped onto the plane without visual overlapping. We describe an implementation of our visualization system called VISIT (Visual Information System for reconfigurable dIsc tree). It provides 2D and 3D layouts for RDT and various user interface features such as tree reconfiguration, tree transformation, tree shading, viewing transformation, animation, selection and browsing which can enhance the user perception and navigation capabilities. We also evaluate our system using the following three metrics: percentage of occlusion, density of displayed nodes on a screen, and number of identifiable nodes},
}
@inproceedings{p2207,
booktitle = {Proc. InfoVis},
year = 1998,
title = {Saying it in graphics: from intentions to visualizations},
doi = {10.1109/INFVIS.1998.729564},
url = {http://dx.doi.org/10.1109/INFVIS.1998.729564},
author = {Kerpedjiev, S. and Carenini, G. and Green, N. and Moore, J. and Roth, S.F.},
pages = {97--101},
keywords = {},
abstract = {The authors propose a methodology for automatically realizing communicative goals in graphics. It features a task model that mediates the communicative intent and the selection of graphical techniques. The methodology supports the following functions: isolating assertions presentable in graphics; mapping such assertions into tasks for the potential reader, and selecting graphical techniques that support those tasks. They illustrate the methodology by redesigning a textual argument into a multimedia one with the same rhetorical and content structures but employing graphics to achieve some of the intentions},
}
@inproceedings{p2208,
booktitle = {Proc. InfoVis},
year = 1998,
title = {Similarity clustering of dimensions for an enhanced visualization of multidimensional data},
doi = {10.1109/INFVIS.1998.729559},
url = {http://dx.doi.org/10.1109/INFVIS.1998.729559},
author = {Ankerst, M. and Berchtold, S. and Keim, D.A.},
pages = {52--60, 153},
keywords = {},
abstract = {The order and arrangement of dimensions (variates) is crucial for the effectiveness of a large number of visualization techniques such as parallel coordinates, scatterplots, recursive pattern, and many others. We describe a systematic approach to arrange the dimensions according to their similarity. The basic idea is to rearrange the data dimensions such that dimensions showing a similar behavior are positioned next to each other. For the similarity clustering of dimensions, we need to define similarity measures which determine the partial or global similarity of dimensions. We then consider the problem of finding an optimal one- or two-dimensional arrangement of the dimensions based on their similarity. Theoretical considerations show that both, the one- and the two-dimensional arrangement problem are surprisingly hard problems, i.e. they are NP complete. Our solution of the problem is therefore based on heuristic algorithms. An empirical evaluation using a number of different visualization techniques shows the high impact of our similarity clustering of dimensions on the visualization results},
}
@inproceedings{p2209,
booktitle = {Proc. InfoVis},
year = 1998,
title = {The generalized detail in-context problem},
doi = {10.1109/INFVIS.1998.729558},
url = {http://dx.doi.org/10.1109/INFVIS.1998.729558},
author = {Keahey, T.A.},
pages = {44--51, 152},
keywords = {},
abstract = {The paper describes a general formulation of the “detail-in-context” problem, which is a central issue of fundamental importance to a wide variety of nonlinear magnification systems. A number of tools are described for dealing with this problem effectively. These tools can be applied to any continuous nonlinear magnification system, and are not tied to specific implementation features of the system that produced the original transformation. Of particular interest is the development of “seamless multi level views”, which allow multiple global views of an information space (each having different information content) to be integrated into a single view without discontinuity},
}
@inproceedings{p2210,
booktitle = {Proc. InfoVis},
year = 1998,
title = {The shape of Shakespeare: visualizing text using implicit surfaces},
doi = {10.1109/INFVIS.1998.729568},
url = {http://dx.doi.org/10.1109/INFVIS.1998.729568},
author = {Rohrer, R.M. and Ebert, D.S. and Sibert, J.L.},
pages = {121--129, 160},
keywords = {information visualization, text visualization, procedural visualization, implicit surface modeling, blobby models, document clustering, information retrieval, graphics, user interfaces},
abstract = {Information visualization focuses on the use of visual means for exploring non-visual information. While free-form text is a rich, common source of information, visualization of text is a challenging problem since text is inherently non-spatial. The paper explores the use of implicit surface models for visualizing text. The authors describe several techniques for text visualization that aid in understanding document content and document relationships. A simple method is defined for mapping document content to shape. By comparing the shapes of multiple documents, global content similarities and differences may be noted. In addition, they describe a visual clustering method in which documents are arranged in 3D based upon similarity scoring. Documents deemed closely related blend together as a single connected shape. Hence, a document corpus becomes a collection of shapes that reflect inter-document relationships. These techniques provide methods to visualize individual documents as well as corpus meta-data. They then combine the two techniques to produce transparent clusters enclosing individual document shapes. This provides a way to visualize both local and global contextual information. Finally, they elaborate on several potential applications of these methods},
}
@inproceedings{p2211,
booktitle = {Proc. InfoVis},
year = 1998,
title = {Traversal-based visualization of data structures},
doi = {10.1109/INFVIS.1998.729554},
url = {http://dx.doi.org/10.1109/INFVIS.1998.729554},
author = {Korn, J. and Appel, A.W.},
pages = {11--18},
keywords = {},
abstract = {Algorithm animation systems and graphical debuggers perform the task of translating program state into visual representations. While algorithm animations typically rely on user augmented source code to produce visualizations, debuggers make use of symbolic information in the target program. As a result, visualizations produced by debuggers often lack important semantic content, making them inferior to algorithm animation systems. The paper presents a method to provide higher level, more informative visualizations in a debugger using a technique called traversal based visualization. The debugger traverses a data structure using a set of user supplied patterns to identify parts of the data structure to be drawn in a similar way. A declarative language is used to specify the patterns and the actions to take when the patterns are encountered. Alternatively, the user can construct traversal specifications through a graphical user interface to the declarative language. Furthermore, the debugger supports modification of data. Changes made to the on-screen representation are reflected in the underlying data},
}
@inproceedings{p2212,
booktitle = {Proc. InfoVis},
year = 1998,
title = {Visualizing decision table classifiers},
doi = {10.1109/INFVIS.1998.729565},
url = {http://dx.doi.org/10.1109/INFVIS.1998.729565},
author = {Becker, B.},
pages = {102--105, 157},
keywords = {},
abstract = {Decision tables, like decision trees or neural nets, are classification models used for prediction. They are induced by machine learning algorithms. A decision table consists of a hierarchical table in which each entry in a higher level table gets broken down by the values of a pair of additional attributes to form another table. The structure is similar to dimensional stacking. A visualization method is presented that allows a model based on many attributes to be understood even by those unfamiliar with machine learning. Various forms of interaction are used to make this visualization more useful than other static designs},
}
@inproceedings{p2213,
booktitle = {Proc. InfoVis},
year = 1998,
title = {WEBPATH-a three dimensional Web history},
doi = {10.1109/INFVIS.1998.729553},
url = {http://dx.doi.org/10.1109/INFVIS.1998.729553},
author = {Frecon, E. and Smith, G.},
pages = {3--10, 148},
keywords = {Virtual Environments, World-Wide-Web, Visualisation, Web Browsing},
abstract = {A number of usability studies report that many users of the WWW cannot find pages already visited, additionally many users cannot visualise where they are, or where they have been browsing. Currently, readily available WWW browsers provide history mechanisms that offer little or no support in the presentation and manipulation of visited sites. Manipulation and presentation of usage data, such as a browse history has been used in a number of cases to aid users in searching for previously attained data, and to teach or assist other users in their browse or searching techniques. The paper presents a virtual reality (VR) based application to be used alongside traditional Web browsers, which provides them with a flexibly tailorable real time visualisation of their history},
}
@inproceedings{p2291,
booktitle = {Proc. InfoVis},
year = 1997,
title = {A spreadsheet approach to information visualization},
doi = {10.1109/INFVIS.1997.636761},
url = {http://dx.doi.org/10.1109/INFVIS.1997.636761},
author = {Ed Huai-Hsin Chi and Barry, P. and Riedl, J. and Konstan, J.},
pages = {17--24},
keywords = {},
abstract = {In information visualization, as the volume and complexity of the data increases, researchers require more powerful visualization tools that enable them to more effectively explore multidimensional datasets. We discuss the general utility of a novel visualization spreadsheet framework. Just as a numerical spreadsheet enables exploration of numbers, a visualization spreadsheet enables exploration of visual forms of information. We show that the spreadsheet approach facilitates certain information visualization tasks that are more difficult using other approaches. Unlike traditional spreadsheets, which store only simple data elements and formulas in each cell, a visualization spreadsheet cell can hold an entire complex data set, selection criteria, viewing specifications, and other information needed for a full-fledged information visualization. Similarly, inter-cell operations are far more complex, stretching beyond simple arithmetic and string operations to encompass a range of domain-specific operators. We have built two prototype systems that illustrate some of these research issues. The underlying approach in our work allows domain experts to define new data types and data operations, and enables visualization experts to incorporate new visualizations, viewing parameters, and view operations.},
}
@inproceedings{p2292,
booktitle = {Proc. InfoVis},
year = 1997,
title = {Adaptive information visualization based on the user's multiple viewpoints - interactive 3D visualization of the WWW},
doi = {10.1109/INFVIS.1997.636778},
url = {http://dx.doi.org/10.1109/INFVIS.1997.636778},
author = {Teraoka, T. and Maruyama, M.},
pages = {25--28},
keywords = {},
abstract = {We introduce the adaptive information visualization method for hypermedia and the WWW based on the user's multiple viewpoints. We propose two graphical interfaces, the CVI and the RF-Cone. The CVI is the interface for interactive viewpoint selection. We can select a viewpoint reflecting our interests by using the CVI. According to the given viewpoint, the RF-Cone adaptively organizes the 3D representation of the hypermedia so that we can understand the semantic and structural relationship of the hypermedia and easily retrieve the information. Combining these methods, we have developed the WWW visualization system which can provide highly efficient navigation.},
}
@inproceedings{p2293,
booktitle = {Proc. InfoVis},
year = 1997,
title = {Cacti: a front end for program visualization},
doi = {10.1109/INFVIS.1997.636785},
url = {http://dx.doi.org/10.1109/INFVIS.1997.636785},
author = {Reiss, S.P.},
pages = {46--49},
keywords = {},
abstract = {We describe a system that allows the user to rapidly construct program visualizations over a variety of data sources. Such a system is a necessary foundation for using visualization as an aid to software understanding. The system supports an arbitrary set of data sources so that information from both static and dynamic analysis can be combined to offer meaningful software visualizations. It provides the user with a visual universal-relation front end that supports the definition of queries over multiple data sources without knowledge of the structure or contents of the sources. It uses a flexible back end with a range of different visualizations, most geared to the efficient display of large amounts of data. The result is a high-quality, easy-to-define program visualization that can address specific problems and hence is useful for software understanding. The overall system is flexible and extensible in that both the underlying data model and the set of visualizations are defined in resource files.},
}
@inproceedings{p2294,
booktitle = {Proc. InfoVis},
year = 1997,
title = {Coordinating declarative queries with a direct manipulation data exploration environment},
doi = {10.1109/INFVIS.1997.636788},
url = {http://dx.doi.org/10.1109/INFVIS.1997.636788},
author = {Derthick, M. and Roth, S.F. and Kolojejchick, J.},
pages = {65--72},
keywords = {},
abstract = {Interactive visualization techniques allow data exploration to be a continuous process, rather than a discrete sequence of queries and results as in traditional database systems. However limitations in expressive power of current visualization systems force users to go outside the system and form a new dataset in order to perform certain operations, such as those involving the relationship among multiple objects. Further, there is no support for integrating data from the new dataset into previous visualizations, so users must recreate them. Visage's information centric paradigm provides an architectural hook for linking data across multiple queries, removing this overhead. This paper describes the addition to Visage of a visual query language, called VQE, which allows users to express more complicated queries than in previous interactive visualization systems. Visualizations can be created from queries and vice versa. When either is updated, the other changes to maintain consistency.},
}
@inproceedings{p2295,
booktitle = {Proc. InfoVis},
year = 1997,
title = {Design and evaluation of incremental data structures and algorithms for dynamic query interfaces},
doi = {10.1109/INFVIS.1997.636790},
url = {http://dx.doi.org/10.1109/INFVIS.1997.636790},
author = {Tanin, E. and Beigel, R. and Shneiderman, B.},
pages = {81--86},
keywords = {Data Structure, Algorithm, Database, User Interface, Information Visualization, Direct Manipulation, Dynamic Query},
abstract = {A dynamic query interface (DQI) is a database access mechanism that provides continuous real-time feedback to the user during query formulation. Previous work shows that DQIs are elegant and powerful interfaces to small databases. Unfortunately, when applied to large databases, previous DQI algorithms slow to a crawl. We present a new incremental approach to DQI algorithms and display updates that work well with large databases, both in theory and in practice.},
}
@inproceedings{p2296,
booktitle = {Proc. InfoVis},
year = 1997,
title = {Domesticating Bead: adapting an information visualization system to a financial institution},
doi = {10.1109/INFVIS.1997.636789},
url = {http://dx.doi.org/10.1109/INFVIS.1997.636789},
author = {Brodbeck, D. and Chalmers, M. and Lunzer, A. and Cotture, P.},
pages = {73--80},
keywords = {},
abstract = {The Bead visualization system employs a fast algorithm for laying out high-dimensional data in a low-dimensional space, and a number of features added to 3D visualizations to improve imageability. We describe recent work on both aspects of the system, in particular a generalization of the data types laid out and the implementation of imageability features in a 2D visualization tool. The variety of data analyzed in a financial institution such as UBS, and the ubiquity of spreadsheets as a medium for analysis, led us to extend our layout tools to handle data in a generic spreadsheet format. We describe the metrics of similarity used for this data type, and give examples of layouts of sets of records of financial trades. Conservatism and scepticism with regard to 3D visualization, along with the lack of functionality of widely available 3D web browsers, led to the development of a 2D visualization tool with refinements of a number of our imageability features.},
}
@inproceedings{p2297,
booktitle = {Proc. InfoVis},
year = 1997,
title = {H3: laying out large directed graphs in 3D hyperbolic space},
doi = {10.1109/INFVIS.1997.636718},
url = {http://dx.doi.org/10.1109/INFVIS.1997.636718},
author = {Munzner, T.},
pages = {2--10},
keywords = {},
abstract = {We present the H3 layout technique for drawing large directed graphs as node-link diagrams in 3D hyperbolic space. We can lay out much larger structures than can be handled using traditional techniques for drawing general graphs because we assume a hierarchical nature of the data. We impose a hierarchy on the graph by using domain-specific knowledge to find an appropriate spanning tree. Links which are not part of the spanning tree do not influence the layout but can be selectively drawn by user request. The volume of hyperbolic 3-space increases exponentially, as opposed to the familiar geometric increase of Euclidean 3-space. We exploit this exponential amount of room by computing the layout according to the hyperbolic metric. We optimize the cone tree layout algorithm for 3D hyperbolic space by placing children on a hemisphere around the cone mouth instead of on its perimeter. Hyperbolic navigation affords a Focus+Context view of the structure with minimal visual clutter. We have successfully laid out hierarchies of over 20,000 nodes. Our implementation accommodates navigation through graphs too large to be rendered interactively by allowing the user to explicitly prune or expand subtrees.},
}
@inproceedings{p2298,
booktitle = {Proc. InfoVis},
year = 1997,
title = {Managing multiple focal levels in Table Lens},
doi = {10.1109/INFVIS.1997.636787},
url = {http://dx.doi.org/10.1109/INFVIS.1997.636787},
author = {Tenev, T. and Rao, R.},
pages = {59--63},
keywords = {Focus+Context, Fisheye, Information visualization, Table Lens},
abstract = {The Table Lens, a focus+context visualization for large data tables, allows users to see 100 times as many data values as a spreadsheet in the same screen space in a manner that enables an extremely immediate form of exploratory data analysis. In the original Table Lens design, data are shown in the context area using graphical representations in a single pixel row. Scaling up the Table Lens technique beyond approximately 500 cases (rows) by 40 variables (columns) requires not showing every value individually and thus raises challenges for preserving the exploratory and navigational ease and power of the original design. We describe two design enhancements for introducing regions of less than a pixel row for each data value and discuss the issues raised by each.},
}
@inproceedings{p2299,
booktitle = {Proc. InfoVis},
year = 1997,
title = {Managing software with new visual representations},
doi = {10.1109/INFVIS.1997.636782},
url = {http://dx.doi.org/10.1109/INFVIS.1997.636782},
author = {Chuah, M.C. and Eick, S.G.},
pages = {30--37},
keywords = {},
abstract = {Managing large projects is a very challenging task requiring the tracking and scheduling of many resources. Although new technologies have made it possible to automatically collect data on project resources, it is very difficult to access this data because of its size and lack of structure. We present three novel glyphs for simplifying this process and apply them to visualizing statistics from a multi-million line software project. These glyphs address four important needs in project management: viewing time dependent data; managing large data volumes; dealing with diverse data types; and correspondence of data to real-world concepts.},
}
@inproceedings{p2300,
booktitle = {Proc. InfoVis},
year = 1997,
title = {Metrics for effective information visualization},
doi = {10.1109/INFVIS.1997.636794},
url = {http://dx.doi.org/10.1109/INFVIS.1997.636794},
author = {Brath, R.},
pages = {108--111},
keywords = {},
abstract = {Metrics for information visualization will help designers create and evaluate 3D information visualizations. Based on experience from 60+ 3D information visualizations, the metrics we propose are: number of data points and data density; number of dimensions and cognitive overhead; occlusion percentage; and reference context and percentage of identifiable points.},
}
@inproceedings{p2301,
booktitle = {Proc. InfoVis},
year = 1997,
title = {Multidimensional detective},
doi = {10.1109/INFVIS.1997.636793},
url = {http://dx.doi.org/10.1109/INFVIS.1997.636793},
author = {Inselberg, A.},
pages = {100--107},
keywords = {},
abstract = {The display of multivariate datasets in parallel coordinates transforms the search for relations among the variables into a 2-D pattern recognition problem. This is the basis for the application to visual data mining. The knowledge discovery process, together with some general guidelines, is illustrated on a dataset from the production of a VLSI chip. The special strength of parallel coordinates is in modeling relations. As an example, a simplified economic model is constructed with data from various economic sectors of a real country. The visual model shows the interrelationships and dependencies between the sectors, circumstances where there is competition for the same resource, and feasible economic policies. Interactively, the model can be used to do trade-off analyses, discover sensitivities, do approximate optimization, monitor (as in a process) and provide decision support.},
}
@inproceedings{p2302,
booktitle = {Proc. InfoVis},
year = 1997,
title = {Nonlinear magnification fields},
doi = {10.1109/INFVIS.1997.636786},
url = {http://dx.doi.org/10.1109/INFVIS.1997.636786},
author = {Keahey, T.A. and Robertson, E.L.},
pages = {51--58},
keywords = {information visualization, nonlinear magnification, data-driven magnification, fisheye views, magnification brushing, data-mining},
abstract = {We introduce nonlinear magnification fields as an abstract representation of nonlinear magnification, providing methods for converting transformation routines to magnification fields and vice-versa. This new representation provides ease of manipulation and power of expression. By removing the restrictions of explicit foci and allowing precise specification of magnification values, we can achieve magnification effects which were not previously possible. Of particular interest are techniques we introduce for expressing complex and subtle magnification effects through magnification brushing, and allowing intrinsic properties of the data being visualized to create data-driven magnifications.},
}
@inproceedings{p2303,
booktitle = {Proc. InfoVis},
year = 1997,
title = {On integrating visualization techniques for effective software exploration},
doi = {10.1109/INFVIS.1997.636784},
url = {http://dx.doi.org/10.1109/INFVIS.1997.636784},
author = {Storey, M. and Wong, K. and Fracchia, F.D. and Muller, H.A.},
pages = {38--45},
keywords = {Nested graphs, pan and zoom, fisheye views, hypertext, mental map, software visualization, program understanding},
abstract = {This paper describes the SHriMP visualization technique for seamlessly exploring software structure and browsing source code, with a focus on effectively assisting hybrid program comprehension strategies. The technique integrates both pan+zoom and fisheye-view visualization approaches for exploring a nested graph view of software structure. The fisheye-view approach handles multiple focal points, which are necessary when examining several subsystems and their mutual interconnections. Source code is presented by embedding code fragments within the nodes of the nested graph. Finer connections among these fragments are represented by a network that is navigated using a hypertext link-following metaphor. SHriMP combines this hypertext metaphor with animated panning and zooming motions over the nested graph to provide continuous orientation and contextual cues for the user. The SHriMP tool is being evaluated in several user studies. Observations of users performing program understanding tasks with the tool are discussed.},
}
@inproceedings{p2304,
booktitle = {Proc. InfoVis},
year = 1997,
title = {The structure of the information visualization design space},
doi = {10.1109/INFVIS.1997.636792},
url = {http://dx.doi.org/10.1109/INFVIS.1997.636792},
author = {Card, S.K. and Mackinlay, J.},
pages = {92--99},
keywords = {information visualization, taxonomy, design space, morphological analysis},
abstract = {Research on information visualization has reached the point where a number of successful point designs have been proposed and a variety of techniques have been discovered. It is now appropriate to describe and analyze portions of the design space so as to understand the differences among designs and to suggest new possibilities. This paper proposes an organization of the information visualization literature and illustrates it with a series of examples. The result is a framework for designing new visualizations and augmenting existing designs.},
}
@inproceedings{p2305,
booktitle = {Proc. InfoVis},
year = 1997,
title = {Visualizing information on a sphere},
doi = {10.1109/INFVIS.1997.636759},
url = {http://dx.doi.org/10.1109/INFVIS.1997.636759},
author = {Gross, M. and Sprenger, T.C. and Finger, J.},
pages = {11--16},
keywords = {information visualization, physically-based systems, multidimensional information space, hierarchies, blobby clustering},
abstract = {We describe a method for the visualization of information units on spherical domains which is employed in the banking industry for risk analysis, stock prediction and other tasks. The system is based on a quantification of the similarity of related objects that governs the parameters of a mass-spring system. Unlike existing approaches we initialize all information units onto the inner surface of two concentric spheres and attach them with springs to the outer sphere. Since the spring stiffnesses correspond to the computed similarity measures, the system converges into an energy minimum which reveals multidimensional relations and adjacencies in terms of spatial neighborhoods. Depending on the application scenario our approach supports different topological arrangements of related objects. In order to cope with large data sets we propose a blobby clustering mechanism that enables encapsulation of similar objects by implicit shapes. In addition, we implemented various interaction techniques allowing semantic analysis of the underlying data sets. Our prototype system IVORY is written in Java, and its versatility is illustrated by an example from financial service providers.},
}
@inproceedings{p2306,
booktitle = {Proc. InfoVis},
year = 1997,
title = {Volume rendering for relational data},
doi = {10.1109/INFVIS.1997.636791},
url = {http://dx.doi.org/10.1109/INFVIS.1997.636791},
author = {Becker, B.},
pages = {87--90},
keywords = {volume rendering, relational data, scatterplot, multivariate data, information visualization},
abstract = {A method for efficiently volume rendering dense scatterplots of relational data is described. Plotting difficulties that arise from large numbers of data points, categorical variables, interaction with non-axis dimensions, and unknown values, are addressed by this method. The domain of the plot is voxelized using binning and then volume rendering. Since a table is used as the underlying data structure, no storage is wasted on regions with no data. The opacity of each voxel is a function of the number of data points in a corresponding bin. A voxel's color is derived by averaging the value of one of the variables for all the data points that fall in a bin. Other variables in the data may be mapped to external query sliders. A dragger object permits a user to select regions inside the volume.},
}
@misc{p2382,
year = 1996,
title = {Animating multidimensional scaling to visualize N-dimensional data sets},
doi = {10.1109/INFVIS.1996.559223},
url = {http://dx.doi.org/10.1109/INFVIS.1996.559223},
author = {Bentley, C.L. and Ward, M.O.},
pages = {72--73, 126},
keywords = {},
abstract = {Many techniques have been developed for visualizing multivariate (multidimensional) data. Most, if not all, are limited by the number of dimensions which can be effectively displayed. Multidimensional scaling (MDS) is an iterative non-linear technique for projecting n-D data down to a lower number of dimensions. This work presents extensions to MDS that enhance visualization of high-dimensional data sets. These extensions include animation, stochastic perturbation, and flow visualization techniques},
}
@inproceedings{p2383,
booktitle = {Proc. InfoVis},
year = 1996,
title = {Data characterization for automatically visualizing heterogeneous information},
doi = {10.1109/INFVIS.1996.559211},
url = {http://dx.doi.org/10.1109/INFVIS.1996.559211},
author = {Zhou, M.X. and Feiner, S.},
pages = {13--20, 117},
keywords = {},
abstract = {Automated graphical generation systems should be able to design effective presentations for heterogeneous (quantitative and qualitative) information in static or interactive environments. When building such a system, it is important to thoroughly understand the presentation-related characteristics of domain-specific information. We define a data-analysis taxonomy that can be used to characterize heterogeneous information. In addition to capturing the presentation-related properties of data, our characterization takes into account the user's information-seeking goals and visual-interpretation preferences. We use automatically-generated examples from two different application domains to demonstrate the coverage of the proposed taxonomy and its utility for selecting effective graphical techniques},
}
@inproceedings{p2384,
booktitle = {Proc. InfoVis},
year = 1996,
title = {DEPICT: Documents Evaluated as Pictures. Visualizing information using context vectors and self-organizing maps},
doi = {10.1109/INFVIS.1996.559228},
url = {http://dx.doi.org/10.1109/INFVIS.1996.559228},
author = {Rushall, D.A. and Ilgen, M.R.},
pages = {100--107, 131},
keywords = {},
abstract = {HNC Software, Inc. has developed a system called DEPICT for visualizing the information content of large textual corpora. The system is built around two separate neural network methodologies: context vectors and self-organizing maps. Context vectors (CVs) are high dimensional information representations that encode the semantic content of the textual entities they represent. Self-organizing maps (SOMs) are capable of transforming an input, high dimensional signal space into a much lower (usually two or three) dimensional output space useful for visualization. Neither process requires human intervention nor an external knowledge base. Together, these neural network techniques can be utilized to automatically identify the relevant information themes present in a corpus, and present those themes to the user in an intuitive visual form},
}
@inproceedings{p2385,
booktitle = {Proc. InfoVis},
year = 1996,
title = {Distortion viewing techniques for 3-dimensional data},
doi = {10.1109/INFVIS.1996.559215},
url = {http://dx.doi.org/10.1109/INFVIS.1996.559215},
author = {Carpendale, S. and Cowperthwaite, D.J. and Fracchia, F.D.},
pages = {46--53, 119},
keywords = {distortion viewing, screen layout, 3D interaction, information visualization, interface metaphors, interface design issues},
abstract = {As the use of 3D information presentation becomes more prevalent, the need for effective viewing tools grows accordingly. Much work has been done in developing tools for 2D spaces which allow for detail in context views. We examine the extension of such 2D methods to 3D and explore the limitations encountered in accessing internal regions of the data with these methods. We then describe a novel solution to this problem of internal access with the introduction of a distortion function which creates a clear line of sight to the focus revealing sections previously obscured. The distortion is symmetric about the line of sight and is smoothly integrated back into the original 3D layout.},
}
@misc{p2386,
year = 1996,
title = {Dual multiresolution HyperSlice for multivariate data visualization},
doi = {10.1109/INFVIS.1996.559224},
url = {http://dx.doi.org/10.1109/INFVIS.1996.559224},
author = {Pak Chung Wong and Crabb, A.H. and Bergeron, R.D.},
pages = {74--75, 127},
keywords = {},
abstract = {We present a new multiresolution visualization design which allows a user to control the physical data resolution as well as the logical display resolution of multivariate data. A system prototype is described which uses the HyperSlice representation. The notion of space projection in multivariate data is introduced. This process is coupled with wavelets to form a powerful tool for very large data visualization},
}
@misc{p2387,
year = 1996,
title = {FINESSE: a financial information spreadsheet},
doi = {10.1109/INFVIS.1996.559222},
url = {http://dx.doi.org/10.1109/INFVIS.1996.559222},
author = {Varshney, A. and Kaufman, A.},
pages = {70--71, 125},
keywords = {},
abstract = {We outline a spreadsheet-based system for visualization of real-time financial information. Our system permits the user to define arithmetic and presentation relationships amongst the various cells of the spreadsheet. The cells contain primitives that can be numbers, text, images, functions and graphics. Presenting financial information in this format allows its intended clients, the financial analysts, to work in the familiar environment of a spreadsheet and allows them the flexibility afforded by the powerful interface of the spreadsheet paradigm. In addition, our system permits real-time visualization of the financial data stream allowing its user to visually track the changing market trends in two and three dimensions},
}
@inproceedings{p2388,
booktitle = {Proc. InfoVis},
year = 1996,
title = {Geospatial metadata querying and visualization on the WWW using Java{\texttrademark} applets},
doi = {10.1109/INFVIS.1996.559225},
url = {http://dx.doi.org/10.1109/INFVIS.1996.559225},
author = {Alper, N. and Stein, C.},
pages = {77--84, 128},
keywords = {},
abstract = {This paper presents the query and visualization interfaces of the Master Environmental Library (MEL) system. MEL uses the World Wide Web (WWW) to make accessible distributed data whose metadata conform to the Federal Geographic Data Committee's (FGDC) content standards for digital geospatial metadata. The interfaces are implemented as JavaTM applets and are more intuitive, interactive and possess greater functionality than their Hypertext Markup Language (HTML) counterparts. As well as querying, the interface allows users to visualize and manage the list of query results so that users can more quickly discover the datasets of real interest. Several new tools used to visualize attributes of the metadata are presented.},
}
@misc{p2389,
year = 1996,
title = {Interactive visualization of multiway tables},
doi = {10.1109/INFVIS.1996.559221},
url = {http://dx.doi.org/10.1109/INFVIS.1996.559221},
author = {Cox, K.C. and Hackborn, D.},
pages = {68--69, 124},
keywords = {},
abstract = {Many business data visualization applications involve large databases with dozens of fields and millions of rows. Interactive visualization of these databases is difficult because of the large amount of data involved. We present a method of summarizing large databases which is well suited to interactive visualization. We illustrate this with a visualization tool for the domain of call billing data},
}
@misc{p2390,
year = 1996,
title = {Minimally-immersive interactive volumetric information visualization},
doi = {10.1109/INFVIS.1996.559220},
url = {http://dx.doi.org/10.1109/INFVIS.1996.559220},
author = {Ebert, D.S. and Shaw, C. and Zwa, A. and Miller, E.L. and Roberts, D.A.},
pages = {66--67, 123},
keywords = {},
abstract = {This paper describes a minimally immersive volumetric interactive system for information visualization. The system, SFA, uses glyph-based volume rendering, enabling more information attributes to be visualized than traditional 2D and surface-based information visualization systems. Two-handed interaction and stereoscopic viewing combine to produce a minimally immersive interactive system that enhances the user's three-dimensional perception of the information space, capitalizing on the human visual system's pre-attentive learning capabilities to quickly analyze the displayed information. The paper describes the usefulness of this system for the visualization of document similarity within a corpus of textual documents. SFA allows the three-dimensional volumetric visualization, manipulation, navigation, and analysis of multivariate, time-varying information spaces, increasing the quantity and clarity of information conveyed from the visualization as compared to traditional 2D information systems},
}
@inproceedings{p2391,
booktitle = {Proc. InfoVis},
year = 1996,
title = {On the semantics of interactive visualizations},
doi = {10.1109/INFVIS.1996.559213},
url = {http://dx.doi.org/10.1109/INFVIS.1996.559213},
author = {Chuah, M.C. and Roth, S.F.},
pages = {29--36},
keywords = {information visualization, interactive techniques, user interfaces, automatic presentation systems, graphics},
abstract = {Interactive techniques are powerful tools for manipulating visualizations to analyze, communicate and acquire information. This is especially true for large data sets or complex 3D visualizations. Although many new types of interaction have been introduced recently, very little work has been done on understanding what their components are, how they are related and how they can be combined. This paper begins to address these issues with a framework for classifying interactive visualizations. Our goal is a framework that will enable us to develop toolkits for assembling visualization interfaces both interactively and automatically},
}
@inproceedings{p2392,
booktitle = {Proc. InfoVis},
year = 1996,
title = {Rapid prototyping of information visualizations using VANISH},
doi = {10.1109/INFVIS.1996.559212},
url = {http://dx.doi.org/10.1109/INFVIS.1996.559212},
author = {Kazman, R. and Carriere, J.},
pages = {21--28, 118},
keywords = {information visualization, software tools, visual programming languages},
abstract = {Discusses a software tool called VANISH (Visualizing And Navigating Information Structured Hierarchically), which supports the rapid prototyping of interactive 2D and 3D information visualizations. VANISH supports rapid prototyping through a special-purpose visual language called VaPL (VANISH Programming Language) tailored for visualizations, through a software architecture that insulates visualization-specific code from changes in both the domain being visualized and the presentation toolkit used, and through the reuse of visualization techniques between application domains. The generality of VANISH is established by showing how it is able to re-create a wide variety of "standard" visualization techniques. VANISH's support for prototyping is shown through an extended example, where we build a C++ class browser, exploring many visualization alternatives in the process},
}
@inproceedings{p2393,
booktitle = {Proc. InfoVis},
year = 1996,
title = {Selection: 524,288 ways to say "this is interesting"},
doi = {10.1109/INFVIS.1996.559216},
url = {http://dx.doi.org/10.1109/INFVIS.1996.559216},
author = {Wills, G.J.},
pages = {54--60, 120},
keywords = {},
abstract = {Visualization is a critical technology for understanding complex, data-rich systems. Effective visualizations make important features of the data immediately recognizable and enable the user to discover interesting and useful results by highlighting patterns. A key element of such systems is the ability to interact with displays of data by selecting a subset for further investigation. This operation is needed for use in linked-views systems and in drill-down analysis. It is a common manipulation in many other systems. It is as ubiquitous as selecting icons in a desktop GUI. It is therefore surprising to note that little research has been done on how selection can be implemented. This paper addresses this omission, presenting a taxonomy for selection mechanisms and discussing the interactions between branches of the taxonomy. Our suggestion of 524,288 possible systems [$2^{16}$ operation systems × 2 (memory/memoryless) × 2 (data-dependent/independent) × 2 (brush/lasso)] is more in fun than serious, as within the taxonomy there are many different choices that can be made. This framework is the result of considering both the current state of the art and historical antecedents},
}
@inproceedings{p2394,
booktitle = {Proc. InfoVis},
year = 1996,
title = {Techniques for non-linear magnification transformations},
doi = {10.1109/INFVIS.1996.559214},
url = {http://dx.doi.org/10.1109/INFVIS.1996.559214},
author = {Keahey, T.A. and Robertson, E.L.},
pages = {38--45},
keywords = {},
abstract = {This paper presents efficient methods for implementing general non-linear magnification transformations. Techniques are provided for: combining linear and non-linear magnifications, constraining the domain of magnifications, combining multiple transformations, and smoothly interpolating between magnified and normal views. In addition, piecewise linear methods are introduced which allow greater efficiency and expressiveness than their continuous counterparts},
}
@misc{p2395,
year = 1996,
title = {Towards rich information landscapes for visualising structured Web spaces},
doi = {10.1109/INFVIS.1996.559218},
url = {http://dx.doi.org/10.1109/INFVIS.1996.559218},
author = {Andrews, K. and Pichler, M. and Wolf, P.},
pages = {62--63, 121},
keywords = {},
abstract = {The Harmony browser for the Hyper-G Web server utilises Hyper-G's rich data model to provide a number of tightly-coupled, two- and three-dimensional visualisation and navigational facilities. In particular the Harmony Information Landscape visualises the hierarchical structure of Hyper-G spaces upon a plane in three-dimensional space. The Harmony Information Landscape has now been extended to display a combined structure and link map by selectively superimposing hyperlink relationships in the vertical dimension above and below the hierarchy map. In addition, documents returned by search queries may be selectively "plotted" in the landscape, indicating their whereabouts in a broader context, and several sets of 3D icons are available for representing the various document types},
}
@inproceedings{p2396,
booktitle = {Proc. InfoVis},
year = 1996,
title = {Visage: a user interface environment for exploring information},
doi = {10.1109/INFVIS.1996.559210},
url = {http://dx.doi.org/10.1109/INFVIS.1996.559210},
author = {Roth, S.F. and Lucas, P. and Senn, J.A. and Gomberg, C.C. and Burks, M.B. and Stroffolino, P.J. and Kolojechick, A.J. and Dunmire, C.},
pages = {3--12, 116},
keywords = {Visualization, exploratory data analysis, graphics, user interface environment, human-computer interaction},
abstract = {Visage is a prototype user interface environment for exploring and analyzing information. It represents an approach to coordinating multiple visualizations, analysis and presentation tools in data-intensive domains. Visage is based on an information-centric approach to user interface design which strives to eliminate impediments to direct user access to information objects across applications and visualizations. Visage consists of a set of data manipulation operations, an intelligent system for generating a wide variety of data visualizations (SAGE) and a briefing tool that supports the conversion of visual displays used during exploration into interactive presentation slides. This paper presents the user interface components and styles of interaction that are central to Visage's information-centric approach},
}
@inproceedings{p2397,
booktitle = {Proc. InfoVis},
year = 1996,
title = {Visualizing a tennis match},
doi = {10.1109/INFVIS.1996.559229},
url = {http://dx.doi.org/10.1109/INFVIS.1996.559229},
author = {Liqun Jin and Banks, D.C.},
pages = {108--114, 132},
keywords = {},
abstract = {This paper describes our work on visualizing the information of a tennis match. We use competition trees to organize the information of a tennis match and visualize the competition trees by the top-nesting layered maps with translucent colored layers. We create iconic representations to describe the detailed information of athletic events in an intuitive manner. Specialized views of the information are displayed by applying multiple Magic Lens filters on the top-nesting layered maps. The dynamic nature of the tennis match is depicted by the time-varying display. The approach we present in this paper can be used to visualize other sports information, information with competition property, or information with hierarchical structure},
}
@inproceedings{p2398,
booktitle = {Proc. InfoVis},
year = 1996,
title = {Visualizing the global topology of the MBone},
doi = {10.1109/INFVIS.1996.559226},
url = {http://dx.doi.org/10.1109/INFVIS.1996.559226},
author = {Munzner, T. and Hoffman, E. and Claffy, K. and Fenner, B.},
pages = {85--92, 129},
keywords = {},
abstract = {We present a case study of visualizing the global topology of the Internet MBone. The MBone is the Internet's multicast backbone. Multicast is the most efficient way of distributing data from one sender to multiple receivers with minimal packet duplication. Developed and initially deployed by researchers within the Internet community, the MBone has been extremely popular for efficient transmission across the Internet of real-time video and audio streams such as conferences, meetings, congressional sessions, and NASA shuttle launches. The MBone, like the Internet itself, grew exponentially with no central authority. The resulting suboptimal topology is of growing concern to network providers and the multicast research community. We create a geographic representation of the tunnel structure as arcs on a globe by resolving the latitude and longitude of MBone routers. The interactive 3D maps permit an immediate understanding of the global structure unavailable from the data in its original form as lines of text with only hostnames and IP addresses. Data visualization techniques such as grouping and thresholding allow further analysis of specific aspects of the MBone topology. We distribute the interactive 3D maps through the World-Wide Web using the VRML file format, thus allowing network maintainers throughout the world to analyze the structure more effectively than would be possible with still pictures or pre-made videos},
}
@misc{p2399,
year = 1996,
title = {Visualizing the results of multimedia Web search engines},
doi = {10.1109/INFVIS.1996.559219},
url = {http://dx.doi.org/10.1109/INFVIS.1996.559219},
author = {Mukherjea, S. and Hirata, K. and Hara, Y.},
pages = {64--65, 122},
keywords = {},
abstract = {Search engines are very useful because they allow the user to retrieve documents of interest from the World-Wide Web. However, if the user's query results in lots of records to be retrieved, just listing the results is not very user-friendly. We are developing a system that allows the visualization of the results. Visualizations of both text and image search are generated on the fly based on the search results},
}
@inproceedings{p2400,
booktitle = {Proc. InfoVis},
year = 1996,
title = {Visualizing usability log data},
doi = {10.1109/INFVIS.1996.559227},
url = {http://dx.doi.org/10.1109/INFVIS.1996.559227},
author = {Gray, M. and Badre, A. and Guzdial, M.},
pages = {93--98, 130},
keywords = {},
abstract = {Our approach to testing graphical user interfaces involves logging large amounts of data. These logs capture information at the key press and mouse click level about how an application is used. Since the raw data is voluminous and not at a useful level of detail, we use analysis and visualization to find information that is interesting and useful to a usability analyst but was previously buried in the data. We call some of our custom visualizations "contextual", meaning they use key elements of the context the data was collected in as an organizing structure. We expect this type of visualization to be easier and faster to understand and more helpful than traditional charts. We hope that our finding a natural geometry for these visualizations will inspire others whose data apparently has no inherent geometry to find natural ways to visualize their data},
}
@inproceedings{p2471,
booktitle = {Proc. InfoVis},
year = 1995,
title = {Case study: 3D displays of Internet traffic},
doi = {10.1109/INFVIS.1995.528697},
url = {http://dx.doi.org/10.1109/INFVIS.1995.528697},
author = {Cox, K.C. and Eick, S.G.},
pages = {129--131},
keywords = {},
abstract = {The explosive growth in world-wide communications, especially the Internet, has highlighted the need for techniques to visualize network traffic. The traditional node and link network displays work well for small datasets but become visually cluttered and uninterpretable for large datasets. A natural 3D metaphor for displaying world-wide network data is to position the nodes on a globe and draw arcs between them coding the traffic. This technique has several advantages over traditional 2D displays: it naturally reduces line-crossing clutter, provides an intuitive model for navigation and indication of time, and retains the geographic context. Coupling these strengths with some novel interaction techniques involving globe surface translucency and arc heights illustrates the usefulness of this class of displays.},
}
@inproceedings{p2472,
booktitle = {Proc. InfoVis},
year = 1995,
title = {Case study: an empirical investigation of thumbnail image recognition},
doi = {10.1109/INFVIS.1995.528695},
url = {http://dx.doi.org/10.1109/INFVIS.1995.528695},
author = {Burton, C.A. and Johnston, L.J. and Sonenberg, E.A.},
pages = {115--121},
keywords = {},
abstract = {The use of thumbnails (i.e., miniatures) in the user-interface of image databases allows searching and selection of images without the need for naming policies. Treating parent images prior to reduction with edge-detecting smoothing, lossy image compression, or static codebook compression resulted in thumbnails where the distortion caused by reduction was lessened. An experiment assessing these techniques found resulting thumbnails could be recognised more quickly and accurately than thumbnails of the same parent images that had been reduced without treatment. This pretreatment in thumbnail creation is offered as an improvement.},
}
@inproceedings{p2473,
booktitle = {Proc. InfoVis},
year = 1995,
title = {Case study: fishing for information on the Internet},
doi = {10.1109/INFVIS.1995.528693},
url = {http://dx.doi.org/10.1109/INFVIS.1995.528693},
author = {Mitchell, R. and Day, D. and Hirschman, L.},
pages = {105--111},
keywords = {},
abstract = {As the Internet continues to grow, the amount of accessible information becomes increasingly vast. Search tools exist that allow users to find relevant information. However, a search can often produce such a large amount of data that it becomes hard to ferret out the most appropriate and highest quality information. In addition, some search tools lose valuable information when displaying the results to the user. The paper describes a search visualization tool, called FISH, for viewing hierarchically structured information and managing information overload. FISH (Forager for the Information Super Highway) allows users to visualize the results of search requests across large document spaces in a way that preserves the structure of the information space. FISH displays the returned documents as rectangles, using a combination of order, indentation, size, and color to denote document hierarchy, the score of the documents with respect to the search, and other data attributes. In addition, the user can navigate through the document space for in-depth probing and refinement.},
}
@inproceedings{p2474,
booktitle = {Proc. InfoVis},
year = 1995,
title = {Case study: visualizing Internet resources},
doi = {10.1109/INFVIS.1995.528696},
url = {http://dx.doi.org/10.1109/INFVIS.1995.528696},
author = {Gershon, N. and LeVasseur, J. and Winstead, J. and Croall, J. and Pernicks, A. and Ruh, W.},
pages = {122--128},
keywords = {},
abstract = {The goal is to improve the ability of people from all walks of life and interests to access, search, and use the information distributed in Internet resources. The process of interacting with information resources starts with browsing, continues with digesting and assimilating pieces of information, terminates with generation of new information, and begins anew with analysis of pre-existing and new information. Our approach is user-centric, taking users' needs into account by allowing them to interact with the information contained in large arrays of documents. The visualization process is an integral part of the overall process. We have covered three related categories in this methodology. The first one is browsing through the World-Wide Web (WWW) hyperspace without becoming lost, based on a visual representation of the hyperspace hierarchical structure (hyperspace view). The second category is overcoming the rigidity of the WWW by allowing the user to construct interactively and visually a personal hyperspace of information, linking the documents according to the application or problem domain, or to the user's own perception, experience, culture, or way of thinking. The third category includes discovery and analysis of new information and relationships in retrieved documents by aggregating relevant information and representing it visually.},
}
@inproceedings{p2475,
booktitle = {Proc. InfoVis},
year = 1995,
title = {Case study. A WWW viewpoint on scientific visualization: an EPA case study for technology transfer},
doi = {10.1109/INFVIS.1995.528694},
url = {http://dx.doi.org/10.1109/INFVIS.1995.528694},
author = {Rhyne, T.M.},
pages = {112--114},
keywords = {},
abstract = {The paper examines how to provide scientific visualization capabilities to environmental scientists, policy analysts and decision makers with personal computers (PCs) on their desktops. An approach for using the World Wide Web (WWW) for disseminating knowledge on scientific visualization and for intelligent access to visualization capabilities on high performance (UNIX) workstations is outlined.},
}
@inproceedings{p2476,
booktitle = {Proc. InfoVis},
year = 1995,
title = {Case study. Narcissus: visualising information},
doi = {10.1109/INFVIS.1995.528691},
url = {http://dx.doi.org/10.1109/INFVIS.1995.528691},
author = {Hendley, R.J. and Drew, N.S. and Wood, A.M. and Beale, R.},
pages = {90--96},
keywords = {},
abstract = {It is becoming increasingly important that support is provided for users who are dealing with complex information spaces. The need is driven by the growing number of domains where there is a requirement for users to understand, navigate and manipulate large sets of computer based data; by the increasing size and complexity of this information and by the pressures to use this information efficiently. The paradigmatic example is the World Wide Web, but other domains include software systems, information systems and concurrent engineering. One approach to providing this support is to provide sophisticated visualisation tools which lead the users to form an intuitive understanding of the structure and behaviour of their domain and which provide mechanisms which allow them to manipulate objects within their system. The paper describes such a tool and a number of visualisation techniques that it implements.},
}
@inproceedings{p2477,
booktitle = {Proc. InfoVis},
year = 1995,
title = {Case study. Visualising cyberspace: information visualisation in the Harmony Internet browser},
doi = {10.1109/INFVIS.1995.528692},
url = {http://dx.doi.org/10.1109/INFVIS.1995.528692},
author = {Andrews, K.},
pages = {97--104},
keywords = {},
abstract = {The explosive growth of information systems on the Internet has clearly demonstrated the need to organise, filter, and present information in ways which allow users to cope with the sheer quantities of information available. The scope for visualisation of Gopher and WWW spaces is restricted by the limitations of their respective data models. The far richer data model supported by the Hyper-G Internet information system is exploited by its Harmony client to provide a number of tightly-coupled, two- and three-dimensional visualisation and navigational facilities, which help provide location feedback and alleviate user disorientation.},
}
@inproceedings{p2478,
booktitle = {Proc. InfoVis},
year = 1995,
title = {IVEE: an Information Visualization and Exploration Environment},
doi = {10.1109/INFVIS.1995.528688},
url = {http://dx.doi.org/10.1109/INFVIS.1995.528688},
author = {Ahlberg, C. and Wistrand, E.},
pages = {66--73},
keywords = {},
abstract = {The Information Visualization and Exploration Environment (IVEE) is a system for automatic creation of dynamic queries applications. IVEE imports database relations and automatically creates environments holding visualizations and query devices. IVEE offers multiple visualizations such as maps and starfields, and multiple query devices, such as sliders, alphasliders, and toggles. Arbitrary graphical objects can be attached to database objects in visualizations. Multiple visualizations may be active simultaneously. Users can interactively lay out and change between types of query devices. Users may retrieve details-on-demand by clicking on visualization objects. An HTML file may be provided along with the database, specifying how details-on-demand information should be presented, allowing for presentation of multimedia information in database objects. Finally, multiple IVEE clients running on separate workstations on a network can communicate by letting one user's actions affect the visualization in another IVEE client.},
}
@inproceedings{p2479,
booktitle = {Proc. InfoVis},
year = 1995,
title = {Research report: improving browsing in information by the automatic display layout},
doi = {10.1109/INFVIS.1995.528683},
url = {http://dx.doi.org/10.1109/INFVIS.1995.528683},
author = {Luders, P. and Ernst, R.},
pages = {26--33},
keywords = {},
abstract = {It is well known that graphical representations can be very helpful for browsing graph-structured information. But this promising approach requires the capability of an automatic layout system, because the tedious and time-consuming task of a manual layout leads to a rejection of this approach by the user. In our approach, we split the task of retrieving information into two phases: getting the orientation within the network and reading currently visited information. We present layout algorithms for both phases which have the benefit of being flexible and adaptable to individual user requests and ensure the topological consistency, i.e. the stability of the topology of the information layout during a sequence of display layouts. The results show that especially the possibility of an animation of the layout process can assist the user essentially in maintaining the orientation in the information network.},
}
@inproceedings{p2480,
booktitle = {Proc. InfoVis},
year = 1995,
title = {Research report: information animation applications in the capital markets},
doi = {10.1109/INFVIS.1995.528682},
url = {http://dx.doi.org/10.1109/INFVIS.1995.528682},
author = {Wright, W.},
pages = {19--25},
keywords = {},
abstract = {3D computer graphics can be extremely expressive. It is possible to display an entire securities market, like the S&P 500, on a single screen. With the correct approach to the visual design of the layout, these massive amounts of information can be quickly and easily comprehended by a human observer. By using motion and animated interaction, it is possible to use 3D as a reliable, accurate and precise decision-support tool. Information animation applications are particularly suited to the securities industry because that is where we find huge amounts of data, the value of which declines rapidly with time, and where critical decisions are being made on this data in very short periods of time. Information animation technology is an important new tool for the securities industry, where people need to be in the decision-making loop without suffering from information overload. Several examples are discussed including equity trading analytics, fixed income trading analytics and fixed-income risk viewing.},
}
@inproceedings{p2481,
booktitle = {Proc. InfoVis},
year = 1995,
title = {Research report. DataSpace: 3-D visualizations of large databases},
doi = {10.1109/INFVIS.1995.528690},
url = {http://dx.doi.org/10.1109/INFVIS.1995.528690},
author = {Anupam, V. and Dar, S. and Leibfried, T. and Petajan, E.},
pages = {82--88},
keywords = {},
abstract = {DataSpace is a system for interactive 3-D visualization and analysis of large databases. DataSpace utilizes the display space by placing panels of information, possibly generated by different visualization applications, in a 3-D graph layout, and providing continuous navigation facilities. Selective rearrangements and transparency can be used to reduce occlusion or to compare or merge a set of images (e.g. line graphs or scatter plots) that are aligned and stacked in depth. A prototype system supporting the basic 3-D graphic operations (layout, zoom, rotation, translation, transparency) has been implemented. We provide several illustrative examples of DataSpace displays taken from the current system.},
}