
changeset 85:da8f81785211

.
author bshanks@bshanks.dyndns.org
date Tue Apr 21 03:36:06 2009 -0700
parents d89a99c9ea9a
children aafe6f8c3593
files grant.bib grant.html grant.odt grant.pdf grant.txt
line diff
--- a/grant.bib	Tue Apr 21 00:54:22 2009 -0700
+++ b/grant.bib	Tue Apr 21 03:36:06 2009 -0700
@@ -361,4 +361,112 @@
  author = {Constance M. Smith and Jacqueline H. Finger and Terry F. Hayamizu and Ingeborg J. {McCright} and Janan T. Eppig and James A. Kadin and Joel E. Richardson and Martin Ringwald},
  year = {2007},
  pages = {D618--623}
+},
+
+@article{annese_myelo-architectonic_2004,
+ title = {A myelo-architectonic method for the structural classification of cortical areas},
+ volume = {21},
+ issn = {1053-8119},
+ url = {http://www.sciencedirect.com/science/article/B6WNP-4B5JN94-1/2/c9519ed20d3002e0b0316bcf0031e7a2},
+ doi = {10.1016/j.neuroimage.2003.08.024},
+ abstract = {We describe an automatic and reproducible method to analyze the histological design of the cerebral cortex as applied to brain sections stained to reveal myelinated fibers. The technique provides an evaluation of the distribution of myelination across the width of the cortical mantle in accordance with a model of its curvature and its intrinsic geometry. The profile lines along which the density of staining is measured are generated from the solution of a partial differential equation (PDE) that models the intermediate layers of the cortex. Cortical profiles are classified according to significant components that emerge from wavelet analysis. Intensity profiles belonging to each distinct class are normalized and averaged to produce area-specific templates of cortical myelo-architecture.},
+ number = {1},
+ journal = {{NeuroImage}},
+ author = {J. Annese and A. Pitiot and I. D. Dinov and A. W. Toga},
+ year = {2004},
+ keywords = {Cerebral Cortex, Cortical areas, Myelo-architecture},
+ pages = {15--26}
+},
+
+@article{schleicher_stereological_2000,
+ title = {A stereological approach to human cortical architecture: identification and delineation of cortical areas},
+ volume = {20},
+ issn = {0891-0618},
+ url = {http://www.sciencedirect.com/science/article/B6T02-43HDYPB-5/2/461101884330ed9e8b29a5f4195a349f},
+ doi = {10.1016/S0891-0618(00)00076-4},
+ abstract = {Stereology offers a variety of procedures to analyze quantitatively the regional and laminar organization in cytoarchitectonically defined areas of the human cerebral cortex. Conventional anatomical atlases are of little help in localizing specific cortical areas, since most of them are based on a single brain and use highly observer-dependent criteria for the delineation of cortical areas. In consequence, numerous cortical maps exist which greatly differ with respect to number, position, size and extent of cortical areas. We describe a novel algorithm-based procedure for the delineation of cortical areas, which exploits the automated estimation of volume densities of cortical cell bodies. Spatial sampling of the laminar pattern is performed with density profiles, followed by multivariate analysis of the profiles' shape, which locates the cytoarchitectonic borders between neighboring cortical areas at sites where the laminar pattern changes significantly. The borders are then mapped to a human brain atlas system comprising tools for three dimensional reconstruction, visualization and morphometric analysis. A sample of brains with labeled cortical areas is warped into the reference brain of the atlas system in order to generate a population map of the cortical areas, which describes the intersubject variability in spatial conformation of cortical areas. These population maps provide a novel tool for the interpretation of images obtained with functional imaging techniques.},
+ number = {1},
+ journal = {Journal of Chemical Neuroanatomy},
+ author = {A. Schleicher and K. Amunts and S. Geyer and T. Kowalski and T. Schormann and N. {Palomero-Gallagher} and K. Zilles},
+ month = oct,
+ year = {2000},
+ keywords = {Cerebral Cortex, Density profile, Multivariate statistics, Quantitative cytoarchitecture, Stereology-brain mapping},
+ pages = {31--47}
+},
+
+@article{schmitt_detection_2003,
+ title = {Detection of cortical transition regions utilizing statistical analyses of excess masses},
+ volume = {19},
+ issn = {1053-8119},
+ url = {http://www.sciencedirect.com/science/article/B6WNP-488VX9X-2/2/4a7467890b69d13dec8261a4f6fc66d5},
+ doi = {10.1016/S1053-8119(03)00040-5},
+ abstract = {A new statistical approach for observer-assisted detection of transition regions of adjacent cytoarchitectonic areas within the human cerebral cortex was developed. This method analyzes the structural information of cytoarchitectural profiles (e.g., the modality of a gray level intensity distribution) based on observed excess mass differences verified by a suitable statistical test. Profiles were generated by scanning the cerebral cortex over respective regions of interest that were oriented to trajectories running parallel to the orientation of cell columns. For each single profile, determination of excess masses provided evidence for a certain number of peaks in the cell density, thereby avoiding fluctuation due solely to sampling anomalies. Comparing such excess mass measurements by means of multiple local rank tests over a wide range of profiles allowed for the detection of cytoarchitectural inhomogeneities at respective given confidence levels. Special parameters (e.g., level of significance, width of targeted region, number of peaks) then could be adapted to specific pattern recognition problems in lamination analyses. Such analyses of excess masses provided a general tool for observer-assisted evaluation of profile arrays. This observer-assisted statistical method was applied to five different cortical examples. It detected the same transition regions that had been determined earlier through direct examination of samples, despite cortical convexities, concavities, and some minor staining inhomogeneities.},
+ number = {1},
+ journal = {{NeuroImage}},
+ author = {Oliver Schmitt and Lars Hömke and Lutz Dümbgen},
+ month = may,
+ year = {2003},
+ keywords = {Brain mapping, Cerebral Cortex, Cytoarchitecture, Excess mass, Lamination, Multiple local rank test, Neuroimaging, Profiles, Trajectories, Transition regions, Traverses},
+ pages = {42--63}
+},
+
+@article{schleicher_quantitative_2005,
+ title = {Quantitative architectural analysis: a new approach to cortical mapping},
+ volume = {210},
+ url = {http://dx.doi.org/10.1007/s00429-005-0028-2},
+ doi = {10.1007/s00429-005-0028-2},
+ abstract = {Recent progress in anatomical and functional MRI has revived the demand for a reliable, topographic map of the human cerebral cortex. Till date, interpretations of specific activations found in functional imaging studies and their topographical analysis in a spatial reference system are, often, still based on classical architectonic maps. The most commonly used reference atlas is that of Brodmann and his successors, despite its severe inherent drawbacks. One obvious weakness in traditional, architectural mapping is the subjective nature of localising borders between cortical areas, by means of a purely visual, microscopical examination of histological specimens. To overcome this limitation, more objective, quantitative mapping procedures have been established in the past years. The quantification of the neocortical, laminar pattern by defining intensity line profiles across the cortical layers, has a long tradition. During the last years, this method has been extended to enable a reliable, reproducible mapping of the cortex based on image analysis and multivariate statistics. Methodological approaches to such algorithm-based, cortical mapping were published for various architectural modalities. In our contribution, principles of algorithm-based mapping are described for cyto- and receptorarchitecture. In a cytoarchitectural parcellation of the human auditory cortex, using a sliding window procedure, the classical areal pattern of the human superior temporal gyrus was modified by a replacing of Brodmann's areas 41, 42, 22 and parts of area 21, with a novel, more detailed map. An extension and optimisation of the sliding window procedure to the specific requirements of receptorarchitectonic mapping, is also described using the macaque central sulcus and adjacent superior parietal lobule as a second, biologically independent example. Algorithm-based mapping procedures, however, are not limited to these two architectural modalities, but can be applied to all images in which a laminar cortical pattern can be detected and quantified, e.g. myeloarchitectonic and in vivo high resolution MR imaging. Defining cortical borders, based on changes in cortical lamination in high resolution, in vivo structural MR images will result in a rapid increase of our knowledge on the structural parcellation of the human cerebral cortex.},
+ number = {5},
+ journal = {Anatomy and Embryology},
+ author = {A. Schleicher and N. {Palomero-Gallagher} and P. Morosan and S. Eickhoff and T. Kowalski and K. Vos and K. Amunts and K. Zilles},
+ month = dec,
+ year = {2005},
+ pages = {373--386}
+},
+
+@article{kruggel_analyzingneocortical_2003,
+ title = {Analyzing the neocortical fine-structure},
+ volume = {7},
+ issn = {1361-8415},
+ url = {http://www.sciencedirect.com/science/article/B6W6Y-48FSTG9-3/2/5a6f5b703630037afeea6067c27b42be},
+ doi = {10.1016/S1361-8415(03)00006-9},
+ abstract = {Cytoarchitectonic fields of the human neocortex are defined by characteristic variations in the composition of a general six-layer structure. It is commonly accepted that these fields correspond to functionally homogeneous entities. Diligent techniques were developed to characterize cytoarchitectonic fields by staining sections of post-mortem brains and subsequent statistical evaluation. Fields were found to show a considerable interindividual variability in extent and relation to macroscopic anatomical landmarks. With upcoming new high-resolution magnetic resonance imaging (MRI) protocols, it appears worthwhile to examine the feasibility of characterizing the neocortical fine-structure from anatomical MRI scans, thus, defining neocortical fields by in vivo techniques. A fixated brain hemisphere was scanned at a resolution of approximately 0.3 mm. After correcting for intensity inhomogeneities in the dataset, the cortex boundaries (the white/grey matter and grey matter/background interfaces) were determined as a triangular mesh. Radial intensity profiles following the shortest path through the cortex were computed and characterized by a sparse set of features. A statistical similarity measure between features of different regions was defined, and served to define the extent of Brodmann's Areas 4, 17, 44 and 45 in this dataset.},
+ number = {3},
+ journal = {Medical Image Analysis},
+ author = {F. Kruggel and M. K. Brückner and Th. Arendt and C. J. Wiggins and D. Y. von Cramon},
+ month = sep,
+ year = {2003},
+ pages = {251--264}
+},
+
+@inbook{adamson_tracking_2005,
+ series = {Lecture Notes in Computer Science},
+ title = {A Tracking Approach to Parcellation of the Cerebral Cortex},
+ volume = {3749},
+ isbn = {978-3-540-29327-9},
+ url = {http://dx.doi.org/10.1007/11566465_37},
+ abstract = {The cerebral cortex is composed of regions with distinct laminar structure. Functional neuroimaging results are often reported with respect to these regions, usually by means of a brain “atlas”. Motivated by the need for more precise atlases, and the lack of model-based approaches in prior work in the field, this paper introduces a novel approach to parcellating the cortex into regions of distinct laminar structure, based on the theory of target tracking. The cortical layers are modelled by hidden Markov models and are tracked to determine the Bayesian evidence of layer hypotheses. This model-based parcellation method, evaluated here on a set of histological images of the cortex, is extensible to 3-D images.},
+ booktitle = {Medical Image Computing and Computer-Assisted Intervention – MICCAI 2005},
+ publisher = {Springer Berlin / Heidelberg},
+ author = {Chris Adamson and Leigh Johnston and Terrie Inder and Sandra Rees and Iven Mareels and Gary Egan},
+ year = {2005},
+ pages = {294--301}
+}
\ No newline at end of file
--- a/grant.html	Tue Apr 21 00:54:22 2009 -0700
+++ b/grant.html	Tue Apr 21 03:36:06 2009 -0700
@@ -24,27 +24,22 @@
 machine-readable datasets developed in aim (3), will be published and freely available for others to use.
 Background and significance
 Aim 1: Given a map of regions, find genes that mark the regions
-After defining terms, we will describe a set of principles which determine our strategy to completing this aim.
-Machine learning terminology: supervised learning The task of looking for marker genes for known anatomical regions means that one is looking for a set of genes such that, if the expression level of those genes is known, then the locations of the regions can be inferred.
-If we define the regions so that they cover the entire anatomical structure to be divided, then instead of saying that we are using gene expression to find the locations of the regions, we may say that we are using gene expression to determine to which region each voxel within the structure belongs. We call this a classification task, because each voxel is being assigned to a class (namely, its region).
-Therefore, an understanding of the relationship between the combination of their expression levels and the locations of the regions may be expressed as a function. The input to this function is a voxel, along with the gene expression levels within that voxel; the output is the regional identity of the target voxel, that is, the region to which the target voxel belongs. We call this function a classifier. In general, the input to a classifier is called an instance, and the output is called a label (or a class label).
+Machine learning terminology The task of looking for marker genes for known anatomical regions means that one is looking for a set of genes such that, if the expression level of those genes is known, then the locations of the regions can be inferred.
+If we define the regions so that they cover the entire anatomical structure to be divided, we may say that we are using gene expression to determine to which region each voxel within the structure belongs. We call this a classification task, because each voxel is being assigned to a class (namely, its region). An understanding of the relationship between the combination of their expression levels and the locations of the regions may be expressed as a function. The input to this function is a voxel, along with the gene expression levels within that voxel; the output is the regional identity of the target voxel, that is, the region to which the target voxel belongs. We call this function a classifier. In general, the input to a classifier is called an instance, and the output is called a label (or a class label).
 The object of aim 1 is not to produce a single classifier, but rather to develop an automated method for determining a classifier for any known anatomical structure. Therefore, we seek a procedure by which a gene expression dataset may be
-analyzed in concert with an anatomical atlas in order to produce a classifier. Such a procedure is a type of a machine learning procedure. The construction of the classifier is called training (also learning), and the initial gene expression dataset used in the construction of the classifier is called training data.
-In the machine learning literature, this sort of procedure may be thought of as a supervised learning task, defined as a task in which the goal is to learn a mapping from instances to labels, and the training data consists of a set of instances (voxels) for which the labels (regions) are known.
+analyzed in concert with an anatomical atlas in order to produce a classifier. The initial gene expression dataset used in the construction of the classifier is called training data. In the machine learning literature, this sort of procedure may be thought of as a supervised learning task, defined as a task in which the goal is to learn a mapping from instances to labels, and the training data consists of a set of instances (voxels) for which the labels (regions) are known.
 Each gene expression level is called a feature, and the selection of which genes1 to include is called feature selection. Feature selection is one component of the task of learning a classifier. Some methods for learning classifiers start out with a separate feature selection phase, whereas other methods combine feature selection with other aspects of training.
@@ -53,14 +48,14 @@
 procedure may be used in which features are added and subtracted from the selected set depending on how much they raise the score. Such procedures are called “stepwise” or “greedy”.
 Although the classifier itself may only look at the gene expression data within each voxel before classifying that voxel, the
-learning algorithm which constructs the classifier may look over the entire dataset. We can categorize score-based feature selection methods depending on how the score is calculated. Often the score calculation consists of assigning a sub-score to each voxel, and then aggregating these sub-scores into a final score (the aggregation is often a sum or a sum of squares or average). If only information from nearby voxels is used to calculate a voxel’s sub-score, then we say it is a local scoring method. If only information from the voxel itself is used to calculate a voxel’s sub-score, then we say it is a pointwise scoring method.
-Key questions when choosing a learning method are: What are the instances? What are the features? How are the features chosen? Here are four principles that outline our answers to these questions.
+algorithm which constructs the classifier may look over the entire dataset. We can categorize score-based feature selection methods depending on how the score is calculated. Often the score calculation consists of assigning a sub-score to each voxel, and then aggregating these sub-scores into a final score (the aggregation is often a sum or a sum of squares or average). If only information from nearby voxels is used to calculate a voxel’s sub-score, then we say it is a local scoring method. If only information from the voxel itself is used to calculate a voxel’s sub-score, then we say it is a pointwise scoring method.
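[Editorial aside: to make the terminology above concrete, here is a minimal sketch, not the proposal's actual pipeline, of a pointwise score and a greedy (stepwise) forward feature selection wrapped around a classifier. The array names `expression` and `in_region` are hypothetical stand-ins for per-pixel gene expression levels and region membership labels.]

```python
# Illustrative sketch only: pointwise scoring of single genes plus greedy
# forward selection of a small gene combination, evaluated with a classifier.
# `expression` (n_pixels x n_genes) and `in_region` (0/1 labels) are
# hypothetical placeholders, not the real ABA-derived data.
import numpy as np
from sklearn.linear_model import LogisticRegression

def pointwise_score(feature, labels):
    """Pointwise score: each pixel contributes independently; here, the
    absolute correlation between one gene's expression and the labels."""
    return abs(np.corrcoef(feature, labels)[0, 1])

def greedy_select(expression, labels, n_genes=3):
    """Stepwise ('greedy') forward selection: repeatedly add the gene that
    most improves a classifier trained on the currently selected set.
    (Training accuracy is used here only to keep the sketch short.)"""
    selected = []
    for _ in range(n_genes):
        best_gene, best_acc = None, -np.inf
        for g in range(expression.shape[1]):
            if g in selected:
                continue
            cols = selected + [g]
            clf = LogisticRegression(max_iter=1000)
            acc = clf.fit(expression[:, cols], labels).score(expression[:, cols], labels)
            if acc > best_acc:
                best_gene, best_acc = g, acc
        selected.append(best_gene)
    return selected

# Synthetic example standing in for flatmap pixels:
rng = np.random.default_rng(0)
expression = rng.random((500, 50))
in_region = (expression[:, 7] + 0.5 * expression[:, 23] > 1.0).astype(int)
ranked = np.argsort([-pointwise_score(expression[:, g], in_region)
                     for g in range(expression.shape[1])])
print("top single genes by pointwise score:", ranked[:5])
print("greedy combination:", greedy_select(expression, in_region, n_genes=2))
```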
+Our strategy for Aim 1
+Key questions when choosing a learning method are: What are the instances? What are the features? How are the features chosen? Here are four principles that outline our answers to these questions.
 Principle 1: Combinatorial gene expression
 It is too much to hope that every anatomical region of interest will be identified by a single gene. For example, in the cortex, there are some areas which are not clearly delineated by any gene included in the Allen Brain Atlas (ABA) dataset.
@@ -75,13 +70,13 @@
 want to use the expression of marker genes as a trigger for some regionally-targeted intervention, then our intervention must contain a molecular mechanism to check the expression level of each marker gene before it triggers. It is currently infeasible to design a molecular trigger that checks the level of more than a handful of genes. Similarly, if the goal is to develop a
-_________________________________________
- 1Strictly speaking, the features are gene expression levels, but we’ll call them genes.
 procedure to do ISH on tissue samples in order to label their anatomy, then it is infeasible to label more than a few genes. Therefore, we must select only a few genes as features.
 The requirement to find combinations of only a small number of genes limits us from straightforwardly applying many of the most simple techniques from the field of supervised machine learning. In the parlance of machine learning, our task combines feature selection with supervised learning.
+_________________________________________
+ 1Strictly speaking, the features are gene expression levels, but we’ll call them genes.
 Principle 3: Use geometry in feature selection
 When doing feature selection with score-based methods, the simplest thing to do would be to score the performance of each voxel by itself and then combine these scores (pointwise scoring). A more powerful approach is to also use information
@@ -91,8 +86,7 @@
 There are many anatomical structures which are commonly characterized in terms of a two-dimensional manifold. When it is known that the structure that one is looking for is two-dimensional, the results may be improved by allowing the analysis algorithm to take advantage of this prior knowledge. In addition, it is easier for humans to visualize and work with 2-D
-data.
-Therefore, when possible, the instances should represent pixels, not voxels.
+data. Therefore, when possible, the instances should represent pixels, not voxels.
 Related work
 There is a substantial body of work on the analysis of gene expression data; most of it concerns gene expression data which are not fundamentally spatial2.
@@ -103,37 +97,29 @@
 we believe that domain-specific scoring measures (such as gradient similarity, which is discussed in Preliminary Studies) may be necessary in order to achieve the best results in this application.
 We are aware of six existing efforts to find marker genes using spatial gene expression data using automated methods.
-[8] mentions the possibility of constructing a spatial region for each gene, and then, for each anatomical structure of
+[11] mentions the possibility of constructing a spatial region for each gene, and then, for each anatomical structure of
 interest, computing what proportion of this structure is covered by the gene’s spatial region.
-GeneAtlas[3] and EMAGE [18] allow the user to construct a search query by demarcating regions and then specifying
+GeneAtlas[5] and EMAGE [23] allow the user to construct a search query by demarcating regions and then specifying
 either the strength of expression or the name of another gene or dataset whose expression pattern is to be matched. For the similarity score (match score) between two images (in this case, the query and the gene expression images), GeneAtlas uses the sum of a weighted L1-norm distance between vectors whose components represent the number of cells within a pixel3
-whose expression is within four discretization levels. EMAGE uses Jaccard similarity, which is equal to the number of true pixels in the intersection of the two images, divided by the number of pixels in their union. Neither GeneAtlas nor EMAGE
+whose expression is within four discretization levels. EMAGE uses Jaccard similarity4. Neither GeneAtlas nor EMAGE
 allow one to search for combinations of genes that define a region in concert but not separately.
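[Editorial aside: the Jaccard match score mentioned above is defined in the text as the number of true pixels in the intersection of the two images divided by the number of pixels in their union. A minimal sketch of that definition, assuming two hypothetical boolean images of the same shape:]

```python
# Minimal sketch of the Jaccard similarity described above, applied to two
# boolean images (e.g., a thresholded expression pattern and a query mask).
import numpy as np

def jaccard_similarity(a, b):
    """True pixels in the intersection divided by true pixels in the union."""
    a = np.asarray(a, dtype=bool)
    b = np.asarray(b, dtype=bool)
    union = np.logical_or(a, b).sum()
    if union == 0:
        return 0.0
    return np.logical_and(a, b).sum() / union

# Example: 2 shared true pixels out of 4 in the union -> 0.5
expr_mask = np.array([[0, 1, 1], [0, 1, 0]], dtype=bool)
region    = np.array([[0, 1, 0], [0, 1, 1]], dtype=bool)
print(jaccard_similarity(expr_mask, region))
```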
-[10] describes AGEA, “Anatomic Gene Expression Atlas”. AGEA has three components:
-∙ Gene Finder: The user selects a seed voxel and the system (1) chooses a cluster which includes the seed voxel, (2) yields a list of genes which are overexpressed in that cluster. (note: the ABA website also contains pre-prepared lists of overexpressed genes for selected structures)
-∙ Correlation: The user selects a seed voxel and the system then shows the user how much correlation there is between the gene expression profile of the seed voxel and every other voxel.
-∙ Clusters: will be described later
+[13] describes AGEA, “Anatomic Gene Expression Atlas”. AGEA has three components. Gene Finder: The user selects a seed voxel and the system (1) chooses a cluster which includes the seed voxel, (2) yields a list of genes which are overexpressed in that cluster. (note: the ABA website also contains pre-prepared lists of overexpressed genes for selected structures). Correlation: The user selects a seed voxel and the system then shows the user how much correlation there is between the gene expression profile of the seed voxel and every other voxel. Clusters: will be described later.
 Gene Finder is different from our Aim 1 in at least three ways. First, Gene Finder finds only single genes, whereas we will also look for combinations of genes. Second, Gene Finder can only use overexpression as a marker, whereas we will also
-search for underexpression. Third, Gene Finder uses a simple pointwise score4, whereas we will also use geometric scores
+search for underexpression. Third, Gene Finder uses a simple pointwise score5, whereas we will also use geometric scores
 such as gradient similarity (described in Preliminary Studies). Figures 4, 2, and 3 in the Preliminary Studies section contain evidence that each of our three choices is the right one.
-[4] looks at the mean expression level of genes within anatomical regions, and applies a Student’s t-test with Bonferroni
+[6] looks at the mean expression level of genes within anatomical regions, and applies a Student’s t-test with Bonferroni
 correction to determine whether the mean expression level of a gene is significantly higher in the target region. Like AGEA,
-_________________________________________
- 2By “fundamentally spatial” we mean that there is information from a large number of spatial locations indexed by spatial coordinates; not just data which have only a few different locations or which is indexed by anatomical label.
- 3Actually, many of these projects use quadrilaterals instead of square pixels; but we will refer to them as pixels for simplicity.
- 4“Expression energy ratio”, which captures overexpression.
 this is a pointwise measure (only the mean expression level per pixel is being analyzed), it is not being used to look for underexpression, and does not look for combinations of genes.
-[7] describes a technique to find combinations of marker genes to pick out an anatomical region. They use an evolutionary
+[9] describes a technique to find combinations of marker genes to pick out an anatomical region. They use an evolutionary
 algorithm to evolve logical operators which combine boolean (thresholded) images in order to match a target image. Their match score is Jaccard similarity.
 In summary, there has been fruitful work on finding marker genes, but only one of the previous projects explores
@@ -141,6 +127,12 @@
 scoring methods.
 Aim 2: From gene expression data, discover a map of regions
 Machine learning terminology: clustering
+_
+ 2By “fundamentally spatial” we mean that there is information from a large number of spatial locations indexed by spatial coordinates; not just data which have only a few different locations or which is indexed by anatomical label.
+ 3Actually, many of these projects use quadrilaterals instead of square pixels; but we will refer to them as pixels for simplicity.
+ 4The number of true pixels in the intersection of the two images, divided by the number of pixels in their union.
+ 5“Expression energy ratio”, which captures overexpression.
 If one is given a dataset consisting merely of instances, with no class labels, then analysis of the dataset is referred to as unsupervised learning in the jargon of machine learning. One thing that you can do with such a dataset is to group instances together. A set of similar instances is called a cluster, and the activity of grouping the data into clusters is called
@@ -150,135 +142,123 @@
 from the same anatomical region have similar gene expression profiles, at least compared to the other regions. This means that clustering voxels is the same as finding potential regions; we seek a partitioning of the voxels into regions, that is, into clusters of voxels with similar gene expression.
-It is desirable to determine not just one set of regions, but also how these regions relate to each other, if at all; perhaps some of the regions are more similar to each other than to the rest, suggesting that, although at a fine spatial scale they could be considered separate, on a coarser spatial scale they could be grouped together into one large region. This suggests the outcome of clustering may be a hierarchial tree of clusters, rather than a single set of clusters which partition the voxels. This is called hierarchial clustering.
-Similarity scores
-A crucial choice when designing a clustering method is how to measure similarity, across either pairs of instances, or clusters, or both. There is much overlap between scoring methods for feature selection (discussed above under Aim 1) and scoring methods for similarity.
-Spatially contiguous clusters; image segmentation
-We have shown that aim 2 is a type of clustering task. In fact, it is a special type of clustering task because we have an additional constraint on clusters; voxels grouped together into a cluster must be spatially contiguous. In Preliminary Studies, we show that one can get reasonable results without enforcing this constraint; however, we plan to compare these results against other methods which guarantee contiguous clusters.
-Perhaps the biggest source of continguous clustering algorithms is the field of computer vision, which has produced a variety of image segmentation algorithms. Image segmentation is the task of partitioning the pixels in a digital image into clusters, usually contiguous clusters. Aim 2 is similar to an image segmentation task. There are two main differences; in our task, there are thousands of color channels (one for each gene), rather than just three. However, there are imaging tasks which use more than three colors, for example multispectral imaging and hyperspectral imaging, which are often used to process satellite imagery. A more crucial difference is that there are various cues which are appropriate for detecting sharp object boundaries in a visual scene but which are not appropriate for segmenting abstract spatial data such as gene expression. Although many image segmentation algorithms can be expected to work well for segmenting other sorts of spatially arranged data, some of these algorithms are specialized for visual images.
+It is desirable to determine not just one set of regions, but also how these regions relate to each other. The outcome of clustering may be a hierarchical tree of clusters, rather than a single set of clusters which partition the voxels. This is called hierarchical clustering.
+Similarity scores A crucial choice when designing a clustering method is how to measure similarity, across either pairs of instances, or clusters, or both. There is much overlap between scoring methods for feature selection (discussed above under Aim 1) and scoring methods for similarity.
+Spatially contiguous clusters; image segmentation We have shown that aim 2 is a type of clustering task. In fact, it is a special type of clustering task because we have an additional constraint on clusters; voxels grouped together into a cluster must be spatially contiguous. In Preliminary Studies, we show that one can get reasonable results without enforcing this constraint; however, we plan to compare these results against other methods which guarantee contiguous clusters.
+Image segmentation is the task of partitioning the pixels in a digital image into clusters, usually contiguous clusters. Aim 2 is similar to an image segmentation task. There are two main differences; in our task, there are thousands of color channels (one for each gene), rather than just three6. A more crucial difference is that there are various cues which are appropriate for detecting sharp object boundaries in a visual scene but which are not appropriate for segmenting abstract spatial data such as gene expression. Although many image segmentation algorithms can be expected to work well for segmenting other sorts of spatially arranged data, some of these algorithms are specialized for visual images.
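[Editorial aside: as a concrete instance of clustering instances by a similarity score, here is a small sketch of hierarchical clustering of pixels by the correlation of their expression profiles. This is only an illustration of the general idea; it is not the specific algorithm used by AGEA or EMAGE, and `expression` is a hypothetical placeholder array.]

```python
# Illustrative sketch: hierarchical clustering of pixels by similarity of
# their gene expression profiles, using 1 - correlation as the dissimilarity.
import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster

rng = np.random.default_rng(0)
expression = rng.random((200, 30))   # hypothetical pixels x genes matrix

# Build the cluster tree (average linkage over correlation distance).
Z = linkage(expression, method='average', metric='correlation')

# Cutting the tree at several levels yields nested, hierarchical partitions.
for k in (2, 4, 8):
    labels = fcluster(Z, t=k, criterion='maxclust')
    print(k, "clusters; sizes:", np.bincount(labels)[1:])
```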
 Dimensionality reduction In this section, we discuss reducing the length of the per-pixel gene expression feature vector. By “dimension”, we mean the dimension of this vector, not the spatial dimension of the underlying data.
 Unlike aim 1, there is no externally-imposed need to select only a handful of informative genes for inclusion in the
-instances. However, some clustering algorithms perform better on small numbers of features. There are techniques which
+instances. However, some clustering algorithms perform better on small numbers of features7. There are techniques which
 “summarize” a larger number of features using a smaller number of features; these techniques go by the name of feature extraction or dimensionality reduction. The small set of features that such a technique yields is called the reduced feature
-set. After the reduced feature set is created, the instances may be replaced by reduced instances, which have as their features the reduced feature set rather than the original feature set of all gene expression levels. Note that the features in the reduced feature set do not necessarily correspond to genes; each feature in the reduced set may be any function of the set of gene expression levels.
-Dimensionality reduction before clustering is useful on large datasets. First, because the number of features in the reduced dataset is less than in the original dataset, the running time of clustering algorithms may be much less. Second, it is thought that some clustering algorithms may give better results on reduced data.
-Another use for dimensionality reduction is to visualize the relationships between regions after clustering. For example, one might want to make a 2-D plot upon which each region is represented by a single point, and with the property that regions with similar gene expression profiles should be nearby on the plot (that is, the property that distance between pairs of points in the plot should be proportional to some measure of dissimilarity in gene expression). It is likely that no arrangement of the points on a 2-D plan will exactly satisfy this property; however, dimensionality reduction techniques allow one to find arrangements of points that approximately satisfy that property. Note that in this application, dimensionality reduction is being applied after clustering; whereas in the previous paragraph, we were talking about using dimensionality reduction before clustering.
-Clustering genes rather than voxels
-Although the ultimate goal is to cluster the instances (voxels or pixels), one strategy to achieve this goal is to first cluster the features (genes). There are two ways that clusters of genes could be used.
+set. Note that the features in the reduced feature set do not necessarily correspond to genes; each feature in the reduced set may be any function of the set of gene expression levels.
+Clustering genes rather than voxels Although the ultimate goal is to cluster the instances (voxels or pixels), one strategy to achieve this goal is to first cluster the features (genes). There are two ways that clusters of genes could be used.
 Gene clusters could be used as part of dimensionality reduction: rather than have one feature for each gene, we could have one reduced feature for each gene cluster.
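[Editorial aside: a minimal sketch of a dimensionality-reduction step before clustering, here using plain ("vanilla") non-negative matrix factorization followed by a standard clustering algorithm on the reduced instances. The data are hypothetical; the modified and hierarchical NNMF variants mentioned later in the text are not shown.]

```python
# Sketch: reduce thousands of gene features to a handful of components per
# pixel, then cluster the reduced instances instead of the raw gene vectors.
import numpy as np
from sklearn.decomposition import NMF
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
expression = rng.random((200, 1000))   # hypothetical pixels x genes, non-negative

nmf = NMF(n_components=10, init='nndsvda', max_iter=500, random_state=0)
reduced = nmf.fit_transform(expression)          # pixels x 10 reduced features

labels = KMeans(n_clusters=6, n_init=10, random_state=0).fit_predict(reduced)
print("cluster sizes:", np.bincount(labels))
```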
 Gene clusters could also be used to directly yield a clustering on instances. This is because many genes have an expression pattern which seems to pick out a single, spatially contiguous region. Therefore, it seems likely that an anatomically
-interesting region will have multiple genes which each individually pick it out5. This suggests the following procedure:
+interesting region will have multiple genes which each individually pick it out8. This suggests the following procedure:
 cluster together genes which pick out similar regions, and then to use the more popular common regions as the final clusters. In Preliminary Studies, Figure 7, we show that a number of anatomically recognized cortical regions, as well as some “superregions” formed by lumping together a few regions, are associated with gene clusters in this fashion.
 The task of clustering both the instances and the features is called co-clustering, and there are a number of co-clustering algorithms.
+________________________________
+ 6There are imaging tasks which use more than three colors, for example multispectral imaging and hyperspectral imaging, which are often used to process satellite imagery.
+ 7First, because the number of features in the reduced dataset is less than in the original dataset, the running time of clustering algorithms may be much less. Second, it is thought that some clustering algorithms may give better results on reduced data.
+ 8This would seem to contradict our finding in aim 1 that some cortical areas are combinatorially coded by multiple genes. However, it is possible that the currently accepted cortical maps divide the cortex into regions which are unnatural from the point of view of gene expression; perhaps there is some other way to map the cortex for which each region can be identified by single genes. Another possibility is that, although the cluster prototype fits an anatomical region, the individual genes are each somewhat different from the prototype.
 Related work
-We are aware of five existing efforts to cluster spatial gene expression data.
-[15] describes an analysis of the anatomy of the hippocampus using the ABA dataset. In addition to manual analysis,
+Some researchers have attempted to parcellate cortex on the basis of non-gene-expression data. For example, [15], [2], [16], and [1] associate spots on the cortex with the radial profile9 of response to some stain ([10] uses MRI), extract features from this profile, and then use similarity between surface pixels to cluster. Features used include statistical moments, wavelets, and the excess mass functional. Some of these features are motivated by the presence of tangential lines of stain intensity which correspond to laminar structure. Some methods use standard clustering procedures, whereas others make use of the spatial nature of the data to look for sudden transitions, which are identified as areal borders.
+[20] describes an analysis of the anatomy of the hippocampus using the ABA dataset. In addition to manual analysis,
 two clustering methods were employed, a modified Non-negative Matrix Factorization (NNMF), and a hierarchical recursive bifurcation clustering scheme based on correlation as the similarity score. The paper yielded impressive results, proving
-the usefulness of computational genomic anatomy. We have run NNMF on the cortical dataset6 and while the results are
+the usefulness of computational genomic anatomy. We have run NNMF on the cortical dataset10 and while the results are
 promising, they also demonstrate that NNMF is not necessarily the best dimensionality reduction method for this application (see Preliminary Studies, Figure 6).
-AGEA[10] includes a preset hierarchial clustering of voxels based on a recursive bifurcation algorithm with correlation
-as the similarity metric. EMAGE[18] allows the user to select a dataset from among a large number of alternatives, or by
+AGEA[13] includes a preset hierarchical clustering of voxels based on a recursive bifurcation algorithm with correlation
+as the similarity metric. EMAGE[23] allows the user to select a dataset from among a large number of alternatives, or by
 running a search query, and then to cluster the genes within that dataset. EMAGE clusters via hierarchical complete linkage clustering with un-centred correlation as the similarity score.
-[4] clustered genes, starting out by selecting 135 genes out of 20,000 which had high variance over voxels and which were
+[6] clustered genes, starting out by selecting 135 genes out of 20,000 which had high variance over voxels and which were
 highly correlated with many other genes. They computed the matrix of (rank) correlations between pairs of these genes, and ordered the rows of this matrix as follows: “the first row of the matrix was chosen to show the strongest contrast between the highest and lowest correlation coefficient for that row. The remaining rows were then arranged in order of decreasing similarity using a least squares metric”. The resulting matrix showed four clusters. For each cluster, prototypical spatial expression patterns were created by averaging the genes in the cluster. The prototypes were analyzed manually, without
-clustering voxels
-In an interesting twist, [7] applies their technique for finding combinations of marker genes for the purpose of clustering genes around a “seed gene”. The way they do this is by using the pattern of expression of the seed gene as the target image, and then searching for other genes which can be combined to reproduce this pattern. Those other genes which are found are considered to be related to the seed. The same team also describes a method[17] for finding “association rules” such as, “if this voxel is expressed in by any gene, then that voxel is probably also expressed in by the same gene”. This could be useful as part of a procedure for clustering voxels.
+clustering voxels.
+[9] applies their technique for finding combinations of marker genes for the purpose of clustering genes around a “seed gene”. They do this by using the pattern of expression of the seed gene as the target image, and then searching for other genes which can be combined to reproduce this pattern. Other genes which are found are considered to be related to the seed. The same team also describes a method[22] for finding “association rules” such as, “if this voxel is expressed in by any gene, then that voxel is probably also expressed in by the same gene”. This could be useful as part of a procedure for clustering voxels.
 In summary, although these projects obtained clusterings, there has not been much comparison between different algo-
-rithms or scoring methods, so it is likely that the best clustering method for this application has not yet been found. Also, none of these projects did a separate dimensionality reduction step before clustering pixels, none tried to cluster genes first in order to guide automated clustering of pixels into spatial regions, and none used co-clustering algorithms.
-_________________________________________
- 5This would seem to contradict our finding in aim 1 that some cortical areas are combinatorially coded by multiple genes. However, it is possible that the currently accepted cortical maps divide the cortex into regions which are unnatural from the point of view of gene expression; perhaps there is some other way to map the cortex for which each region can be identified by single genes. Another possibility is that, although the cluster prototype fits an anatomical region, the individual genes are each somewhat different from the prototype.
- 6We ran “vanilla” NNMF, whereas the paper under discussion used a modified method. Their main modification consisted of adding a soft spatial contiguity constraint. However, on our dataset, NNMF naturally produced spatially contiguous clusters, so no additional constraint was needed. The paper under discussion also mentions that they tried a hierarchial variant of NNMF, which we have not yet tried.
-Aim 3
+rithms or scoring methods, so it is likely that the best clustering method for this application has not yet been found. The projects using gene expression on cortex did not attempt to make use of the radial profile of gene expression. Also, none of these projects did a separate dimensionality reduction step before clustering pixels, none tried to cluster genes first in order to guide automated clustering of pixels into spatial regions, and none used co-clustering algorithms.
+Aim 3: apply the methods developed to the cerebral cortex
 Background
 The cortex is divided into areas and layers. Because of the cortical columnar organization, the parcellation of the cortex into areas can be drawn as a 2-D map on the surface of the cortex. In the third dimension, the boundaries between the areas continue downwards into the cortical depth, perpendicular to the surface. The layer boundaries run parallel to the
-surface. One can picture an area of the cortex as a slice of a six-layered cake7.
-Although it is known that different cortical areas have distinct roles in both normal functioning and in disease processes, there are no known marker genes for most cortical areas. When it is necessary to divide a tissue sample into cortical areas, this is a manual process that requires a skilled human to combine multiple visual cues and interpret them in the context of their approximate location upon the cortical surface.
+surface. One can picture an area of the cortex as a slice of a six-layered cake11.
+It is known that different cortical areas have distinct roles in both normal functioning and in disease processes, yet there are no known marker genes for most cortical areas. When it is necessary to divide a tissue sample into cortical areas, this is a manual process that requires a skilled human to combine multiple visual cues and interpret them in the context of their approximate location upon the cortical surface.
 Even the questions of how many areas should be recognized in cortex, and what their arrangement is, are still not completely settled. A proposed division of the cortex into areas is called a cortical map. In the rodent, the lack of a single
-agreed-upon map can be seen by contrasting the recent maps given by Swanson[14] on the one hand, and Paxinos and Franklin[11] on the other. While the maps are certainly very similar in their general arrangement, significant differences remain in the details.
+agreed-upon map can be seen by contrasting the recent maps given by Swanson[19] on the one hand, and Paxinos and Franklin[14] on the other. While the maps are certainly very similar in their general arrangement, significant differences remain.
 The Allen Mouse Brain Atlas dataset
+__
+ 9A radial profile is a profile along a line perpendicular to the cortical surface.
+ 10We ran “vanilla” NNMF, whereas the paper under discussion used a modified method. Their main modification consisted of adding a soft spatial contiguity constraint. However, on our dataset, NNMF naturally produced spatially contiguous clusters, so no additional constraint was needed. The paper under discussion also mentions that they tried a hierarchical variant of NNMF, which we have not yet tried.
+ 11Outside of isocortex, the number of layers varies.
 The Allen Mouse Brain Atlas (ABA) data were produced by doing in-situ hybridization on slices of male, 56-day-old C57BL/6J mouse brains. Pictures were taken of the processed slice, and these pictures were semi-automatically analyzed
-in order to create a digital measurement of gene expression levels at each location in each slice. Per slice, cellular spatial resolution is achieved. Using this method, a single physical slice can only be used to measure one single gene; many different mouse brains were needed in order to measure the expression of many genes.
-Next, an automated nonlinear alignment procedure located the 2D data from the various slices in a single 3D coordinate
+to create a digital measurement of gene expression levels at each location in each slice. Per slice, cellular spatial resolution is achieved. Using this method, a single physical slice can only be used to measure one single gene; many different mouse brains were needed in order to measure the expression of many genes.
+An automated nonlinear alignment procedure located the 2D data from the various slices in a single 3D coordinate
 system. In the final 3D coordinate system, voxels are cubes with 200 microns on a side. There are 67x41x58 = 159,326 voxels in the 3D coordinate system, of which 51,533 are in the brain[13].
-Mus musculus, the common house mouse, is thought to contain about 22,000 protein-coding genes[20]. The ABA contains data on about 20,000 genes in sagittal sections, out of which over 4,000 genes are also measured in coronal sections. Our dataset is derived from only the coronal subset of the ABA, because the sagittal data do not cover the entire cortex, and also has greater registration error[10].
-Genes were selected by the Allen Institute for coronal sectioning based on, “classes of known neuroscientific interest... or through post hoc identification of a marked non-ubiquitous expression pattern”[10].
-The ABA is not the only large public spatial gene expression dataset. Other such resources include GENSAT[6], GenePaint[19], its sister project GeneAtlas[3], BGEM[9], EMAGE[18], EurExpress8, EADHB9, MAMEP10, Xenbase11, ZFIN[13], Aniseed12, VisiGene13, GEISHA[2], Fruitfly.org[16], COMPARE14 GXD[12], GEO[1]15. With the exception of the ABA, GenePaint, and EMAGE, most of these resources have not (yet) extracted the expression intensity from the ISH images and registered the results into a single 3-D space, and to our knowledge only ABA and EMAGE make this form of data available for public download from the website16. Many of these resources focus on developmental gene expression.
+Mus musculus is thought to contain about 22,000 protein-coding genes[25]. The ABA contains data on about 20,000 genes in sagittal sections, out of which over 4,000 genes are also measured in coronal sections. Our dataset is derived from only the coronal subset of the ABA12.
+The ABA is not the only large public spatial gene expression dataset13. With the exception of the ABA, GenePaint, and EMAGE, most of the other resources have not (yet) extracted the expression intensity from the ISH images and registered the results into a single 3-D space, and to our knowledge only ABA and EMAGE make this form of data available for public download from the website14. Many of these resources focus on developmental gene expression.
 Significance
 The method developed in aim (1) will be applied to each cortical area to find a set of marker genes such that the combinatorial expression pattern of those genes uniquely picks out the target area. Finding marker genes will be useful for
@@ -288,32 +268,20 @@
 ical methods. In addition to finding markers for each individual cortical area, we will find a small panel of genes that can find many of the areal boundaries at once. This panel of marker genes will allow the development of an ISH protocol that will allow experimenters to more easily identify which anatomical areas are present in small samples of cortex.
-The method developed in aim (2) will provide a genoarchitectonic viewpoint that will contribute to the creation of a better map. The development of present-day cortical maps was driven by the application of histological stains. It is conceivable that if a different set of stains had been available which identified a different set of features, then the today’s cortical maps would have come out differently. Since the number of classes of stains is small compared to the number of
-_________________________________________
- 7Outside of isocortex, the number of layers varies.
- 8http://www.eurexpress.org/ee/; EurExpress data are also entered into EMAGE
- 9http://www.ncl.ac.uk/ihg/EADHB/database/EADHB_database.html
- 10http://mamep.molgen.mpg.de/index.php
- 11http://xenbase.org/
- 12http://aniseed-ibdm.univ-mrs.fr/
- 13http://genome.ucsc.edu/cgi-bin/hgVisiGene ; includes data from some the other listed data sources
- 14http://compare.ibdml.univ-mrs.fr/
- 15GXD and GEO contain spatial data but also non-spatial data. All GXD spatial data are also in EMAGE.
- 16without prior offline registration
-genes, it is likely that there are many repeated, salient spatial patterns in the gene expression which have not yet been captured by any stain. Therefore, current ideas about cortical anatomy need to incorporate what we can learn from looking at the patterns of gene expression.
+The method developed in aim (2) will provide a genoarchitectonic viewpoint that will contribute to the creation of a better map. The development of present-day cortical maps was driven by the application of histological stains. If a different set of stains had been available which identified a different set of features, then today’s cortical maps may have come out differently. It is likely that there are many repeated, salient spatial patterns in the gene expression which have not yet been captured by any stain. Therefore, cortical anatomy needs to incorporate what we can learn from looking at the patterns of gene expression.
 While we do not here propose to analyze human gene expression data, it is conceivable that the methods we propose to develop could be used to suggest modifications to the human cortical map as well.
 Related work
-[10] describes the application of AGEA to the cortex. The paper describes interesting results on the structure of correlations
+[13] describes the application of AGEA to the cortex. The paper describes interesting results on the structure of correlations
 between voxel gene expression profiles within a handful of cortical areas. However, this sort of analysis is not related to either of our aims, as it neither finds marker genes, nor does it suggest a cortical map based on gene expression data. Neither of the other components of AGEA can be applied to cortical areas; AGEA’s Gene Finder cannot be used to find marker genes
-for the cortical areas; and AGEA’s hierarchial clustering does not produce clusters corresponding to the cortical areas17.
+for the cortical areas; and AGEA’s hierarchical clustering does not produce clusters corresponding to the cortical areas15.
 In summary, for all three aims, (a) only one of the previous projects explores combinations of marker genes, (b) there has been almost no comparison of different algorithms or scoring methods, and (c) there has been no work on computationally finding marker genes for cortical areas, or on finding a hierarchical clustering that will yield a map of cortical areas de novo
@@ -321,55 +289,64 @@
 Our project is guided by a concrete application with a well-specified criterion of success (how well we can find marker genes for / reproduce the layout of cortical areas), which will provide a solid basis for comparing different methods.
 _________________________________________
- 17In both cases, the root cause is that pairwise correlations between the gene expression of voxels in different areas but the same layer are often stronger than pairwise correlations between the gene expression of voxels in different layers but the same area. Therefore, a pairwise voxel correlation clustering algorithm will tend to create clusters representing cortical layers, not areas. This is why the hierarchial clustering does not find cortical areas (there are clusters which presumably correspond to the intersection of a layer and an area, but since one area will have many layer-area intersection clusters, further work is needed to make sense of these). The reason that Gene Finder cannot the find marker genes for cortical areas is that in Gene Finder, although the user chooses a seed voxel, Gene Finder chooses the ROI for which genes will be found, and it creates that ROI by (pairwise voxel correlation) clustering around the seed.
+ 12The sagittal data do not cover the entire cortex, and also have greater registration error[13]. Genes were selected by the Allen Institute for coronal sectioning based on, “classes of known neuroscientific interest... or through post hoc identification of a marked non-ubiquitous expression pattern”[13].
+ 13Other such resources include GENSAT[8], GenePaint[24], its sister project GeneAtlas[5], BGEM[12], EMAGE[23], EurExpress (http://www.eurexpress.org/ee/; EurExpress data are also entered into EMAGE), EADHB (http://www.ncl.ac.uk/ihg/EADHB/database/EADHB_database.html), MAMEP (http://mamep.molgen.mpg.de/index.php), Xenbase (http://xenbase.org/), ZFIN[18], Aniseed (http://aniseed-ibdm.univ-mrs.fr/), VisiGene (http://genome.ucsc.edu/cgi-bin/hgVisiGene ; includes data from some of the other listed data sources), GEISHA[4], Fruitfly.org[21], COMPARE (http://compare.ibdml.univ-mrs.fr/), GXD[17], GEO[3] (GXD and GEO contain spatial data but also non-spatial data. All GXD spatial data are also in EMAGE.)
+ 14without prior offline registration
+ 15In both cases, the cause is that pairwise correlations between the gene expression of voxels in different areas but the same layer are often stronger than pairwise correlations between the gene expression of voxels in different layers but the same area. Therefore, a pairwise voxel correlation clustering algorithm will tend to create clusters representing cortical layers, not areas (there may be clusters which presumably correspond to the intersection of a layer and an area, but since one area will have many layer-area intersection clusters, further work is needed to make sense of these). The reason that Gene Finder cannot find marker genes for cortical areas is that, although the user chooses a seed voxel, Gene Finder chooses the ROI for which genes will be found, and it creates that ROI by (pairwise voxel correlation) clustering around the seed.
 Preliminary Studies
-Figure 1: Top row: Genes Nfic and A930001M12Rik are the most correlated with area SS (somatosensory cortex). Bottom row: Genes C130038G02Rik and Cacna1i are those with the best fit using logistic regression. Within each picture, the vertical axis roughly corresponds to anterior at the top and posterior at the bottom, and the horizontal axis roughly corresponds to medial at the left and lateral at the right. The red outline is the boundary of region SS. Pixels are colored according to correlation, with red meaning high correlation and blue meaning low.
-Format conversion between SEV, MATLAB, NIFTI
-We have created software to (politely) download all of the SEV files18 from the Allen Institute website. We have also created software to convert between the SEV, MATLAB, and NIFTI file formats, as well as some of Caret’s file formats.
-Flatmap of cortex
-We downloaded the ABA data and applied a mask to select only those voxels which belong to cerebral cortex. We divided the cortex into hemispheres.
-Using Caret[5], we created a mesh representation of the surface of the selected voxels. For each gene, for each node of the mesh, we calculated an average of the gene expression of the voxels “underneath” that mesh node. We then flattened the cortex, creating a two-dimensional mesh. We sampled the nodes of the irregular, flat mesh in order to create a regular grid of pixel values. We converted this grid into a MATLAB matrix.
-We manually traced the boundaries of each of 49 cortical areas from the ABA coronal reference atlas slides. We then converted these manual traces into Caret-format regional boundary data on the mesh surface. We projected the regions onto the 2-d mesh, and then onto the grid, and then we converted the region data into MATLAB format.
-At this point, the data are in the form of a number of 2-D matrices, all in registration, with the matrix entries representing a grid of points (pixels) over the cortical surface:
-∙ A 2-D matrix whose entries represent the regional label associated with each surface pixel
-∙ For each gene, a 2-D matrix whose entries represent the average expression level underneath each surface pixel
+Figure 1: Top row: Genes Nfic and A930001M12Rik are the most correlated with area SS (somatosensory cortex). Bottom row: Genes C130038G02Rik and Cacna1i are those with the best fit using logistic regression. Within each picture, the vertical axis roughly corresponds to anterior at the top and posterior at the bottom, and the horizontal axis roughly corresponds to medial at the left and lateral at the right. The red outline is the boundary of region SS. Pixels are colored according to correlation, with red meaning high correlation and blue meaning low.
+Format conversion between SEV, MATLAB, NIFTI
+We have created software to (politely) download all of the SEV files16 from the Allen Institute website. We have also created software to convert between the SEV, MATLAB, and NIFTI file formats, as well as some of Caret’s file formats.
+Flatmap of cortex
+We downloaded the ABA data and applied a mask to select only those voxels which belong to cerebral cortex. We divided the cortex into hemispheres.
+Using Caret[7], we created a mesh representation of the surface of the selected voxels.
For each gene, for each node of the mesh, we calculated an 2.507 + average of the gene expression of the voxels &#8220;underneath&#8221; that mesh node. We 2.508 + then flattened the cortex, creating a two-dimensional mesh. 2.509 + We sampled the nodes of the irregular, flat mesh in order to create a regular 2.510 + grid of pixel values. We converted this grid into a MATLAB matrix. 2.511 + We manually traced the boundaries of each of 49 cortical areas from the 2.512 + ABA coronal reference atlas slides. We then converted these manual traces 2.513 + into Caret-format regional boundary data on the mesh surface. We projected 2.514 + the regions onto the 2-d mesh, and then onto the grid, and then we converted 2.515 + the region data into MATLAB format. 2.516 + At this point, the data are in the form of a number of 2-D matrices, all in 2.517 + registration, with the matrix entries representing a grid of points (pixels) over 2.518 + the cortical surface: 2.519 + &#x2219; A 2-D matrix whose entries represent the regional label associated with each 2.520 + surface pixel 2.521 + &#x2219; For each gene, a 2-D matrix whose entries represent the average expression 2.522 + level underneath each surface pixel 2.523 2.524 Figure 2: Gene Pitx2 2.525 is selectively underex- 2.526 @@ -385,168 +362,169 @@ 2.527 extracting the layer-specific datasets, we have extended Caret with routines that allow the depth 2.528 of the ROI for volume-to-surface projection to vary. 2.529 In the Research Plan, we describe how we will automatically locate the layer depths. For 2.530 - validation, we have manually demarcated the depth of the outer boundary of cortical layer 5 2.531 -throughout the cortex. 2.532 +validation, we have manually demarcated the depth of the outer boundary of cortical layer 5 throughout the cortex. 2.533 Feature selection and scoring methods 2.534 Underexpression of a gene can serve as a marker Underexpression of a gene can sometimes serve as a marker. See, 2.535 for example, Figure 2. 2.536 Correlation Recall that the instances are surface pixels, and consider the problem of attempting to classify each instance 2.537 as either a member of a particular anatomical area, or not. The target area can be represented as a boolean mask over the 2.538 surface pixels. 2.539 -_____________________________ 2.540 - 18SEV is a sparse format for spatial data. It is the format in which the ABA data is made available. 2.541 One class of feature selection scoring methods contains methods which calculate some sort of &#8220;match&#8221; between each gene 2.542 image and the target image. Those genes which match the best are good candidates for features. 2.543 +_________________________________________ 2.544 + 16SEV is a sparse format for spatial data. It is the format in which the ABA data is made available. 2.545 One of the simplest methods in this class is to use correlation as the match score. We calculated the correlation between 2.546 each gene and each cortical area. The top row of Figure 1 shows the three genes most correlated with area SS. 2.547 - 2.548 - 2.549 -Figure 3: The top row shows the two genes which 2.550 -(individually) best predict area AUD, according 2.551 -to logistic regression. The bottom row shows the 2.552 -two genes which (individually) best match area 2.553 -AUD, according to gradient similarity. From left 2.554 -to right and top to bottom, the genes are Ssr1, 2.555 -Efcbp1, Ptk7, and Aph1a. 
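As an editor's illustration of the correlation score described above (this is not the authors' code; the names expr_maps and area_mask are hypothetical), a minimal Python/NumPy sketch that ranks genes by the Pearson correlation between each gene's surface-pixel expression map and the boolean mask of a target area might look like this:

    import numpy as np

    def correlation_scores(expr_maps, area_mask):
        """Rank genes by the Pearson correlation between each gene's 2-D
        surface-pixel expression map and a boolean mask of the target area."""
        target = area_mask.ravel().astype(float)
        scores = {}
        for gene, image in expr_maps.items():
            # np.corrcoef returns the 2x2 correlation matrix; entry [0, 1] is r
            scores[gene] = np.corrcoef(image.ravel().astype(float), target)[0, 1]
        return sorted(scores.items(), key=lambda item: item[1], reverse=True)

    # toy example: a 4x4 "cortical surface" whose left half is the target area
    rng = np.random.default_rng(0)
    mask = np.zeros((4, 4), dtype=bool)
    mask[:, :2] = True
    expr = {"geneA": mask.astype(float) + 0.1 * rng.random((4, 4)),  # resembles the area
            "geneB": rng.random((4, 4))}                             # unrelated pattern
    print(correlation_scores(expr, mask))

Because the score is pointwise, it rewards genes that overexpress throughout the area even when their expression border does not resemble the areal border, which is the weakness that motivates the gradient-similarity score introduced below.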
Conditional entropy An information-theoretic scoring method is 2.556 - to find features such that, if the features (gene expression levels) are 2.557 - known, uncertainty about the target (the regional identity) is reduced. 2.558 - Entropy measures uncertainty, so what we want is to find features such 2.559 - that the conditional distribution of the target has minimal entropy. The 2.560 - distribution to which we are referring is the probability distribution over 2.561 - the population of surface pixels. 2.562 - The simplest way to use information theory is on discrete data, so 2.563 - we discretized our gene expression data by creating, for each gene, five 2.564 - thresholded boolean masks of the gene data. For each gene, we created a 2.565 - boolean mask of its expression levels using each of these thresholds: the 2.566 - mean of that gene, the mean minus one standard deviation, the mean 2.567 - minus two standard deviations, the mean plus one standard deviation, 2.568 - the mean plus two standard deviations. 2.569 - Now, for each region, we created and ran a forward stepwise pro- 2.570 - cedure which attempted to find pairs of gene expression boolean masks 2.571 - such that the conditional entropy of the target area&#8217;s boolean mask, con- 2.572 - ditioned upon the pair of gene expression boolean masks, is minimized. 2.573 - This finds pairs of genes which are most informative (at least at these 2.574 - discretization thresholds) relative to the question, &#8220;Is this surface pixel 2.575 - a member of the target area?&#8221;. Its advantage over linear methods such 2.576 - as logistic regression is that it takes account of arbitrarily nonlinear re- 2.577 - lationships; for example, if the XOR of two variables predicts the target, 2.578 - conditional entropy would notice, whereas linear methods would not. 2.579 - 2.580 - 2.581 -Figure 4: Upper left: wwc1. Upper right: mtif2. 2.582 -Lower left: wwc1 + mtif2 (each pixel&#8217;s value on 2.583 -the lower left is the sum of the corresponding pix- 2.584 -els in the upper row). Gradient similarity We noticed that the previous two scoring 2.585 - methods, which are pointwise, often found genes whose pattern of ex- 2.586 - pression did not look similar in shape to the target region. For this 2.587 - reason we designed a non-pointwise local scoring method to detect when 2.588 - a gene had a pattern of expression which looked like it had a boundary 2.589 - whose shape is similar to the shape of the target region. We call this 2.590 - scoring method &#8220;gradient similarity&#8221;. 2.591 - One might say that gradient similarity attempts to measure how 2.592 - much the border of the area of gene expression and the border of the 2.593 - target region overlap. However, since gene expression falls off continu- 2.594 - ously rather than jumping from its maximum value to zero, the spatial 2.595 - pattern of a gene&#8217;s expression often does not have a discrete border. 2.596 - Therefore, instead of looking for a discrete border, we look for large 2.597 - gradients. Gradient similarity is a symmetric function over two images 2.598 - (i.e. two scalar fields). It is is high to the extent that matching pixels 2.599 - which have large values and large gradients also have gradients which 2.600 - are oriented in a similar direction. 
The formula is: 2.601 - &#x2211; 2.602 - pixel<img src="cmsy7-32.png" alt="&#x2208;" />pixels cos(abs(&#x2220;&#x2207;1 -&#x2220;&#x2207;2)) &#x22C5;|&#x2207;1| + |&#x2207;2| 2.603 + 2.604 + 2.605 +Figure 3: The top row shows the two genes 2.606 +which (individually) best predict area AUD, 2.607 +according to logistic regression. The bot- 2.608 +tom row shows the two genes which (indi- 2.609 +vidually) best match area AUD, according 2.610 +to gradient similarity. From left to right and 2.611 +top to bottom, the genes are Ssr1, Efcbp1, 2.612 +Ptk7, and Aph1a. Conditional entropy An information-theoretic scoring method is to find 2.613 + features such that, if the features (gene expression levels) are known, uncer- 2.614 + tainty about the target (the regional identity) is reduced. Entropy measures 2.615 + uncertainty, so what we want is to find features such that the conditional dis- 2.616 + tribution of the target has minimal entropy. The distribution to which we are 2.617 + referring is the probability distribution over the population of surface pixels. 2.618 + The simplest way to use information theory is on discrete data, so we 2.619 + discretized our gene expression data by creating, for each gene, five thresholded 2.620 + boolean masks of the gene data. For each gene, we created a boolean mask of 2.621 + its expression levels using each of these thresholds: the mean of that gene, the 2.622 + mean minus one standard deviation, the mean minus two standard deviations, 2.623 + the mean plus one standard deviation, the mean plus two standard deviations. 2.624 + Now, for each region, we created and ran a forward stepwise procedure 2.625 + which attempted to find pairs of gene expression boolean masks such that the 2.626 + conditional entropy of the target area&#8217;s boolean mask, conditioned upon the 2.627 + pair of gene expression boolean masks, is minimized. 2.628 + This finds pairs of genes which are most informative (at least at these dis- 2.629 + cretization thresholds) relative to the question, &#8220;Is this surface pixel a member 2.630 + of the target area?&#8221;. Its advantage over linear methods such as logistic regres- 2.631 + sion is that it takes account of arbitrarily nonlinear relationships; for example, 2.632 + if the XOR of two variables predicts the target, conditional entropy would 2.633 + notice, whereas linear methods would not. 2.634 + Gradient similarity We noticed that the previous two scoring methods, 2.635 +which are pointwise, often found genes whose pattern of expression did not look similar in shape to the target region. For 2.636 +this reason we designed a non-pointwise local scoring method to detect when a gene had a pattern of expression which 2.637 +looked like it had a boundary whose shape is similar to the shape of the target region. We call this scoring method &#8220;gradient 2.638 +similarity&#8221;. 2.639 + 2.640 + 2.641 +Figure 4: Upper left: wwc1. Upper right: 2.642 +mtif2. Lower left: wwc1 + mtif2 (each 2.643 +pixel&#8217;s value on the lower left is the sum of 2.644 +the corresponding pixels in the upper row). One might say that gradient similarity attempts to measure how much the 2.645 + border of the area of gene expression and the border of the target region over- 2.646 + lap. However, since gene expression falls off continuously rather than jumping 2.647 + from its maximum value to zero, the spatial pattern of a gene&#8217;s expression often 2.648 + does not have a discrete border. 
Therefore, instead of looking for a discrete border, we look for large gradients. Gradient similarity is a symmetric function
+ over two images (i.e. two scalar fields). It is high to the extent that matching pixels which have large values and
+ large gradients also have gradients which are oriented in a similar direction. The formula is:
+ ∑_{pixel ∈ pixels} cos(abs(∠∇1 - ∠∇2)) ⋅ (|∇1| + |∇2|)/2 ⋅ (pixel_value1 + pixel_value2)/2
+ where ∇1 and ∇2 are the gradient vectors of the two images at the current pixel; ∠∇i is the angle of the gradient of
+ image i at the current pixel; |∇i| is the magnitude of the gradient of image i at the current pixel; and pixel_valuei is
+ the value of the current pixel in image i.
+ The intuition is that we want to see if the borders of the pattern in the two images are similar; if the borders are
+ similar, then both images will have corresponding pixels with large gradients (because this is a border) which are
+ oriented in a similar direction (because the borders are similar).
Most of the genes in Figure 5 were identified via gradient similarity.
Gradient similarity provides information complementary to correlation
To show that gradient similarity can provide useful information that cannot be detected via pointwise analyses, consider
-Fig. 3. The top row of Fig. 3 displays the 3 genes which most match area AUD, according to a pointwise method19. The
-bottom row displays the 3 genes which most match AUD according to a method which considers local geometry20. The
+Fig. 3. The top row of Fig. 3 displays the 3 genes which most match area AUD, according to a pointwise method17. The
+_________________________________________
+ 17For each gene, a logistic regression in which the response variable was whether or not a surface pixel was within area AUD, and the predictor
+variable was the value of the expression of the gene underneath that pixel. The resulting scores were used to rank the genes in terms of how well
+they predict area AUD.
+bottom row displays the 3 genes which most match AUD according to a method which considers local geometry18. The
pointwise method in the top row identifies genes which express more strongly in AUD than outside of it; its weakness is
that this includes many areas which don't have a salient border matching the areal border. The geometric method identifies
genes whose salient expression border seems to partially line up with the border of AUD; its weakness is that this includes
genes which don't express over the entire area.
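The gradient-similarity score defined above is compact enough to sketch in code. The following Python/NumPy fragment is an illustrative transcription of the formula under the assumption that the two images are already sampled on the same pixel grid; it is not the implementation used in the preliminary studies:

    import numpy as np

    def gradient_similarity(image1, image2):
        """Illustrative transcription of the gradient-similarity score for two
        2-D scalar fields sampled on the same pixel grid."""
        gy1, gx1 = np.gradient(image1)
        gy2, gx2 = np.gradient(image2)
        angle1, angle2 = np.arctan2(gy1, gx1), np.arctan2(gy2, gx2)
        magnitude1, magnitude2 = np.hypot(gx1, gy1), np.hypot(gx2, gy2)
        # cos of the angular difference is 1 when the two gradients point the same way
        direction = np.cos(np.abs(angle1 - angle2))
        per_pixel = direction * (magnitude1 + magnitude2) / 2.0 * (image1 + image2) / 2.0
        return per_pixel.sum()

    # toy example: two images sharing a vertical edge versus an unrelated pattern
    a = np.zeros((8, 8)); a[:, 4:] = 1.0
    b = np.zeros((8, 8)); b[:, 4:] = 0.8
    c = np.random.default_rng(1).random((8, 8))
    print(gradient_similarity(a, b), gradient_similarity(a, c))

In the toy example, the pair of images that share an edge in the same place will typically score higher than the pair with unrelated patterns, which is the behavior the score is designed to reward.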
Genes which have high rankings using both pointwise and border criteria, 2.686 such as Aph1a in the example, may be particularly good markers. None of these genes are, individually, a perfect marker 2.687 for AUD; we deliberately chose a &#8220;difficult&#8221; area in order to better contrast pointwise with geometric methods. 2.688 -Areas which can be identified by single genes Using gradient similarity, we have already found single genes which 2.689 -roughly identify some areas and groupings of areas. For each of these areas, an example of a gene which roughly identifies 2.690 -it is shown in Figure 5. We have not yet cross-verified these genes in other atlases. 2.691 -In addition, there are a number of areas which are almost identified by single genes: COAa+NLOT (anterior part of 2.692 -cortical amygdalar area, nucleus of the lateral olfactory tract), ENT (entorhinal), ACAv (ventral anterior cingulate), VIS 2.693 -(visual), AUD (auditory). 2.694 -These results validate our expectation that the ABA dataset can be exploited to find marker genes for many cortical 2.695 -areas, while also validating the relevancy of our new scoring method, gradient similarity. 2.696 -Combinations of multiple genes are useful and necessary for some areas 2.697 -In Figure 4, we give an example of a cortical area which is not marked by any single gene, but which can be identified 2.698 -combinatorially. Acccording to logistic regression, gene wwc1 is the best fit single gene for predicting whether or not a 2.699 -pixel on the cortical surface belongs to the motor area (area MO). The upper-left picture in Figure 4 shows wwc1&#8217;s spatial 2.700 -expression pattern over the cortex. The lower-right boundary of MO is represented reasonably well by this gene, but the 2.701 -gene overshoots the upper-left boundary. This flattened 2-D representation does not show it, but the area corresponding 2.702 -to the overshoot is the medial surface of the cortex. MO is only found on the dorsal surface. Gene mtif2 is shown in the 2.703 -upper-right. Mtif2 captures MO&#8217;s upper-left boundary, but not its lower-right boundary. Mtif2 does not express very much 2.704 -on the medial surface. By adding together the values at each pixel in these two figures, we get the lower-left image. This 2.705 -combination captures area MO much better than any single gene. 2.706 -This shows that our proposal to develop a method to find combinations of marker genes is both possible and necessary. 2.707 -Feature selection integrated with prediction As noted earlier, in general, any predictive method can be used for 2.708 -feature selection by running it inside a stepwise wrapper. Also, some predictive methods integrate soft constraints on number 2.709 -of features used. Examples of both of these will be seen in the section &#8220;Multivariate Predictive methods&#8221;. 2.710 -Multivariate Predictive methods 2.711 -Forward stepwise logistic regression Logistic regression is a popular method for predictive modeling of categorial data. 2.712 -As a pilot run, for five cortical areas (SS, AUD, RSP, VIS, and MO), we performed forward stepwise logistic regression to 2.713 -find single genes, pairs of genes, and triplets of genes which predict areal identify. This is an example of feature selection 2.714 -integrated with prediction using a stepwise wrapper. Some of the single genes found were shown in various figures throughout 2.715 -this document, and Figure 4 shows a combination of genes which was found. 
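The forward stepwise wrapper discussed above can be expressed with an off-the-shelf logistic regression. The sketch below (Python with scikit-learn, given purely for illustration; the synthetic data and the function name forward_stepwise are hypothetical, and the real analysis scored candidates on the ABA-derived surface pixels rather than on training accuracy alone) greedily adds the gene that most improves the fit, which is how single genes, pairs, and triplets of genes can be produced:

    import numpy as np
    from sklearn.linear_model import LogisticRegression

    def forward_stepwise(X, y, n_select=3):
        """Greedy forward selection: at each step add the gene (column of X)
        whose addition gives the best-fitting logistic regression for the
        boolean area membership y of each surface pixel."""
        chosen, remaining = [], list(range(X.shape[1]))
        for _ in range(n_select):
            best_gene, best_score = None, -np.inf
            for gene in remaining:
                columns = chosen + [gene]
                model = LogisticRegression(max_iter=1000).fit(X[:, columns], y)
                score = model.score(X[:, columns], y)   # training accuracy, for brevity
                if score > best_score:
                    best_gene, best_score = gene, score
            chosen.append(best_gene)
            remaining.remove(best_gene)
        return chosen

    # toy data: 200 "pixels" x 20 "genes"; membership is driven by genes 0 and 3 jointly
    rng = np.random.default_rng(2)
    X = rng.normal(size=(200, 20))
    y = (X[:, 0] + X[:, 3]) > 0
    print(forward_stepwise(X, y, n_select=2))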
-We felt that, for single genes, gradient similarity did a better job than logistic regression at capturing our subjective
-impression of a "good gene".
-_________________
- 19For each gene, a logistic regression in which the response variable was whether or not a surface pixel was within area AUD, and the predictor
-variable was the value of the expression of the gene underneath that pixel. The resulting scores were used to rank the genes in terms of how well
-they predict area AUD.
- 20For each gene the gradient similarity between (a) a map of the expression of each gene on the cortical surface and (b) the shape of area AUD,
was calculated, and this was used to rank the genes.
+
+
+
+Figure 5: From left to right and top to bottom, single genes which roughly identify areas SS (somatosensory primary +
+supplemental), SSs (supplemental somatosensory), PIR (piriform), FRP (frontal pole), RSP (retrosplenial), COApm (Cortical
+amygdalar, posterior part, medial zone). Grouping some areas together, we have also found genes to identify the groups
+ACA+PL+ILA+DP+ORB+MO (anterior cingulate, prelimbic, infralimbic, dorsal peduncular, orbital, motor), posterior and
+lateral visual (VISpm, VISpl, VISl, VISp; posteromedial, posterolateral, lateral, and primary visual; the posterior and lateral
+visual area is distinguished from its neighbors, but not from the entire rest of the cortex). The genes are Pitx2, Aldh1a2,
+Ppfibp1, Slco1a5, Tshz2, Trhr, Col12a1, Ets1.
+Areas which can be identified by single genes Using gradient similarity, we have already found single genes which
+roughly identify some areas and groupings of areas. For each of these areas, an example of a gene which roughly identifies it
+is shown in Figure 5. We have not yet cross-verified these genes in other atlases.
+In addition, there are a number of areas which are almost identified by single genes: COAa+NLOT (anterior part of cortical
+amygdalar area, nucleus of the lateral olfactory tract), ENT (entorhinal), ACAv (ventral anterior cingulate), VIS (visual),
+AUD (auditory).
+These results validate our expectation that the ABA dataset can be exploited to find marker genes for many cortical areas,
+while also validating the relevancy of our new scoring method, gradient similarity.
+Combinations of multiple genes are useful and necessary for some areas
+In Figure 4, we give an example of a cortical area which is not marked by any single gene, but which can be identified
+combinatorially. According to logistic regression, gene wwc1 is the best-fit single gene for predicting whether or not a pixel
+on the cortical surface belongs to the motor area (area MO). The upper-left picture in Figure 4 shows wwc1's spatial
+expression pattern over the cortex. The lower-right boundary of MO is represented reasonably well by this gene, but the
+gene overshoots the upper-left boundary. This flattened 2-D representation does not show it, but the area corresponding to
+the overshoot is the medial surface of the cortex. MO is only found on the dorsal surface. Gene mtif2 is shown in the
+upper-right. Mtif2 captures MO's upper-left boundary, but not its lower-right boundary.
Mtif2 does not express very much on the medial surface. By adding together the values at each pixel in these two figures,
+ we get the lower-left image. This combination captures area MO much better than any single gene.
+ This shows that our proposal to develop a method to find combinations of marker genes is both possible and necessary.
+ Feature selection integrated with prediction As noted earlier, in general, any predictive method can be used for
+ feature selection by running it inside a stepwise wrapper. Also, some predictive methods integrate soft constraints on the
+ number of features used. Examples of both of these will be seen in the section "Multivariate Predictive methods".
+ Multivariate Predictive methods
+ Forward stepwise logistic regression Logistic regression is a popular method for predictive modeling of categorical
+ data. As a pilot run, for five cortical areas (SS, AUD, RSP, VIS, and MO), we performed forward stepwise logistic regression
+ to find single genes, pairs of genes, and triplets of genes which predict areal identity. This is an example of feature selection
+ integrated with prediction using a stepwise wrapper. Some of the single genes found were shown in various figures
+ throughout this document, and Figure 4 shows a combination of genes which was found.
+ We felt that, for single genes, gradient similarity did a better job than logistic regression at capturing our subjective
+ impression of a "good gene".
+_________________________________________
+ 18For each gene the gradient similarity between (a) a map of the expression of each gene on the cortical surface and (b) the shape of area AUD,
was calculated, and this was used to rank the genes.

-
-
-
-
-Figure 5: From left to right and top to bottom, single genes which roughly identify areas SS (somatosensory primary +
-supplemental), SSs (supplemental somatosensory), PIR (piriform), FRP (frontal pole), RSP (retrosplenial), COApm (Cortical
-amygdalar, posterior part, medial zone). Grouping some areas together, we have also found genes to identify the groups
-ACA+PL+ILA+DP+ORB+MO (anterior cingulate, prelimbic, infralimbic, dorsal peduncular, orbital, motor), posterior and
-lateral visual (VISpm, VISpl, VISI, VISp; posteromedial, posterolateral, lateral, and primary visual; the posterior and lateral
-visual area is distinguished from its neighbors, but not from the entire rest of the cortex). The genes are Pitx2, Aldh1a2,
-Ppfibp1, Slco1a5, Tshz2, Trhr, Col12a1, Ets1.
-SVM on all genes at once
-In order to see how well one can do when looking at all genes at once, we ran a support vector machine to classify cortical
-surface pixels based on their gene expression profiles. We achieved classification accuracy of about 81%21. This shows that
-the genes included in the ABA dataset are sufficient to define much of cortical anatomy. However, as noted above, a classifier
-that looks at all the genes at once isn't as practically useful as a classifier that uses only a few genes.
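For the all-genes experiment just described, a support vector machine scored by k-fold cross-validation is the standard recipe. A minimal sketch follows (Python with scikit-learn; the synthetic stand-in data below are hypothetical, and the roughly 81% figure quoted above comes from the real ABA-derived dataset, not from this sketch):

    import numpy as np
    from sklearn.svm import SVC
    from sklearn.model_selection import cross_val_score

    # synthetic stand-in: 500 surface pixels x 100 genes, assigned to 5 fake "areas";
    # each area is made separable by boosting one gene, so the classifier has signal to find
    rng = np.random.default_rng(3)
    X = rng.normal(size=(500, 100))
    areas = rng.integers(0, 5, size=500)
    X[np.arange(500), areas] += 3.0

    # multi-class SVM scored by 5-fold cross-validation, as in the pilot experiment
    classifier = SVC(kernel="linear", C=1.0)
    print(cross_val_score(classifier, X, areas, cv=5).mean())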
2.822 -Data-driven redrawing of the cortical map 2.823 -We have applied the following dimensionality reduction algorithms to reduce the dimensionality of the gene expression 2.824 -profile associated with each voxel: Principal Components Analysis (PCA), Simple PCA (SPCA), Multi-Dimensional Scaling 2.825 -(MDS), Isomap, Landmark Isomap, Laplacian eigenmaps, Local Tangent Space Alignment (LTSA), Hessian locally linear 2.826 -embedding, Diffusion maps, Stochastic Neighbor Embedding (SNE), Stochastic Proximity Embedding (SPE), Fast Maximum 2.827 -Variance Unfolding (FastMVU), Non-negative Matrix Factorization (NNMF). Space constraints prevent us from showing 2.828 -_________________________________________ 2.829 - 215-fold cross-validation. 2.830 -many of the results, but as a sample, PCA, NNMF, and landmark Isomap are shown in the first, second, and third rows of 2.831 -Figure 6. 2.832 2.833 2.834 2.835 @@ -559,42 +537,71 @@ 2.836 from left: NNMF. Right: Landmark Isomap. Additional details: In the 2.837 third and fourth rows, 7 dimensions were found, but only 6 displayed. In 2.838 the last row: for PCA, 50 dimensions were used; for NNMF, 6 dimensions 2.839 -were used; for landmark Isomap, 7 dimensions were used. 2.840 +were used; for landmark Isomap, 7 dimensions were used. SVM on all genes at once 2.841 + In order to see how well one can do when 2.842 + looking at all genes at once, we ran a support 2.843 + vector machine to classify cortical surface pix- 2.844 + els based on their gene expression profiles. We 2.845 + achieved classification accuracy of about 81%19. 2.846 + This shows that the genes included in the ABA 2.847 + dataset are sufficient to define much of cortical 2.848 + anatomy. However, as noted above, a classifier 2.849 + that looks at all the genes at once isn&#8217;t as prac- 2.850 + tically useful as a classifier that uses only a few 2.851 + genes. 2.852 + Data-driven redrawing of the cor- 2.853 + tical map 2.854 + We have applied the following dimensional- 2.855 + ity reduction algorithms to reduce the dimen- 2.856 + sionality of the gene expression profile associ- 2.857 + ated with each voxel: Principal Components 2.858 + Analysis (PCA), Simple PCA (SPCA), Multi- 2.859 + Dimensional Scaling (MDS), Isomap, Land- 2.860 + mark Isomap, Laplacian eigenmaps, Local Tan- 2.861 + gent Space Alignment (LTSA), Hessian locally 2.862 + linear embedding, Diffusion maps, Stochastic 2.863 + Neighbor Embedding (SNE), Stochastic Prox- 2.864 + imity Embedding (SPE), Fast Maximum Vari- 2.865 + ance Unfolding (FastMVU), Non-negative Ma- 2.866 + trix Factorization (NNMF). Space constraints 2.867 + prevent us from showing many of the results, 2.868 + but as a sample, PCA, NNMF, and landmark 2.869 + Isomap are shown in the first, second, and third 2.870 +rows of Figure 6. 2.871 2.872 -Figure 7: Prototypes corresponding to sample gene clusters, clustered by 2.873 -gradient similarity. Region boundaries for the region that most matches 2.874 -each prototype are overlayed. After applying the dimensionality reduc- 2.875 - tion, we ran clustering algorithms on the re- 2.876 - duced data. To date we have tried k-means and 2.877 - spectral clustering. The results of k-means after 2.878 - PCA, NNMF, and landmark Isomap are shown 2.879 - in the last row of Figure 6. To compare, the 2.880 - leftmost picture on the bottom row of Figure 2.881 - 6 shows some of the major subdivisions of cor- 2.882 - tex. 
These results clearly show that different di- 2.883 - mensionality reduction techniques capture dif- 2.884 - ferent aspects of the data and lead to differ- 2.885 - ent clusterings, indicating the utility of our pro- 2.886 - posal to produce a detailed comparion of these 2.887 - techniques as applied to the domain of genomic 2.888 - anatomy. 2.889 -Many areas are captured by clusters of genes We also clustered the genes using gradient similarity to see if the 2.890 -spatial regions defined by any clusters matched known anatomical regions. Figure 7 shows, for ten sample gene clusters, each 2.891 -cluster&#8217;s average expression pattern, compared to a known anatomical boundary. This suggests that it is worth attempting 2.892 -to cluster genes, and then to use the results to cluster voxels. 2.893 +Figure 7: Prototypes corresponding to sample gene clusters, 2.894 +clustered by gradient similarity. Region boundaries for the 2.895 +region that most matches each prototype are overlayed. After applying the dimensionality reduction, we ran clus- 2.896 + tering algorithms on the reduced data. To date we have tried 2.897 + k-means and spectral clustering. The results of k-means af- 2.898 + ter PCA, NNMF, and landmark Isomap are shown in the 2.899 + last row of Figure 6. To compare, the leftmost picture on 2.900 + the bottom row of Figure 6 shows some of the major sub- 2.901 + divisions of cortex. These results clearly show that differ- 2.902 + ent dimensionality reduction techniques capture different as- 2.903 + pects of the data and lead to different clusterings, indicating 2.904 + the utility of our proposal to produce a detailed comparion 2.905 + of these techniques as applied to the domain of genomic 2.906 + anatomy. 2.907 + Many areas are captured by clusters of genes We 2.908 + also clustered the genes using gradient similarity to see if 2.909 + the spatial regions defined by any clusters matched known 2.910 +anatomical regions. Figure 7 shows, for ten sample gene clusters, each cluster&#8217;s average expression pattern, compared to 2.911 +a known anatomical boundary. This suggests that it is worth attempting to cluster genes, and then to use the results to 2.912 +cluster voxels. 2.913 +_____________________________ 2.914 + 195-fold cross-validation. 2.915 Research Design and Methods 2.916 Further work on flatmapping 2.917 -In anatomy, the manifold of interest is usually either defined by a combination of two relevant anatomical axes (todo), 2.918 -or by the surface of the structure (as is the case with the cortex). In the former case, the manifold of interest is a plane, but 2.919 -in the latter case it is curved. If the manifold is curved, there are various methods for mapping the manifold into a plane. 2.920 -In the case of the cerebral cortex, it remains to be seen which method of mapping the manifold into a plane is optimal 2.921 -for this application. We will compare mappings which attempt to preserve size (such as the one used by Caret[5]) with 2.922 -mappings which preserve angle (conformal maps). 2.923 -Although there is much 2-D organization in anatomy, there are also structures whose shape is fundamentally 3-dimensional. 2.924 -If possible, we would like the method we develop to include a statistical test that warns the user if the assumption of 2-D 2.925 -structure seems to be wrong. 2.926 -todo amongst other things: 2.927 -layerfinding 2.928 +Often the surface of a structure serves as a natural 2-D basis for anatomical organization. 
Even when the shape of the 2.929 +surface is known, there are multiple ways to map it into a plane. We will compare mappings which attempt to preserve 2.930 +size (such as the one used by Caret[7]) with mappings which preserve angle (conformal maps). Although there is much 2-D 2.931 +organization in anatomy, there are also structures whose anatomy is fundamentally 3-dimensional. We plan to include a 2.932 +statistical test that warns the user if the assumption of 2-D structure seems to be wrong. 2.933 +Automatic segmentation of cortical layers 2.934 +Extension to probabalistic maps Presently, we do not have a probabalistic atlas which is registered to the ABA 2.935 +space. However, in anticipation of the availability of such maps, we would like to explore extensions to our Aim 1 techniques 2.936 +which can handle probabalistic maps. 2.937 Develop algorithms that find genetic markers for anatomical regions 2.938 1.Develop scoring measures for evaluating how good individual genes are at marking areas: we will compare pointwise, 2.939 geometric, and information-theoretic measures. 2.940 @@ -612,10 +619,7 @@ 2.941 a larger area which can be fit. 2.942 # Linear discriminant analysis 2.943 Decision trees todo 2.944 -For each cortical area, we used the C4.5 algorithm to find a pruned decision tree and ruleset for that area. We achieved 2.945 -estimated classification accuracy of more than 99.6% on each cortical area (as evaluated on the training data without 2.946 -cross-validation; so actual accuracy is expected to be lower). However, the resulting decision trees each made use of many 2.947 -genes. 2.948 +20. 2.949 Apply these algorithms to the cortex 2.950 1.Create open source format conversion tools: we will create tools to bulk download the ABA dataset and to convert 2.951 between SEV, NIFTI and MATLAB formats. 2.952 @@ -628,74 +632,90 @@ 2.953 1.Explore dimensionality reduction algorithms applied to pixels: including TODO 2.954 2.Explore dimensionality reduction algorithms applied to genes: including TODO 2.955 3.Explore clustering algorithms applied to pixels: including TODO 2.956 +_________________________________________ 2.957 + 20Already, for each cortical area, we have used the C4.5 algorithm to find a decision tree for that area. We achieved good classification accuracy 2.958 +on our training set, but the number of genes that appeared in each tree was too large. We plan to implement a pruning procedure to generate 2.959 +trees that use fewer genes 2.960 4.Explore clustering algorithms applied to genes: including gene shaving, TODO 2.961 5.Develop an algorithm to use dimensionality reduction and/or hierarchial clustering to create anatomical maps 2.962 6.Run this algorithm on the cortex: present a hierarchial, genoarchitectonic map of the cortex 2.963 # Linear discriminant analysis 2.964 # jbt, coclustering 2.965 # self-organizing map 2.966 -# confirm with EMAGE, GeneAtlas, GENSAT, etc, to fight overfitting 2.967 +# confirm with EMAGE, GeneAtlas, GENSAT, etc, to fight overfitting, two hemis 2.968 # compare using clustering scores 2.969 # multivariate gradient similarity 2.970 # deep belief nets 2.971 # note: slice artifact 2.972 Bibliography &amp; References Cited 2.973 -[1]Tanya Barrett, Dennis B. Troup, Stephen E. Wilhite, Pierre Ledoux, Dmitry Rudnev, Carlos Evangelista, Irene F. 2.974 +[1]Chris Adamson, Leigh Johnston, Terrie Inder, Sandra Rees, Iven Mareels, and Gary Egan. 
A Tracking Approach to 2.975 +Parcellation of the Cerebral Cortex, volume Volume 3749/2005 of Lecture Notes in Computer Science, pages 294&#8211;301. 2.976 +Springer Berlin / Heidelberg, 2005. 2.977 +[2]J. Annese, A. Pitiot, I. D. Dinov, and A. W. Toga. A myelo-architectonic method for the structural classification of 2.978 +cortical areas. NeuroImage, 21(1):15&#8211;26, 2004. 2.979 +[3]Tanya Barrett, Dennis B. Troup, Stephen E. Wilhite, Pierre Ledoux, Dmitry Rudnev, Carlos Evangelista, Irene F. 2.980 Kim, Alexandra Soboleva, Maxim Tomashevsky, and Ron Edgar. NCBI GEO: mining tens of millions of expression 2.981 profiles&#8211;database and tools update. Nucl. Acids Res., 35(suppl_1):D760&#8211;765, 2007. 2.982 -[2]George W. Bell, Tatiana A. Yatskievych, and Parker B. Antin. GEISHA, a whole-mount in situ hybridization gene 2.983 +[4]George W. Bell, Tatiana A. Yatskievych, and Parker B. Antin. GEISHA, a whole-mount in situ hybridization gene 2.984 expression screen in chicken embryos. Developmental Dynamics, 229(3):677&#8211;687, 2004. 2.985 -[3]James P Carson, Tao Ju, Hui-Chen Lu, Christina Thaller, Mei Xu, Sarah L Pallas, Michael C Crair, Joe Warren, Wah 2.986 +[5]James P Carson, Tao Ju, Hui-Chen Lu, Christina Thaller, Mei Xu, Sarah L Pallas, Michael C Crair, Joe Warren, Wah 2.987 Chiu, and Gregor Eichele. A digital atlas to characterize the mouse brain transcriptome. PLoS Comput Biol, 1(4):e41, 2.988 2005. 2.989 -[4]Mark H. Chin, Alex B. Geng, Arshad H. Khan, Wei-Jun Qian, Vladislav A. Petyuk, Jyl Boline, Shawn Levy, Arthur W. 2.990 +[6]Mark H. Chin, Alex B. Geng, Arshad H. Khan, Wei-Jun Qian, Vladislav A. Petyuk, Jyl Boline, Shawn Levy, Arthur W. 2.991 Toga, Richard D. Smith, Richard M. Leahy, and Desmond J. Smith. A genome-scale map of expression for a mouse 2.992 brain section obtained using voxelation. Physiol. Genomics, 30(3):313&#8211;321, August 2007. 2.993 -[5]D C Van Essen, H A Drury, J Dickson, J Harwell, D Hanlon, and C H Anderson. An integrated software suite for surface- 2.994 +[7]D C Van Essen, H A Drury, J Dickson, J Harwell, D Hanlon, and C H Anderson. An integrated software suite for surface- 2.995 based analyses of cerebral cortex. Journal of the American Medical Informatics Association: JAMIA, 8(5):443&#8211;59, 2001. 2.996 PMID: 11522765. 2.997 -[6]Shiaoching Gong, Chen Zheng, Martin L. Doughty, Kasia Losos, Nicholas Didkovsky, Uta B. Schambra, Norma J. 2.998 +[8]Shiaoching Gong, Chen Zheng, Martin L. Doughty, Kasia Losos, Nicholas Didkovsky, Uta B. Schambra, Norma J. 2.999 Nowak, Alexandra Joyner, Gabrielle Leblanc, Mary E. Hatten, and Nathaniel Heintz. A gene expression atlas of the 2.1000 central nervous system based on bacterial artificial chromosomes. Nature, 425(6961):917&#8211;925, October 2003. 2.1001 -[7]Jano Hemert and Richard Baldock. Matching Spatial Regions with Combinations of Interacting Gene Expression Pat- 2.1002 +[9]Jano Hemert and Richard Baldock. Matching Spatial Regions with Combinations of Interacting Gene Expression Pat- 2.1003 terns, volume 13 of Communications in Computer and Information Science, pages 347&#8211;361. Springer Berlin Heidelberg, 2.1004 2008. 2.1005 -[8]Erh-Fang Lee, Jyl Boline, and Arthur W. Toga. A High-Resolution anatomical framework of the neonatal mouse brain 2.1006 +[10]F. Kruggel, M. K. Brckner, Th. Arendt, C. J. Wiggins, and D. Y. von Cramon. Analyzing the neocortical fine-structure. 2.1007 +Medical Image Analysis, 7(3):251&#8211;264, September 2003. 2.1008 +[11]Erh-Fang Lee, Jyl Boline, and Arthur W. Toga. 
A High-Resolution anatomical framework of the neonatal mouse brain 2.1009 for managing gene expression data. Frontiers in Neuroinformatics, 1:6, 2007. PMC2525996. 2.1010 -[9]Susan Magdaleno, Patricia Jensen, Craig L. Brumwell, Anna Seal, Karen Lehman, Andrew Asbury, Tony Cheung, 2.1011 +[12]Susan Magdaleno, Patricia Jensen, Craig L. Brumwell, Anna Seal, Karen Lehman, Andrew Asbury, Tony Cheung, 2.1012 Tommie Cornelius, Diana M. Batten, Christopher Eden, Shannon M. Norland, Dennis S. Rice, Nilesh Dosooye, Sundeep 2.1013 Shakya, Perdeep Mehta, and Tom Curran. BGEM: an in situ hybridization database of gene expression in the embryonic 2.1014 and adult mouse nervous system. PLoS Biology, 4(4):e86 EP &#8211;, April 2006. 2.1015 -[10]Lydia Ng, Amy Bernard, Chris Lau, Caroline C Overly, Hong-Wei Dong, Chihchau Kuan, Sayan Pathak, Susan M 2.1016 +[13]Lydia Ng, Amy Bernard, Chris Lau, Caroline C Overly, Hong-Wei Dong, Chihchau Kuan, Sayan Pathak, Susan M 2.1017 Sunkin, Chinh Dang, Jason W Bohland, Hemant Bokil, Partha P Mitra, Luis Puelles, John Hohmann, David J Anderson, 2.1018 Ed S Lein, Allan R Jones, and Michael Hawrylycz. An anatomic gene expression atlas of the adult mouse brain. Nat 2.1019 Neurosci, 12(3):356&#8211;362, March 2009. 2.1020 -[11]George Paxinos and Keith B.J. Franklin. The Mouse Brain in Stereotaxic Coordinates. Academic Press, 2 edition, July 2.1021 +[14]George Paxinos and Keith B.J. Franklin. The Mouse Brain in Stereotaxic Coordinates. Academic Press, 2 edition, July 2.1022 2001. 2.1023 -[12]Constance M. Smith, Jacqueline H. Finger, Terry F. Hayamizu, Ingeborg J. McCright, Janan T. Eppig, James A. 2.1024 +[15]A. Schleicher, N. Palomero-Gallagher, P. Morosan, S. Eickhoff, T. Kowalski, K. Vos, K. Amunts, and K. Zilles. Quanti- 2.1025 +tative architectural analysis: a new approach to cortical mapping. Anatomy and Embryology, 210(5):373&#8211;386, December 2.1026 +2005. 2.1027 +[16]Oliver Schmitt, Lars Hmke, and Lutz Dmbgen. Detection of cortical transition regions utilizing statistical analyses of 2.1028 +excess masses. NeuroImage, 19(1):42&#8211;63, May 2003. 2.1029 +[17]Constance M. Smith, Jacqueline H. Finger, Terry F. Hayamizu, Ingeborg J. McCright, Janan T. Eppig, James A. 2.1030 Kadin, Joel E. Richardson, and Martin Ringwald. The mouse gene expression database (GXD): 2007 update. Nucl. 2.1031 Acids Res., 35(suppl_1):D618&#8211;623, 2007. 2.1032 -[13]Judy Sprague, Leyla Bayraktaroglu, Dave Clements, Tom Conlin, David Fashena, Ken Frazer, Melissa Haendel, Dou- 2.1033 +[18]Judy Sprague, Leyla Bayraktaroglu, Dave Clements, Tom Conlin, David Fashena, Ken Frazer, Melissa Haendel, Dou- 2.1034 glas G Howe, Prita Mani, Sridhar Ramachandran, Kevin Schaper, Erik Segerdell, Peiran Song, Brock Sprunger, Sierra 2.1035 Taylor, Ceri E Van Slyke, and Monte Westerfield. The zebrafish information network: the zebrafish model organism 2.1036 database. Nucleic Acids Research, 34(Database issue):D581&#8211;5, 2006. PMID: 16381936. 2.1037 -[14]Larry Swanson. Brain Maps: Structure of the Rat Brain. Academic Press, 3 edition, November 2003. 2.1038 -[15]Carol L. Thompson, Sayan D. Pathak, Andreas Jeromin, Lydia L. Ng, Cameron R. MacPherson, Marty T. Mortrud, 2.1039 +[19]Larry Swanson. Brain Maps: Structure of the Rat Brain. Academic Press, 3 edition, November 2003. 2.1040 +[20]Carol L. Thompson, Sayan D. Pathak, Andreas Jeromin, Lydia L. Ng, Cameron R. MacPherson, Marty T. Mortrud, 2.1041 Allison Cusick, Zackery L. Riley, Susan M. Sunkin, Amy Bernard, Ralph B. Puchalski, Fred H. Gage, Allan R. 
Jones, 2.1042 Vladimir B. Bajic, Michael J. Hawrylycz, and Ed S. Lein. Genomic anatomy of the hippocampus. Neuron, 60(6):1010&#8211; 2.1043 1021, December 2008. 2.1044 -[16]Pavel Tomancak, Amy Beaton, Richard Weiszmann, Elaine Kwan, ShengQiang Shu, Suzanna E Lewis, Stephen 2.1045 +[21]Pavel Tomancak, Amy Beaton, Richard Weiszmann, Elaine Kwan, ShengQiang Shu, Suzanna E Lewis, Stephen 2.1046 Richards, Michael Ashburner, Volker Hartenstein, Susan E Celniker, and Gerald M Rubin. Systematic determina- 2.1047 tion of patterns of gene expression during drosophila embryogenesis. Genome Biology, 3(12):research008818814, 2002. 2.1048 PMC151190. 2.1049 -[17]Jano van Hemert and Richard Baldock. Mining Spatial Gene Expression Data for Association Rules, volume 4414/2007 2.1050 +[22]Jano van Hemert and Richard Baldock. Mining Spatial Gene Expression Data for Association Rules, volume 4414/2007 2.1051 of Lecture Notes in Computer Science, pages 66&#8211;76. Springer Berlin / Heidelberg, 2007. 2.1052 -[18]Shanmugasundaram Venkataraman, Peter Stevenson, Yiya Yang, Lorna Richardson, Nicholas Burton, Thomas P. Perry, 2.1053 +[23]Shanmugasundaram Venkataraman, Peter Stevenson, Yiya Yang, Lorna Richardson, Nicholas Burton, Thomas P. Perry, 2.1054 Paul Smith, Richard A. Baldock, Duncan R. Davidson, and Jeffrey H. Christiansen. EMAGE edinburgh mouse atlas 2.1055 of gene expression: 2008 update. Nucl. Acids Res., 36(suppl_1):D860&#8211;865, 2008. 2.1056 -[19]Axel Visel, Christina Thaller, and Gregor Eichele. GenePaint.org: an atlas of gene expression patterns in the mouse 2.1057 +[24]Axel Visel, Christina Thaller, and Gregor Eichele. GenePaint.org: an atlas of gene expression patterns in the mouse 2.1058 embryo. Nucl. Acids Res., 32(suppl_1):D552&#8211;556, 2004. 2.1059 -[20]Robert H Waterston, Kerstin Lindblad-Toh, Ewan Birney, Jane Rogers, Josep F Abril, Pankaj Agarwal, Richa Agar- 2.1060 +[25]Robert H Waterston, Kerstin Lindblad-Toh, Ewan Birney, Jane Rogers, Josep F Abril, Pankaj Agarwal, Richa Agar- 2.1061 wala, Rachel Ainscough, Marina Alexandersson, Peter An, Stylianos E Antonarakis, John Attwood, Robert Baertsch, 2.1062 Jonathon Bailey, Karen Barlow, Stephan Beck, Eric Berry, Bruce Birren, Toby Bloom, Peer Bork, Marc Botcherby, 2.1063 Nicolas Bray, Michael R Brent, Daniel G Brown, Stephen D Brown, Carol Bult, John Burton, Jonathan Butler, 2.1064 @@ -729,11 +749,4 @@ 2.1065 Evgeny M Zdobnov, Michael C Zody, and Eric S Lander. Initial sequencing and comparative analysis of the mouse 2.1066 genome. Nature, 420(6915):520&#8211;62, December 2002. PMID: 12466850. 2.1067 2.1068 -_______________________________________________________________________________________________________ 2.1069 - stuff i dunno where to put yet (there is more scattered through grant-oldtext): 2.1070 - Principle 4: Work in 2-D whenever possible 2.1071 - &#8212; 2.1072 - note: 2.1073 - two hemis 2.1074 2.1075 -
3.1 Binary file grant.odt has changed
4.1 Binary file grant.pdf has changed
5.1 --- a/grant.txt Tue Apr 21 00:54:22 2009 -0700 5.2 +++ b/grant.txt Tue Apr 21 03:36:06 2009 -0700 5.3 @@ -27,23 +27,27 @@ 5.4 5.5 === Aim 1: Given a map of regions, find genes that mark the regions === 5.6 5.7 -After defining terms, we will describe a set of principles which determine our strategy to completing this aim. 5.8 - 5.9 -\vspace{0.3cm}**Machine learning terminology: supervised learning** The task of looking for marker genes for known anatomical regions means that one is looking for a set of genes such that, if the expression level of those genes is known, then the locations of the regions can be inferred. 5.10 - 5.11 -If we define the regions so that they cover the entire anatomical structure to be divided, then instead of saying that we are using gene expression to find the locations of the regions, we may say that we are using gene expression to determine to which region each voxel within the structure belongs. We call this a __classification task__, because each voxel is being assigned to a class (namely, its region). 5.12 - 5.13 -Therefore, an understanding of the relationship between the combination of their expression levels and the locations of the regions may be expressed as a function. The input to this function is a voxel, along with the gene expression levels within that voxel; the output is the regional identity of the target voxel, that is, the region to which the target voxel belongs. We call this function a __classifier__. In general, the input to a classifier is called an __instance__, and the output is called a __label__ (or a __class label__). 5.14 - 5.15 -The object of aim 1 is not to produce a single classifier, but rather to develop an automated method for determining a classifier for any known anatomical structure. Therefore, we seek a procedure by which a gene expression dataset may be analyzed in concert with an anatomical atlas in order to produce a classifier. Such a procedure is a type of a machine learning procedure. The construction of the classifier is called __training__ (also __learning__), and the initial gene expression dataset used in the construction of the classifier is called __training data__. 5.16 - 5.17 -In the machine learning literature, this sort of procedure may be thought of as a __supervised learning task__, defined as a task in which the goal is to learn a mapping from instances to labels, and the training data consists of a set of instances (voxels) for which the labels (regions) are known. 5.18 +\vspace{0.3cm}**Machine learning terminology** The task of looking for marker genes for known anatomical regions means that one is looking for a set of genes such that, if the expression level of those genes is known, then the locations of the regions can be inferred. 5.19 + 5.20 +%% then instead of saying that we are using gene expression to find the locations of the regions, 5.21 + 5.22 +%%If we define the regions so that they cover the entire anatomical structure to be divided, we may say that we are using gene expression to determine to which region each voxel within the structure belongs. We call this a __classification task__, because each voxel is being assigned to a class (namely, its region). 5.23 + 5.24 +%%Therefore, an understanding of the relationship between the combination of their expression levels and the locations of the regions may be expressed as a function. 
The input to this function is a voxel, along with the gene expression levels within that voxel; the output is the regional identity of the target voxel, that is, the region to which the target voxel belongs. We call this function a __classifier__. In general, the input to a classifier is called an __instance__, and the output is called a __label__ (or a __class label__). 5.25 + 5.26 +If we define the regions so that they cover the entire anatomical structure to be divided, we may say that we are using gene expression to determine to which region each voxel within the structure belongs. We call this a __classification task__, because each voxel is being assigned to a class (namely, its region). An understanding of the relationship between the combination of their expression levels and the locations of the regions may be expressed as a function. The input to this function is a voxel, along with the gene expression levels within that voxel; the output is the regional identity of the target voxel, that is, the region to which the target voxel belongs. We call this function a __classifier__. In general, the input to a classifier is called an __instance__, and the output is called a __label__ (or a __class label__). 5.27 + 5.28 +%% The construction of the classifier is called __training__ (also __learning__), and 5.29 + 5.30 +The object of aim 1 is not to produce a single classifier, but rather to develop an automated method for determining a classifier for any known anatomical structure. Therefore, we seek a procedure by which a gene expression dataset may be analyzed in concert with an anatomical atlas in order to produce a classifier. The initial gene expression dataset used in the construction of the classifier is called __training data__. In the machine learning literature, this sort of procedure may be thought of as a __supervised learning task__, defined as a task in which the goal is to learn a mapping from instances to labels, and the training data consists of a set of instances (voxels) for which the labels (regions) are known. 5.31 5.32 Each gene expression level is called a __feature__, and the selection of which genes\footnote{Strictly speaking, the features are gene expression levels, but we'll call them genes.} to include is called __feature selection__. Feature selection is one component of the task of learning a classifier. Some methods for learning classifiers start out with a separate feature selection phase, whereas other methods combine feature selection with other aspects of training. 5.33 5.34 One class of feature selection methods assigns some sort of score to each candidate gene. The top-ranked genes are then chosen. Some scoring measures can assign a score to a set of selected genes, not just to a single gene; in this case, a dynamic procedure may be used in which features are added and subtracted from the selected set depending on how much they raise the score. Such procedures are called "stepwise" or "greedy". 5.35 5.36 -Although the classifier itself may only look at the gene expression data within each voxel before classifying that voxel, the learning algorithm which constructs the classifier may look over the entire dataset. We can categorize score-based feature selection methods depending on how the score of calculated. Often the score calculation consists of assigning a sub-score to each voxel, and then aggregating these sub-scores into a final score (the aggregation is often a sum or a sum of squares or average). 
If only information from nearby voxels is used to calculate a voxel's sub-score, then we say it is a __local scoring method__. If only information from the voxel itself is used to calculate a voxel's sub-score, then we say it is a __pointwise scoring method__. 5.37 +Although the classifier itself may only look at the gene expression data within each voxel before classifying that voxel, the algorithm which constructs the classifier may look over the entire dataset. We can categorize score-based feature selection methods depending on how the score is calculated. Often the score calculation consists of assigning a sub-score to each voxel, and then aggregating these sub-scores into a final score (the aggregation is often a sum or a sum of squares or average). If only information from nearby voxels is used to calculate a voxel's sub-score, then we say it is a __local scoring method__. If only information from the voxel itself is used to calculate a voxel's sub-score, then we say it is a __pointwise scoring method__. 5.38 + 5.39 +=== Our strategy for Aim 1 === 5.40 5.41 Key questions when choosing a learning method are: What are the instances? What are the features? How are the features chosen? Here are four principles that outline our answers to these questions. 5.42 5.43 @@ -69,9 +73,7 @@ 5.44 \vspace{0.3cm}**Principle 4: Work in 2-D whenever possible** 5.45 5.46 5.47 -There are many anatomical structures which are commonly characterized in terms of a two-dimensional manifold. When it is known that the structure that one is looking for is two-dimensional, the results may be improved by allowing the analysis algorithm to take advantage of this prior knowledge. In addition, it is easier for humans to visualize and work with 2-D data. 5.48 - 5.49 -Therefore, when possible, the instances should represent pixels, not voxels. 5.50 +There are many anatomical structures which are commonly characterized in terms of a two-dimensional manifold. When it is known that the structure that one is looking for is two-dimensional, the results may be improved by allowing the analysis algorithm to take advantage of this prior knowledge. In addition, it is easier for humans to visualize and work with 2-D data. Therefore, when possible, the instances should represent pixels, not voxels. 5.51 5.52 5.53 === Related work === 5.54 @@ -85,23 +87,15 @@ 5.55 5.56 \cite{lee_high-resolution_2007} mentions the possibility of constructing a spatial region for each gene, and then, for each anatomical structure of interest, computing what proportion of this structure is covered by the gene's spatial region. 5.57 5.58 -GeneAtlas\cite{carson_digital_2005} and EMAGE \cite{venkataraman_emage_2008} allow the user to construct a search query by demarcating regions and then specifing either the strength of expression or the name of another gene or dataset whose expression pattern is to be matched. For the similiarity score (match score) between two images (in this case, the query and the gene expression images), GeneAtlas uses the sum of a weighted L1-norm distance between vectors whose components represent the number of cells within a pixel\footnote{Actually, many of these projects use quadrilaterals instead of square pixels; but we will refer to them as pixels for simplicity.} whose expression is within four discretization levels. EMAGE uses Jaccard similarity, which is equal to the number of true pixels in the intersection of the two images, divided by the number of pixels in their union.
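For reference, the Jaccard similarity mentioned here is simple to compute; a minimal sketch for two boolean images of the same shape (the array names are hypothetical):

\begin{verbatim}
import numpy as np

def jaccard_similarity(mask_a, mask_b):
    # Number of true pixels in the intersection divided by the number
    # of true pixels in the union of the two binary images.
    intersection = np.logical_and(mask_a, mask_b).sum()
    union = np.logical_or(mask_a, mask_b).sum()
    return intersection / union if union > 0 else 0.0
\end{verbatim}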
Neither GeneAtlas nor EMAGE allow one to search for combinations of genes that define a region in concert but not separately. 5.59 +GeneAtlas\cite{carson_digital_2005} and EMAGE \cite{venkataraman_emage_2008} allow the user to construct a search query by demarcating regions and then specifying either the strength of expression or the name of another gene or dataset whose expression pattern is to be matched. For the similarity score (match score) between two images (in this case, the query and the gene expression images), GeneAtlas uses the sum of a weighted L1-norm distance between vectors whose components represent the number of cells within a pixel\footnote{Actually, many of these projects use quadrilaterals instead of square pixels; but we will refer to them as pixels for simplicity.} whose expression is within four discretization levels. EMAGE uses Jaccard similarity\footnote{the number of true pixels in the intersection of the two images, divided by the number of pixels in their union.}. Neither GeneAtlas nor EMAGE allows one to search for combinations of genes that define a region in concert but not separately. 5.60 5.61 \cite{ng_anatomic_2009} describes AGEA, "Anatomic Gene Expression 5.62 Atlas". AGEA has three 5.63 -components: 5.64 - 5.65 -\begin{itemize} 5.66 -\item Gene Finder: The user selects a seed voxel and the system (1) chooses a 5.67 +components. **Gene Finder**: The user selects a seed voxel and the system (1) chooses a 5.68 cluster which includes the seed voxel, (2) yields a list of genes 5.69 -which are overexpressed in that cluster. (note: the ABA website also contains pre-prepared lists of overexpressed genes for selected structures) 5.70 - 5.71 -\item Correlation: The user selects a seed voxel and the system 5.72 +which are overexpressed in that cluster. (note: the ABA website also contains pre-prepared lists of overexpressed genes for selected structures). **Correlation**: The user selects a seed voxel and the system 5.73 then shows the user how much correlation there is between the gene 5.74 -expression profile of the seed voxel and every other voxel. 5.75 - 5.76 -\item Clusters: will be described later 5.77 -\end{itemize} 5.78 +expression profile of the seed voxel and every other voxel. **Clusters**: will be described later 5.79 5.80 Gene Finder is different from our Aim 1 in at least three ways. First, Gene Finder finds only single genes, whereas we will also look for combinations of genes. Second, Gene Finder can only use overexpression as a marker, whereas we will also search for underexpression. Third, Gene Finder uses a simple pointwise score\footnote{"Expression energy ratio", which captures overexpression.}, whereas we will also use geometric scores such as gradient similarity (described in Preliminary Studies). Figures \ref{MOcombo}, \ref{hole}, and \ref{AUDgeometry} in the Preliminary Studies section contain evidence that each of our three choices is the right one. 5.81 5.82 @@ -122,35 +116,36 @@ 5.83 5.84 The task of deciding how to carve up a structure into anatomical regions can be put into these terms. The instances are once again voxels (or pixels) along with their associated gene expression profiles. We make the assumption that voxels from the same anatomical region have similar gene expression profiles, at least compared to the other regions. This means that clustering voxels is the same as finding potential regions; we seek a partitioning of the voxels into regions, that is, into clusters of voxels with similar gene expression.
5.85 5.86 -It is desirable to determine not just one set of regions, but also how these regions relate to each other, if at all; perhaps some of the regions are more similar to each other than to the rest, suggesting that, although at a fine spatial scale they could be considered separate, on a coarser spatial scale they could be grouped together into one large region. This suggests the outcome of clustering may be a hierarchial tree of clusters, rather than a single set of clusters which partition the voxels. This is called hierarchial clustering. 5.87 +%%It is desirable to determine not just one set of regions, but also how these regions relate to each other, if at all; perhaps some of the regions are more similar to each other than to the rest, suggesting that, although at a fine spatial scale they could be considered separate, on a coarser spatial scale they could be grouped together into one large region. This suggests the outcome of clustering may be a hierarchial tree of clusters, rather than a single set of clusters which partition the voxels. This is called hierarchial clustering. 5.88 + 5.89 +It is desirable to determine not just one set of regions, but also how these regions relate to each other. The outcome of clustering may be a hierarchical tree of clusters, rather than a single set of clusters which partition the voxels. This is called hierarchical clustering. 5.90 5.91 5.92 \vspace{0.3cm}**Similarity scores** 5.93 - 5.94 A crucial choice when designing a clustering method is how to measure similarity, across either pairs of instances, or clusters, or both. There is much overlap between scoring methods for feature selection (discussed above under Aim 1) and scoring methods for similarity. 5.95 5.96 5.97 \vspace{0.3cm}**Spatially contiguous clusters; image segmentation** 5.98 - 5.99 - 5.100 We have shown that aim 2 is a type of clustering task. In fact, it is a special type of clustering task because we have an additional constraint on clusters; voxels grouped together into a cluster must be spatially contiguous. In Preliminary Studies, we show that one can get reasonable results without enforcing this constraint; however, we plan to compare these results against other methods which guarantee contiguous clusters. 5.101 5.102 -Perhaps the biggest source of continguous clustering algorithms is the field of computer vision, which has produced a variety of image segmentation algorithms. Image segmentation is the task of partitioning the pixels in a digital image into clusters, usually contiguous clusters. Aim 2 is similar to an image segmentation task. There are two main differences; in our task, there are thousands of color channels (one for each gene), rather than just three. However, there are imaging tasks which use more than three colors, for example multispectral imaging and hyperspectral imaging, which are often used to process satellite imagery. A more crucial difference is that there are various cues which are appropriate for detecting sharp object boundaries in a visual scene but which are not appropriate for segmenting abstract spatial data such as gene expression. Although many image segmentation algorithms can be expected to work well for segmenting other sorts of spatially arranged data, some of these algorithms are specialized for visual images. 5.103 +%%Perhaps the biggest source of continguous clustering algorithms is the field of computer vision, which has produced a variety of image segmentation algorithms.
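As an illustration of the hierarchical clustering described above, the following sketch builds a tree of voxel clusters using correlation distance between expression profiles (everything here -- the file name, the linkage method, the numbers of clusters -- is a placeholder choice, not a commitment):

\begin{verbatim}
import numpy as np
from scipy.spatial.distance import pdist
from scipy.cluster.hierarchy import linkage, fcluster

expression = np.load('expression.npy')   # hypothetical (n_voxels, n_genes)

# Dissimilarity between voxels: 1 - correlation of their expression profiles.
# (For tens of thousands of voxels this condensed distance matrix is large.)
dists = pdist(expression, metric='correlation')

# Agglomerative clustering produces a tree of clusters, not a single partition.
tree = linkage(dists, method='average')

# Cutting the tree at different levels gives coarser or finer candidate regions.
coarse = fcluster(tree, t=5, criterion='maxclust')
fine = fcluster(tree, t=50, criterion='maxclust')
\end{verbatim}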
Image segmentation is the task of partitioning the pixels in a digital image into clusters, usually contiguous clusters. Aim 2 is similar to an image segmentation task. There are two main differences; in our task, there are thousands of color channels (one for each gene), rather than just three. However, there are imaging tasks which use more than three colors, for example multispectral imaging and hyperspectral imaging, which are often used to process satellite imagery. A more crucial difference is that there are various cues which are appropriate for detecting sharp object boundaries in a visual scene but which are not appropriate for segmenting abstract spatial data such as gene expression. Although many image segmentation algorithms can be expected to work well for segmenting other sorts of spatially arranged data, some of these algorithms are specialized for visual images. 5.104 + 5.105 +Image segmentation is the task of partitioning the pixels in a digital image into clusters, usually contiguous clusters. Aim 2 is similar to an image segmentation task. There are two main differences; in our task, there are thousands of color channels (one for each gene), rather than just three\footnote{There are imaging tasks which use more than three colors, for example multispectral imaging and hyperspectral imaging, which are often used to process satellite imagery.}. A more crucial difference is that there are various cues which are appropriate for detecting sharp object boundaries in a visual scene but which are not appropriate for segmenting abstract spatial data such as gene expression. Although many image segmentation algorithms can be expected to work well for segmenting other sorts of spatially arranged data, some of these algorithms are specialized for visual images. 5.106 5.107 5.108 \vspace{0.3cm}**Dimensionality reduction** 5.109 In this section, we discuss reducing the length of the per-pixel gene expression feature vector. By "dimension", we mean the dimension of this vector, not the spatial dimension of the underlying data. 5.110 5.111 -Unlike aim 1, there is no externally-imposed need to select only a handful of informative genes for inclusion in the instances. However, some clustering algorithms perform better on small numbers of features. There are techniques which "summarize" a larger number of features using a smaller number of features; these techniques go by the name of feature extraction or dimensionality reduction. The small set of features that such a technique yields is called the __reduced feature set__. After the reduced feature set is created, the instances may be replaced by __reduced instances__, which have as their features the reduced feature set rather than the original feature set of all gene expression levels. Note that the features in the reduced feature set do not necessarily correspond to genes; each feature in the reduced set may be any function of the set of gene expression levels. 5.112 - 5.113 -Dimensionality reduction before clustering is useful on large datasets. First, because the number of features in the reduced dataset is less than in the original dataset, the running time of clustering algorithms may be much less. Second, it is thought that some clustering algorithms may give better results on reduced data. 5.114 - 5.115 -Another use for dimensionality reduction is to visualize the relationships between regions after clustering. 
For example, one might want to make a 2-D plot upon which each region is represented by a single point, and with the property that regions with similar gene expression profiles should be nearby on the plot (that is, the property that distance between pairs of points in the plot should be proportional to some measure of dissimilarity in gene expression). It is likely that no arrangement of the points on a 2-D plan will exactly satisfy this property; however, dimensionality reduction techniques allow one to find arrangements of points that approximately satisfy that property. Note that in this application, dimensionality reduction is being applied after clustering; whereas in the previous paragraph, we were talking about using dimensionality reduction before clustering. 5.116 +%% After the reduced feature set is created, the instances may be replaced by __reduced instances__, which have as their features the reduced feature set rather than the original feature set of all gene expression levels. 5.117 + 5.118 +Unlike aim 1, there is no externally-imposed need to select only a handful of informative genes for inclusion in the instances. However, some clustering algorithms perform better on small numbers of features\footnote{First, because the number of features in the reduced dataset is less than in the original dataset, the running time of clustering algorithms may be much less. Second, it is thought that some clustering algorithms may give better results on reduced data.}. There are techniques which "summarize" a larger number of features using a smaller number of features; these techniques go by the name of feature extraction or dimensionality reduction. The small set of features that such a technique yields is called the __reduced feature set__. Note that the features in the reduced feature set do not necessarily correspond to genes; each feature in the reduced set may be any function of the set of gene expression levels. 5.119 + 5.120 +%%Dimensionality reduction before clustering is useful on large datasets. First, because the number of features in the reduced dataset is less than in the original dataset, the running time of clustering algorithms may be much less. Second, it is thought that some clustering algorithms may give better results on reduced data. Another use for dimensionality reduction is to visualize the relationships between regions after clustering. 5.121 + 5.122 +%%Another use for dimensionality reduction is to visualize the relationships between regions after clustering. For example, one might want to make a 2-D plot upon which each region is represented by a single point, and with the property that regions with similar gene expression profiles should be nearby on the plot (that is, the property that distance between pairs of points in the plot should be proportional to some measure of dissimilarity in gene expression). It is likely that no arrangement of the points on a 2-D plan will exactly satisfy this property; however, dimensionality reduction techniques allow one to find arrangements of points that approximately satisfy that property. Note that in this application, dimensionality reduction is being applied after clustering; whereas in the previous paragraph, we were talking about using dimensionality reduction before clustering. 
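A minimal sketch of dimensionality reduction of the per-pixel feature vector, using PCA as the example technique (the file name and the number of reduced features are hypothetical placeholders):

\begin{verbatim}
import numpy as np
from sklearn.decomposition import PCA

expression = np.load('surface_expression.npy')   # hypothetical (n_pixels, n_genes)

# Summarize the thousands of per-gene features with a small reduced feature set.
pca = PCA(n_components=50)
reduced = pca.fit_transform(expression)          # shape (n_pixels, 50)

# Each reduced feature is a function (here, a linear combination) of the gene
# expression levels, so it need not correspond to any single gene.
print(pca.explained_variance_ratio_[:10])
\end{verbatim}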
5.123 5.124 5.125 \vspace{0.3cm}**Clustering genes rather than voxels** 5.126 - 5.127 - 5.128 Although the ultimate goal is to cluster the instances (voxels or pixels), one strategy to achieve this goal is to first cluster the features (genes). There are two ways that clusters of genes could be used. 5.129 5.130 Gene clusters could be used as part of dimensionality reduction: rather than have one feature for each gene, we could have one reduced feature for each gene cluster. 5.131 @@ -160,7 +155,7 @@ 5.132 The task of clustering both the instances and the features is called co-clustering, and there are a number of co-clustering algorithms. 5.133 5.134 === Related work === 5.135 -We are aware of five existing efforts to cluster spatial gene expression data. 5.136 +Some researchers have attempted to parcellate cortex on the basis of non-gene expression data. For example, \cite{schleicher_quantitative_2005}, \cite{annese_myelo-architectonic_2004}, \cite{schmitt_detection_2003}, and \cite{adamson_tracking_2005} associate spots on the cortex with the radial profile\footnote{A radial profile is a profile along a line perpendicular to the cortical surface.} of response to some stain (\cite{kruggel_analyzingneocortical_2003} uses MRI), extract features from this profile, and then use similarity between surface pixels to cluster. Features used include statistical moments, wavelets, and the excess mass functional. Some of these features are motivated by the presence of tangential lines of stain intensity which correspond to laminar structure. Some methods use standard clustering procedures, whereas others make use of the spatial nature of the data to look for sudden transitions, which are identified as areal borders. 5.137 5.138 \cite{thompson_genomic_2008} describes an analysis of the anatomy of 5.139 the hippocampus using the ABA dataset. In addition to manual analysis, 5.140 @@ -174,33 +169,35 @@ 5.141 5.142 AGEA\cite{ng_anatomic_2009} includes a preset hierarchial clustering of voxels based on a recursive bifurcation algorithm with correlation as the similarity metric. EMAGE\cite{venkataraman_emage_2008} allows the user to select a dataset from among a large number of alternatives, or by running a search query, and then to cluster the genes within that dataset. EMAGE clusters via hierarchial complete linkage clustering with un-centred correlation as the similarity score. 5.143 5.144 -\cite{chin_genome-scale_2007} clustered genes, starting out by selecting 135 genes out of 20,000 which had high variance over voxels and which were highly correlated with many other genes. They computed the matrix of (rank) correlations between pairs of these genes, and ordered the rows of this matrix as follows: "the first row of the matrix was chosen to show the strongest contrast between the highest and lowest correlation coefficient for that row. The remaining rows were then arranged in order of decreasing similarity using a least squares metric". The resulting matrix showed four clusters. For each cluster, prototypical spatial expression patterns were created by averaging the genes in the cluster. The prototypes were analyzed manually, without clustering voxels 5.145 - 5.146 -In an interesting twist, \cite{hemert_matching_2008} applies their technique for finding combinations of marker genes for the purpose of clustering genes around a "seed gene". 
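One way to realize the "one reduced feature per gene cluster" idea mentioned above is sketched below (the clustering algorithm, the number of gene clusters, and the file name are placeholder choices):

\begin{verbatim}
import numpy as np
from sklearn.cluster import KMeans

expression = np.load('surface_expression.npy')   # hypothetical (n_pixels, n_genes)

# Cluster the genes (the columns) by the similarity of their spatial patterns.
n_gene_clusters = 20
gene_cluster = KMeans(n_clusters=n_gene_clusters,
                      n_init=10).fit_predict(expression.T)

# One reduced feature per gene cluster: the mean expression of its member genes.
reduced = np.column_stack([expression[:, gene_cluster == k].mean(axis=1)
                           for k in range(n_gene_clusters)])
\end{verbatim}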
The way they do this is by using the pattern of expression of the seed gene as the target image, and then searching for other genes which can be combined to reproduce this pattern. Those other genes which are found are considered to be related to the seed. The same team also describes a method\cite{van_hemert_mining_2007} for finding "association rules" such as, "if this voxel is expressed in by any gene, then that voxel is probably also expressed in by the same gene". This could be useful as part of a procedure for clustering voxels. 5.147 - 5.148 -In summary, although these projects obtained clusterings, there has not been much comparison between different algorithms or scoring methods, so it is likely that the best clustering method for this application has not yet been found. Also, none of these projects did a separate dimensionality reduction step before clustering pixels, none tried to cluster genes first in order to guide automated clustering of pixels into spatial regions, and none used co-clustering algorithms. 5.149 - 5.150 - 5.151 - 5.152 -=== Aim 3 === 5.153 +\cite{chin_genome-scale_2007} clustered genes, starting out by selecting 135 genes out of 20,000 which had high variance over voxels and which were highly correlated with many other genes. They computed the matrix of (rank) correlations between pairs of these genes, and ordered the rows of this matrix as follows: "the first row of the matrix was chosen to show the strongest contrast between the highest and lowest correlation coefficient for that row. The remaining rows were then arranged in order of decreasing similarity using a least squares metric". The resulting matrix showed four clusters. For each cluster, prototypical spatial expression patterns were created by averaging the genes in the cluster. The prototypes were analyzed manually, without clustering voxels. 5.154 + 5.155 +\cite{hemert_matching_2008} applies their technique for finding combinations of marker genes for the purpose of clustering genes around a "seed gene". They do this by using the pattern of expression of the seed gene as the target image, and then searching for other genes which can be combined to reproduce this pattern. Other genes which are found are considered to be related to the seed. The same team also describes a method\cite{van_hemert_mining_2007} for finding "association rules" such as, "if this voxel is expressed in by any gene, then that voxel is probably also expressed in by the same gene". This could be useful as part of a procedure for clustering voxels. 5.156 + 5.157 +In summary, although these projects obtained clusterings, there has not been much comparison between different algorithms or scoring methods, so it is likely that the best clustering method for this application has not yet been found. The projects using gene expression on cortex did not attempt to make use of the radial profile of gene expression. Also, none of these projects did a separate dimensionality reduction step before clustering pixels, none tried to cluster genes first in order to guide automated clustering of pixels into spatial regions, and none used co-clustering algorithms. 5.158 + 5.159 + 5.160 + 5.161 +=== Aim 3: apply the methods developed to the cerebral cortex === 5.162 5.163 \vspace{0.3cm}**Background** 5.164 5.165 The cortex is divided into areas and layers. Because of the cortical columnar organization, the parcellation of the cortex into areas can be drawn as a 2-D map on the surface of the cortex. 
In the third dimension, the boundaries between the areas continue downwards into the cortical depth, perpendicular to the surface. The layer boundaries run parallel to the surface. One can picture an area of the cortex as a slice of a six-layered cake\footnote{Outside of isocortex, the number of layers varies.}. 5.166 5.167 -Although it is known that different cortical areas have distinct roles in both normal functioning and in disease processes, there are no known marker genes for most cortical areas. When it is necessary to divide a tissue sample into cortical areas, this is a manual process that requires a skilled human to combine multiple visual cues and interpret them in the context of their approximate location upon the cortical surface. 5.168 - 5.169 -Even the questions of how many areas should be recognized in cortex, and what their arrangement is, are still not completely settled. A proposed division of the cortex into areas is called a cortical map. In the rodent, the lack of a single agreed-upon map can be seen by contrasting the recent maps given by Swanson\cite{swanson_brain_2003} on the one hand, and Paxinos and Franklin\cite{paxinos_mouse_2001} on the other. While the maps are certainly very similar in their general arrangement, significant differences remain in the details. 5.170 +It is known that different cortical areas have distinct roles in both normal functioning and in disease processes, yet there are no known marker genes for most cortical areas. When it is necessary to divide a tissue sample into cortical areas, this is a manual process that requires a skilled human to combine multiple visual cues and interpret them in the context of their approximate location upon the cortical surface. 5.171 + 5.172 +Even the questions of how many areas should be recognized in cortex, and what their arrangement is, are still not completely settled. A proposed division of the cortex into areas is called a cortical map. In the rodent, the lack of a single agreed-upon map can be seen by contrasting the recent maps given by Swanson\cite{swanson_brain_2003} on the one hand, and Paxinos and Franklin\cite{paxinos_mouse_2001} on the other. While the maps are certainly very similar in their general arrangement, significant differences remain. 5.173 5.174 \vspace{0.3cm}**The Allen Mouse Brain Atlas dataset** 5.175 5.176 -The Allen Mouse Brain Atlas (ABA) data were produced by doing in-situ hybridization on slices of male, 56-day-old C57BL/6J mouse brains. Pictures were taken of the processed slice, and these pictures were semi-automatically analyzed in order to create a digital measurement of gene expression levels at each location in each slice. Per slice, cellular spatial resolution is achieved. Using this method, a single physical slice can only be used to measure one single gene; many different mouse brains were needed in order to measure the expression of many genes. 5.177 - 5.178 -Next, an automated nonlinear alignment procedure located the 2D data from the various slices in a single 3D coordinate system. In the final 3D coordinate system, voxels are cubes with 200 microns on a side. There are 67x41x58 \= 159,326 voxels in the 3D coordinate system, of which 51,533 are in the brain\cite{ng_anatomic_2009}. 5.179 - 5.180 -Mus musculus, the common house mouse, is thought to contain about 22,000 protein-coding genes\cite{waterston_initial_2002}. The ABA contains data on about 20,000 genes in sagittal sections, out of which over 4,000 genes are also measured in coronal sections. 
Our dataset is derived from only the coronal subset of the ABA, because the sagittal data do not cover the entire cortex, and also has greater registration error\cite{ng_anatomic_2009}. Genes were selected by the Allen Institute for coronal sectioning based on, "classes of known neuroscientific interest... or through post hoc identification of a marked non-ubiquitous expression pattern"\cite{ng_anatomic_2009}. 5.181 - 5.182 -The ABA is not the only large public spatial gene expression dataset. Other such resources include GENSAT\cite{gong_gene_2003}, GenePaint\cite{visel_genepaint.org:atlas_2004}, its sister project GeneAtlas\cite{carson_digital_2005}, BGEM\cite{magdaleno_bgem:in_2006}, EMAGE\cite{venkataraman_emage_2008}, EurExpress\footnote{http://www.eurexpress.org/ee/; EurExpress data are also entered into EMAGE}, EADHB\footnote{http://www.ncl.ac.uk/ihg/EADHB/database/EADHB_database.html}, MAMEP\footnote{http://mamep.molgen.mpg.de/index.php}, Xenbase\footnote{http://xenbase.org/}, ZFIN\cite{sprague_zebrafish_2006}, Aniseed\footnote{http://aniseed-ibdm.univ-mrs.fr/}, VisiGene\footnote{http://genome.ucsc.edu/cgi-bin/hgVisiGene ; includes data from some the other listed data sources}, GEISHA\cite{bell_geishawhole-mount_2004}, Fruitfly.org\cite{tomancak_systematic_2002}, COMPARE\footnote{http://compare.ibdml.univ-mrs.fr/} GXD\cite{smith_mouse_2007}, GEO\cite{barrett_ncbi_2007}\footnote{GXD and GEO contain spatial data but also non-spatial data. All GXD spatial data are also in EMAGE.}. With the exception of the ABA, GenePaint, and EMAGE, most of these resources have not (yet) extracted the expression intensity from the ISH images and registered the results into a single 3-D space, and to our knowledge only ABA and EMAGE make this form of data available for public download from the website\footnote{without prior offline registration}. Many of these resources focus on developmental gene expression. 5.183 +The Allen Mouse Brain Atlas (ABA) data were produced by doing in-situ hybridization on slices of male, 56-day-old C57BL/6J mouse brains. Pictures were taken of the processed slice, and these pictures were semi-automatically analyzed to create a digital measurement of gene expression levels at each location in each slice. Per slice, cellular spatial resolution is achieved. Using this method, a single physical slice can only be used to measure one single gene; many different mouse brains were needed in order to measure the expression of many genes. 5.184 + 5.185 +An automated nonlinear alignment procedure located the 2D data from the various slices in a single 3D coordinate system. In the final 3D coordinate system, voxels are cubes with 200 microns on a side. There are 67x41x58 \= 159,326 voxels in the 3D coordinate system, of which 51,533 are in the brain\cite{ng_anatomic_2009}. 5.186 + 5.187 +Mus musculus is thought to contain about 22,000 protein-coding genes\cite{waterston_initial_2002}. The ABA contains data on about 20,000 genes in sagittal sections, out of which over 4,000 genes are also measured in coronal sections. Our dataset is derived from only the coronal subset of the ABA\footnote{The sagittal data do not cover the entire cortex, and also have greater registration error\cite{ng_anatomic_2009}. Genes were selected by the Allen Institute for coronal sectioning based on, "classes of known neuroscientific interest... or through post hoc identification of a marked non-ubiquitous expression pattern"\cite{ng_anatomic_2009}.}. 
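The grid dimensions below come directly from the text (a 67x41x58 grid of 200-micron voxels); the file names and the exact packaging of the downloadable expression data are hypothetical placeholders, used only to illustrate how the volume is indexed:

\begin{verbatim}
import numpy as np

GRID_SHAPE = (67, 41, 58)   # 67*41*58 = 159,326 voxels, 200 microns on a side

# Hypothetical inputs: a flat per-voxel expression-energy vector for one gene,
# and a boolean mask marking the 51,533 voxels that lie inside the brain.
energy = np.load('gene_energy_flat.npy')   # shape (159326,)
brain_mask = np.load('brain_mask.npy')     # shape (159326,), dtype bool

volume = energy.reshape(GRID_SHAPE)        # back to the 3-D coordinate system
in_brain = energy[brain_mask]              # restrict analysis to brain voxels
print(volume.shape, in_brain.shape)
\end{verbatim}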
5.188 + 5.189 +%%The ABA is not the only large public spatial gene expression dataset. Other such resources include GENSAT\cite{gong_gene_2003}, GenePaint\cite{visel_genepaint.org:atlas_2004}, its sister project GeneAtlas\cite{carson_digital_2005}, BGEM\cite{magdaleno_bgem:in_2006}, EMAGE\cite{venkataraman_emage_2008}, EurExpress\footnote{http://www.eurexpress.org/ee/; EurExpress data are also entered into EMAGE}, EADHB\footnote{http://www.ncl.ac.uk/ihg/EADHB/database/EADHB_database.html}, MAMEP\footnote{http://mamep.molgen.mpg.de/index.php}, Xenbase\footnote{http://xenbase.org/}, ZFIN\cite{sprague_zebrafish_2006}, Aniseed\footnote{http://aniseed-ibdm.univ-mrs.fr/}, VisiGene\footnote{http://genome.ucsc.edu/cgi-bin/hgVisiGene ; includes data from some the other listed data sources}, GEISHA\cite{bell_geishawhole-mount_2004}, Fruitfly.org\cite{tomancak_systematic_2002}, COMPARE\footnote{http://compare.ibdml.univ-mrs.fr/} GXD\cite{smith_mouse_2007}, GEO\cite{barrett_ncbi_2007}\footnote{GXD and GEO contain spatial data but also non-spatial data. All GXD spatial data are also in EMAGE.}. With the exception of the ABA, GenePaint, and EMAGE, most of these resources have not (yet) extracted the expression intensity from the ISH images and registered the results into a single 3-D space, and to our knowledge only ABA and EMAGE make this form of data available for public download from the website\footnote{without prior offline registration}. Many of these resources focus on developmental gene expression. 5.190 + 5.191 +The ABA is not the only large public spatial gene expression dataset\footnote{Other such resources include GENSAT\cite{gong_gene_2003}, GenePaint\cite{visel_genepaint.org:atlas_2004}, its sister project GeneAtlas\cite{carson_digital_2005}, BGEM\cite{magdaleno_bgem:in_2006}, EMAGE\cite{venkataraman_emage_2008}, EurExpress (http://www.eurexpress.org/ee/; EurExpress data are also entered into EMAGE), EADHB (http://www.ncl.ac.uk/ihg/EADHB/database/EADHB_database.html), MAMEP (http://mamep.molgen.mpg.de/index.php), Xenbase (http://xenbase.org/), ZFIN\cite{sprague_zebrafish_2006}, Aniseed (http://aniseed-ibdm.univ-mrs.fr/), VisiGene (http://genome.ucsc.edu/cgi-bin/hgVisiGene ; includes data from some of the other listed data sources), GEISHA\cite{bell_geishawhole-mount_2004}, Fruitfly.org\cite{tomancak_systematic_2002}, COMPARE (http://compare.ibdml.univ-mrs.fr/), GXD\cite{smith_mouse_2007}, GEO\cite{barrett_ncbi_2007} (GXD and GEO contain spatial data but also non-spatial data. All GXD spatial data are also in EMAGE.)}. With the exception of the ABA, GenePaint, and EMAGE, most of the other resources have not (yet) extracted the expression intensity from the ISH images and registered the results into a single 3-D space, and to our knowledge only ABA and EMAGE make this form of data available for public download from the website\footnote{without prior offline registration}. Many of these resources focus on developmental gene expression. 5.192 5.193 5.194 5.195 @@ -210,7 +207,9 @@ 5.196 5.197 The application of the marker gene finding algorithm to the cortex will also support the development of new neuroanatomical methods. In addition to finding markers for each individual cortical areas, we will find a small panel of genes that can find many of the areal boundaries at once. This panel of marker genes will allow the development of an ISH protocol that will allow experimenters to more easily identify which anatomical areas are present in small samples of cortex. 
5.198 5.199 -The method developed in aim (2) will provide a genoarchitectonic viewpoint that will contribute to the creation of a better map. The development of present-day cortical maps was driven by the application of histological stains. It is conceivable that if a different set of stains had been available which identified a different set of features, then the today's cortical maps would have come out differently. Since the number of classes of stains is small compared to the number of genes, it is likely that there are many repeated, salient spatial patterns in the gene expression which have not yet been captured by any stain. Therefore, current ideas about cortical anatomy need to incorporate what we can learn from looking at the patterns of gene expression. 5.200 + 5.201 +%% Since the number of classes of stains is small compared to the number of genes, 5.202 +The method developed in aim (2) will provide a genoarchitectonic viewpoint that will contribute to the creation of a better map. The development of present-day cortical maps was driven by the application of histological stains. If a different set of stains had been available which identified a different set of features, then today's cortical maps may have come out differently. It is likely that there are many repeated, salient spatial patterns in the gene expression which have not yet been captured by any stain. Therefore, cortical anatomy needs to incorporate what we can learn from looking at the patterns of gene expression. 5.203 5.204 5.205 While we do not here propose to analyze human gene expression data, it is conceivable that the methods we propose to develop could be used to suggest modifications to the human cortical map as well. 5.206 @@ -218,7 +217,7 @@ 5.207 5.208 === Related work === 5.209 5.210 -\cite{ng_anatomic_2009} describes the application of AGEA to the cortex. The paper describes interesting results on the structure of correlations between voxel gene expression profiles within a handful of cortical areas. However, this sort of analysis is not related to either of our aims, as it neither finds marker genes, nor does it suggest a cortical map based on gene expression data. Neither of the other components of AGEA can be applied to cortical areas; AGEA's Gene Finder cannot be used to find marker genes for the cortical areas; and AGEA's hierarchial clustering does not produce clusters corresponding to the cortical areas\footnote{In both cases, the root cause is that pairwise correlations between the gene expression of voxels in different areas but the same layer are often stronger than pairwise correlations between the gene expression of voxels in different layers but the same area. Therefore, a pairwise voxel correlation clustering algorithm will tend to create clusters representing cortical layers, not areas. This is why the hierarchial clustering does not find cortical areas (there are clusters which presumably correspond to the intersection of a layer and an area, but since one area will have many layer-area intersection clusters, further work is needed to make sense of these). The reason that Gene Finder cannot the find marker genes for cortical areas is that in Gene Finder, although the user chooses a seed voxel, Gene Finder chooses the ROI for which genes will be found, and it creates that ROI by (pairwise voxel correlation) clustering around the seed.}. 5.211 +\cite{ng_anatomic_2009} describes the application of AGEA to the cortex. 
The paper describes interesting results on the structure of correlations between voxel gene expression profiles within a handful of cortical areas. However, this sort of analysis is not related to either of our aims, as it neither finds marker genes, nor does it suggest a cortical map based on gene expression data. Neither of the other components of AGEA can be applied to cortical areas; AGEA's Gene Finder cannot be used to find marker genes for the cortical areas; and AGEA's hierarchial clustering does not produce clusters corresponding to the cortical areas\footnote{In both cases, the cause is that pairwise correlations between the gene expression of voxels in different areas but the same layer are often stronger than pairwise correlations between the gene expression of voxels in different layers but the same area. Therefore, a pairwise voxel correlation clustering algorithm will tend to create clusters representing cortical layers, not areas (there may be clusters which presumably correspond to the intersection of a layer and an area, but since one area will have many layer-area intersection clusters, further work is needed to make sense of these). The reason that Gene Finder cannot the find marker genes for cortical areas is that, although the user chooses a seed voxel, Gene Finder chooses the ROI for which genes will be found, and it creates that ROI by (pairwise voxel correlation) clustering around the seed.}. 5.212 5.213 5.214 %% Most of the projects which have been discussed have been done by the same groups that develop the public datasets. Although these projects make their algorithms available for use on their own website, none of them have released an open-source software toolkit; instead, users are restricted to using the provided algorithms only on their own dataset. 5.215 @@ -232,15 +231,15 @@ 5.216 \newpage 5.217 5.218 == Preliminary Studies == 5.219 -\begin{wrapfigure}{L}{0.4\textwidth}\centering 5.220 -%%\includegraphics[scale=.31]{singlegene_SS_corr_top_1_2365_jet.eps}\includegraphics[scale=.31]{singlegene_SS_corr_top_2_242_jet.eps}\includegraphics[scale=.31]{singlegene_SS_corr_top_3_654_jet.eps} 5.221 +\begin{wrapfigure}{L}{0.35\textwidth}\centering 5.222 +%%\includegraphics[scale=.27]{singlegene_SS_corr_top_1_2365_jet.eps}\includegraphics[scale=.27]{singlegene_SS_corr_top_2_242_jet.eps}\includegraphics[scale=.27]{singlegene_SS_corr_top_3_654_jet.eps} 5.223 %%\\ 5.224 -%%\includegraphics[scale=.31]{singlegene_SS_lr_top_1_654_jet.eps}\includegraphics[scale=.31]{singlegene_SS_lr_top_2_685_jet.eps}\includegraphics[scale=.31]{singlegene_SS_lr_top_3_724_jet.eps} 5.225 +%%\includegraphics[scale=.27]{singlegene_SS_lr_top_1_654_jet.eps}\includegraphics[scale=.27]{singlegene_SS_lr_top_2_685_jet.eps}\includegraphics[scale=.27]{singlegene_SS_lr_top_3_724_jet.eps} 5.226 %%\caption{Top row: Genes Nfic, A930001M12Rik, C130038G02Rik are the most correlated with area SS (somatosensory cortex). Bottom row: Genes C130038G02Rik, Cacna1i, Car10 are those with the best fit using logistic regression. Within each picture, the vertical axis roughly corresponds to anterior at the top and posterior at the bottom, and the horizontal axis roughly corresponds to medial at the left and lateral at the right. The red outline is the boundary of region SS. 
Pixels are colored according to correlation, with red meaning high correlation and blue meaning low.} 5.227 5.228 -\includegraphics[scale=.31]{singlegene_SS_corr_top_1_2365_jet.eps}\includegraphics[scale=.31]{singlegene_SS_corr_top_2_242_jet.eps} 5.229 +\includegraphics[scale=.27]{singlegene_SS_corr_top_1_2365_jet.eps}\includegraphics[scale=.27]{singlegene_SS_corr_top_2_242_jet.eps} 5.230 \\ 5.231 -\includegraphics[scale=.31]{singlegene_SS_lr_top_1_654_jet.eps}\includegraphics[scale=.31]{singlegene_SS_lr_top_2_685_jet.eps} 5.232 +\includegraphics[scale=.27]{singlegene_SS_lr_top_1_654_jet.eps}\includegraphics[scale=.27]{singlegene_SS_lr_top_2_685_jet.eps} 5.233 5.234 \caption{Top row: Genes $Nfic$ and $A930001M12Rik$ are the most correlated with area SS (somatosensory cortex). Bottom row: Genes $C130038G02Rik$ and $Cacna1i$ are those with the best fit using logistic regression. Within each picture, the vertical axis roughly corresponds to anterior at the top and posterior at the bottom, and the horizontal axis roughly corresponds to medial at the left and lateral at the right. The red outline is the boundary of region SS. Pixels are colored according to correlation, with red meaning high correlation and blue meaning low.} 5.235 \label{SScorrLr}\end{wrapfigure} 5.236 @@ -265,13 +264,15 @@ 5.237 5.238 At this point, the data are in the form of a number of 2-D matrices, all in registration, with the matrix entries representing a grid of points (pixels) over the cortical surface: 5.239 5.240 +\begin{wrapfigure}{L}{0.2\textwidth}\centering 5.241 +\includegraphics[scale=.27]{holeExample_2682_SS_jet.eps} 5.242 +\caption{Gene $Pitx2$ is selectively underexpressed in area SS.} 5.243 +\label{hole}\end{wrapfigure} 5.244 + 5.245 + 5.246 * A 2-D matrix whose entries represent the regional label associated with each surface pixel 5.247 * For each gene, a 2-D matrix whose entries represent the average expression level underneath each surface pixel 5.248 5.249 -\begin{wrapfigure}{L}{0.2\textwidth}\centering 5.250 -\includegraphics[scale=.31]{holeExample_2682_SS_jet.eps} 5.251 -\caption{Gene $Pitx2$ is selectively underexpressed in area SS.} 5.252 -\label{hole}\end{wrapfigure} 5.253 5.254 5.255 5.256 @@ -306,14 +307,14 @@ 5.257 One of the simplest methods in this class is to use correlation as the match score. We calculated the correlation between each gene and each cortical area. The top row of Figure \ref{SScorrLr} shows the three genes most correlated with area SS. 
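A sketch of correlation as a match score (the array names and the area code are hypothetical placeholders): correlate each gene's surface expression map with the binary mask of the target area, then rank the genes by the score.

\begin{verbatim}
import numpy as np

expression = np.load('surface_expression.npy')   # hypothetical (n_pixels, n_genes)
region_id = np.load('surface_region_ids.npy')    # hypothetical (n_pixels,) integer codes
SS_ID = 7                                        # placeholder code for area SS

target = (region_id == SS_ID).astype(float)      # binary mask of the target area

# Pointwise score: Pearson correlation of each gene's map with the area mask.
scores = np.array([np.corrcoef(expression[:, g], target)[0, 1]
                   for g in range(expression.shape[1])])

best = np.argsort(scores)[::-1][:3]              # the most correlated genes
print(best, scores[best])
\end{verbatim}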
5.258 5.259 5.260 -\begin{wrapfigure}{L}{0.4\textwidth}\centering 5.261 -%%\includegraphics[scale=.31]{singlegene_AUD_lr_top_1_3386_jet.eps}\includegraphics[scale=.31]{singlegene_AUD_lr_top_2_1258_jet.eps}\includegraphics[scale=.31]{singlegene_AUD_lr_top_3_420_jet.eps} 5.262 +\begin{wrapfigure}{L}{0.35\textwidth}\centering 5.263 +%%\includegraphics[scale=.27]{singlegene_AUD_lr_top_1_3386_jet.eps}\includegraphics[scale=.27]{singlegene_AUD_lr_top_2_1258_jet.eps}\includegraphics[scale=.27]{singlegene_AUD_lr_top_3_420_jet.eps} 5.264 %% 5.265 -%%\includegraphics[scale=.31]{singlegene_AUD_gr_top_1_2856_jet.eps}\includegraphics[scale=.31]{singlegene_AUD_gr_top_2_420_jet.eps}\includegraphics[scale=.31]{singlegene_AUD_gr_top_3_2072_jet.eps} 5.266 +%%\includegraphics[scale=.27]{singlegene_AUD_gr_top_1_2856_jet.eps}\includegraphics[scale=.27]{singlegene_AUD_gr_top_2_420_jet.eps}\includegraphics[scale=.27]{singlegene_AUD_gr_top_3_2072_jet.eps} 5.267 %%\caption{The top row shows the three genes which (individually) best predict area AUD, according to logistic regression. The bottom row shows the three genes which (individually) best match area AUD, according to gradient similarity. From left to right and top to bottom, the genes are $Ssr1$, $Efcbp1$, $Aph1a$, $Ptk7$, $Aph1a$ again, and $Lepr$} 5.268 -\includegraphics[scale=.31]{singlegene_AUD_lr_top_1_3386_jet.eps}\includegraphics[scale=.31]{singlegene_AUD_lr_top_2_1258_jet.eps} 5.269 +\includegraphics[scale=.27]{singlegene_AUD_lr_top_1_3386_jet.eps}\includegraphics[scale=.27]{singlegene_AUD_lr_top_2_1258_jet.eps} 5.270 \\ 5.271 -\includegraphics[scale=.31]{singlegene_AUD_gr_top_1_2856_jet.eps}\includegraphics[scale=.31]{singlegene_AUD_gr_top_2_420_jet.eps} 5.272 +\includegraphics[scale=.27]{singlegene_AUD_gr_top_1_2856_jet.eps}\includegraphics[scale=.27]{singlegene_AUD_gr_top_2_420_jet.eps} 5.273 \caption{The top row shows the two genes which (individually) best predict area AUD, according to logistic regression. The bottom row shows the two genes which (individually) best match area AUD, according to gradient similarity. From left to right and top to bottom, the genes are $Ssr1$, $Efcbp1$, $Ptk7$, and $Aph1a$.} 5.274 \label{AUDgeometry}\end{wrapfigure} 5.275 5.276 @@ -327,10 +328,10 @@ 5.277 This finds pairs of genes which are most informative (at least at these discretization thresholds) relative to the question, "Is this surface pixel a member of the target area?". Its advantage over linear methods such as logistic regression is that it takes account of arbitrarily nonlinear relationships; for example, if the XOR of two variables predicts the target, conditional entropy would notice, whereas linear methods would not. 5.278 5.279 5.280 -\begin{wrapfigure}{L}{0.4\textwidth}\centering 5.281 -\includegraphics[scale=.31]{MO_vs_Wwc1_jet.eps}\includegraphics[scale=.31]{MO_vs_Mtif2_jet.eps} 5.282 - 5.283 -\includegraphics[scale=.31]{MO_vs_Wwc1_plus_Mtif2_jet.eps} 5.284 +\begin{wrapfigure}{L}{0.35\textwidth}\centering 5.285 +\includegraphics[scale=.27]{MO_vs_Wwc1_jet.eps}\includegraphics[scale=.27]{MO_vs_Mtif2_jet.eps} 5.286 + 5.287 +\includegraphics[scale=.27]{MO_vs_Wwc1_plus_Mtif2_jet.eps} 5.288 \caption{Upper left: $wwc1$. Upper right: $mtif2$. 
Lower left: wwc1 + mtif2 (each pixel's value on the lower left is the sum of the corresponding pixels in the upper row).} 5.289 \label{MOcombo}\end{wrapfigure} 5.290 5.291 @@ -352,19 +353,20 @@ 5.292 5.293 \vspace{0.3cm}**Gradient similarity provides information complementary to correlation** 5.294 5.295 -To show that gradient similarity can provide useful information that cannot be detected via pointwise analyses, consider Fig. \ref{AUDgeometry}. The top row of Fig. \ref{AUDgeometry} displays the 3 genes which most match area AUD, according to a pointwise method\footnote{For each gene, a logistic regression in which the response variable was whether or not a surface pixel was within area AUD, and the predictor variable was the value of the expression of the gene underneath that pixel. The resulting scores were used to rank the genes in terms of how well they predict area AUD.}. The bottom row displays the 3 genes which most match AUD according to a method which considers local geometry\footnote{For each gene the gradient similarity between (a) a map of the expression of each gene on the cortical surface and (b) the shape of area AUD, was calculated, and this was used to rank the genes.} The pointwise method in the top row identifies genes which express more strongly in AUD than outside of it; its weakness is that this includes many areas which don't have a salient border matching the areal border. The geometric method identifies genes whose salient expression border seems to partially line up with the border of AUD; its weakness is that this includes genes which don't express over the entire area. Genes which have high rankings using both pointwise and border criteria, such as $Aph1a$ in the example, may be particularly good markers. None of these genes are, individually, a perfect marker for AUD; we deliberately chose a "difficult" area in order to better contrast pointwise with geometric methods. 
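For completeness, one way to compute the conditional-entropy pair score discussed above is sketched here (the two-level discretization and the median thresholds are illustrative choices; lower values mean the gene pair carries more information about areal membership):

\begin{verbatim}
import numpy as np

def conditional_entropy(label, gene_a, gene_b):
    # H(label | gene_a, gene_b) for a binary label and two binarized genes,
    # estimated from empirical counts (natural logarithm).
    h, n = 0.0, len(label)
    for a in (0, 1):
        for b in (0, 1):
            cell = (gene_a == a) & (gene_b == b)
            p_cell = cell.sum() / n
            if p_cell == 0:
                continue
            for y in (0, 1):
                p_joint = ((label == y) & cell).sum() / n
                if p_joint > 0:
                    h -= p_joint * np.log(p_joint / p_cell)
    return h

# Example usage (arrays are hypothetical): binarize two genes at their medians
# and score them against membership in the target area.
# h = conditional_entropy(in_area,
#                         expr_a > np.median(expr_a),
#                         expr_b > np.median(expr_b))
\end{verbatim}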
5.296 - 5.297 - 5.298 - 5.299 -\begin{wrapfigure}{L}{0.4\textwidth}\centering 5.300 -\includegraphics[scale=.31]{singlegene_example_2682_Pitx2_SS_jet.eps}\includegraphics[scale=.31]{singlegene_example_371_Aldh1a2_SSs_jet.eps} 5.301 -\includegraphics[scale=.31]{singlegene_example_2759_Ppfibp1_PIR_jet.eps}\includegraphics[scale=.31]{singlegene_example_3310_Slco1a5_FRP_jet.eps} 5.302 -\includegraphics[scale=.31]{singlegene_example_3709_Tshz2_RSP_jet.eps}\includegraphics[scale=.31]{singlegene_example_3674_Trhr_COApm_jet.eps} 5.303 -\includegraphics[scale=.31]{singlegene_example_925_Col12a1_ACA+PL+ILA+DP+ORB+MO_jet.eps}\includegraphics[scale=.31]{singlegene_example_1334_Ets1_post_lat_vis_jet.eps} 5.304 + 5.305 +\begin{wrapfigure}{L}{0.35\textwidth}\centering 5.306 +\includegraphics[scale=.27]{singlegene_example_2682_Pitx2_SS_jet.eps}\includegraphics[scale=.27]{singlegene_example_371_Aldh1a2_SSs_jet.eps} 5.307 +\includegraphics[scale=.27]{singlegene_example_2759_Ppfibp1_PIR_jet.eps}\includegraphics[scale=.27]{singlegene_example_3310_Slco1a5_FRP_jet.eps} 5.308 +\includegraphics[scale=.27]{singlegene_example_3709_Tshz2_RSP_jet.eps}\includegraphics[scale=.27]{singlegene_example_3674_Trhr_COApm_jet.eps} 5.309 +\includegraphics[scale=.27]{singlegene_example_925_Col12a1_ACA+PL+ILA+DP+ORB+MO_jet.eps}\includegraphics[scale=.27]{singlegene_example_1334_Ets1_post_lat_vis_jet.eps} 5.310 5.311 \caption{From left to right and top to bottom, single genes which roughly identify areas SS (somatosensory primary \begin{latex}+\end{latex} supplemental), SSs (supplemental somatosensory), PIR (piriform), FRP (frontal pole), RSP (retrosplenial), COApm (Cortical amygdalar, posterior part, medial zone). Grouping some areas together, we have also found genes to identify the groups ACA+PL+ILA+DP+ORB+MO (anterior cingulate, prelimbic, infralimbic, dorsal peduncular, orbital, motor), posterior and lateral visual (VISpm, VISpl, VISI, VISp; posteromedial, posterolateral, lateral, and primary visual; the posterior and lateral visual area is distinguished from its neighbors, but not from the entire rest of the cortex). The genes are $Pitx2$, $Aldh1a2$, $Ppfibp1$, $Slco1a5$, $Tshz2$, $Trhr$, $Col12a1$, $Ets1$.} 5.312 \label{singleSoFar}\end{wrapfigure} 5.313 5.314 +To show that gradient similarity can provide useful information that cannot be detected via pointwise analyses, consider Fig. \ref{AUDgeometry}. The top row of Fig. \ref{AUDgeometry} displays the 3 genes which most match area AUD, according to a pointwise method\footnote{For each gene, a logistic regression in which the response variable was whether or not a surface pixel was within area AUD, and the predictor variable was the value of the expression of the gene underneath that pixel. The resulting scores were used to rank the genes in terms of how well they predict area AUD.}. The bottom row displays the 3 genes which most match AUD according to a method which considers local geometry\footnote{For each gene the gradient similarity between (a) a map of the expression of each gene on the cortical surface and (b) the shape of area AUD, was calculated, and this was used to rank the genes.} The pointwise method in the top row identifies genes which express more strongly in AUD than outside of it; its weakness is that this includes many areas which don't have a salient border matching the areal border. 
The geometric method identifies genes whose salient expression border seems to partially line up with the border of AUD; its weakness is that this includes genes which don't express over the entire area. Genes which have high rankings using both pointwise and border criteria, such as $Aph1a$ in the example, may be particularly good markers. None of these genes are, individually, a perfect marker for AUD; we deliberately chose a "difficult" area in order to better contrast pointwise with geometric methods. 5.315 + 5.316 + 5.317 + 5.318 \vspace{0.3cm}**Areas which can be identified by single genes** 5.319 Using gradient similarity, we have already found single genes which roughly identify some areas and groupings of areas. For each of these areas, an example of a gene which roughly identifies it is shown in Figure \ref{singleSoFar}. We have not yet cross-verified these genes in other atlases. 5.320 5.321 @@ -396,10 +398,6 @@ 5.322 5.323 === Multivariate Predictive methods === 5.324 5.325 -\vspace{0.3cm}**Forward stepwise logistic regression** 5.326 -Logistic regression is a popular method for predictive modeling of categorial data. As a pilot run, for five cortical areas (SS, AUD, RSP, VIS, and MO), we performed forward stepwise logistic regression to find single genes, pairs of genes, and triplets of genes which predict areal identify. This is an example of feature selection integrated with prediction using a stepwise wrapper. Some of the single genes found were shown in various figures throughout this document, and Figure \ref{MOcombo} shows a combination of genes which was found. 5.327 - 5.328 -We felt that, for single genes, gradient similarity did a better job than logistic regression at capturing our subjective impression of a "good gene". 5.329 5.330 \begin{wrapfigure}{L}{0.6\textwidth}\centering 5.331 \includegraphics[scale=1]{merge3_norm_hv_PCA_ndims50_prototypes_collage_sm_border.eps} 5.332 @@ -411,6 +409,12 @@ 5.333 \label{dimReduc}\end{wrapfigure} 5.334 5.335 5.336 +\vspace{0.3cm}**Forward stepwise logistic regression** 5.337 +Logistic regression is a popular method for predictive modeling of categorial data. As a pilot run, for five cortical areas (SS, AUD, RSP, VIS, and MO), we performed forward stepwise logistic regression to find single genes, pairs of genes, and triplets of genes which predict areal identify. This is an example of feature selection integrated with prediction using a stepwise wrapper. Some of the single genes found were shown in various figures throughout this document, and Figure \ref{MOcombo} shows a combination of genes which was found. 5.338 + 5.339 +We felt that, for single genes, gradient similarity did a better job than logistic regression at capturing our subjective impression of a "good gene". 5.340 + 5.341 + 5.342 \vspace{0.3cm}**SVM on all genes at once** 5.343 5.344 In order to see how well one can do when looking at all genes at once, we ran a support vector machine to classify cortical surface pixels based on their gene expression profiles. We achieved classification accuracy of about 81%\footnote{5-fold cross-validation.}. This shows that the genes included in the ABA dataset are sufficient to define much of cortical anatomy. However, as noted above, a classifier that looks at all the genes at once isn't as practically useful as a classifier that uses only a few genes. 
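The forward stepwise logistic regression used in the pilot run can be sketched as a greedy wrapper; the score used below (training accuracy) and the stopping rule are illustrative simplifications, not necessarily the exact choices made in the pilot:

\begin{verbatim}
import numpy as np
from sklearn.linear_model import LogisticRegression

def forward_stepwise(X, y, max_genes=3):
    # Greedily add the single gene that most improves a logistic-regression
    # fit to the areal labels, up to max_genes genes.
    selected = []
    for _ in range(max_genes):
        best_gene, best_score = None, -np.inf
        for g in range(X.shape[1]):
            if g in selected:
                continue
            cols = selected + [g]
            model = LogisticRegression(max_iter=1000).fit(X[:, cols], y)
            score = model.score(X[:, cols], y)
            if score > best_score:
                best_gene, best_score = g, score
        selected.append(best_gene)
    return selected

# X: hypothetical (n_pixels, n_genes) expression; y: 1 inside the target area, else 0.
# Note that this refits one model per candidate gene per step, so it is
# expensive when there are thousands of genes.
\end{verbatim}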
5.345 @@ -421,13 +425,15 @@ 5.346 5.347 === Data-driven redrawing of the cortical map === 5.348 5.349 -We have applied the following dimensionality reduction algorithms to reduce the dimensionality of the gene expression profile associated with each voxel: Principal Components Analysis (PCA), Simple PCA (SPCA), Multi-Dimensional Scaling (MDS), Isomap, Landmark Isomap, Laplacian eigenmaps, Local Tangent Space Alignment (LTSA), Hessian locally linear embedding, Diffusion maps, Stochastic Neighbor Embedding (SNE), Stochastic Proximity Embedding (SPE), Fast Maximum Variance Unfolding (FastMVU), Non-negative Matrix Factorization (NNMF). Space constraints prevent us from showing many of the results, but as a sample, PCA, NNMF, and landmark Isomap are shown in the first, second, and third rows of Figure \ref{dimReduc}. 5.350 - 5.351 -\begin{wrapfigure}{L}{0.6\textwidth}\centering 5.352 +\begin{wrapfigure}{L}{0.5\textwidth}\centering 5.353 \includegraphics[scale=.2]{cosine_similarity1_rearrange_colorize.eps} 5.354 \caption{Prototypes corresponding to sample gene clusters, clustered by gradient similarity. Region boundaries for the region that most matches each prototype are overlayed.} 5.355 \label{geneClusters}\end{wrapfigure} 5.356 5.357 + 5.358 + 5.359 +We have applied the following dimensionality reduction algorithms to reduce the dimensionality of the gene expression profile associated with each voxel: Principal Components Analysis (PCA), Simple PCA (SPCA), Multi-Dimensional Scaling (MDS), Isomap, Landmark Isomap, Laplacian eigenmaps, Local Tangent Space Alignment (LTSA), Hessian locally linear embedding, Diffusion maps, Stochastic Neighbor Embedding (SNE), Stochastic Proximity Embedding (SPE), Fast Maximum Variance Unfolding (FastMVU), Non-negative Matrix Factorization (NNMF). Space constraints prevent us from showing many of the results, but as a sample, PCA, NNMF, and landmark Isomap are shown in the first, second, and third rows of Figure \ref{dimReduc}. 5.360 + 5.361 After applying the dimensionality reduction, we ran clustering algorithms on the reduced data. To date we have tried k-means and spectral clustering. The results of k-means after PCA, NNMF, and landmark Isomap are shown in the last row of Figure \ref{dimReduc}. To compare, the leftmost picture on the bottom row of Figure \ref{dimReduc} shows some of the major subdivisions of cortex. These results clearly show that different dimensionality reduction techniques capture different aspects of the data and lead to different clusterings, indicating the utility of our proposal to produce a detailed comparion of these techniques as applied to the domain of genomic anatomy. 5.362 5.363 5.364 @@ -444,19 +450,19 @@ 5.365 5.366 \vspace{0.3cm}**Further work on flatmapping** 5.367 5.368 - 5.369 -In anatomy, the manifold of interest is usually either defined by a combination of two relevant anatomical axes (todo), or by the surface of the structure (as is the case with the cortex). In the former case, the manifold of interest is a plane, but in the latter case it is curved. If the manifold is curved, there are various methods for mapping the manifold into a plane. 5.370 - 5.371 -In the case of the cerebral cortex, it remains to be seen which method of mapping the manifold into a plane is optimal for this application. We will compare mappings which attempt to preserve size (such as the one used by Caret\cite{van_essen_integrated_2001}) with mappings which preserve angle (conformal maps). 
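A sketch of the reduce-then-cluster pipeline just described, using NNMF followed by k-means (the file name, number of components, and number of clusters are placeholder choices):

\begin{verbatim}
import numpy as np
from sklearn.decomposition import NMF
from sklearn.cluster import KMeans

# hypothetical (n_pixels, n_genes), non-negative expression values
expression = np.load('surface_expression.npy')

reduced = NMF(n_components=50, max_iter=500).fit_transform(expression)
clusters = KMeans(n_clusters=12, n_init=10).fit_predict(reduced)

# Each surface pixel now carries a putative region assignment; mapping the
# assignments back onto the cortical flat map gives a candidate data-driven
# parcellation to compare against the existing anatomical subdivisions.
\end{verbatim}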
5.372 - 5.373 -Although there is much 2-D organization in anatomy, there are also structures whose shape is fundamentally 3-dimensional. If possible, we would like the method we develop to include a statistical test that warns the user if the assumption of 2-D structure seems to be wrong. 5.374 - 5.375 - 5.376 -todo amongst other things: 5.377 - 5.378 - 5.379 -layerfinding 5.380 - 5.381 +%%In anatomy, the manifold of interest is usually either defined by a combination of two relevant anatomical axes (todo), or by the surface of the structure (as is the case with the cortex). In the former case, the manifold of interest is a plane, but in the latter case it is curved. If the manifold is curved, there are various methods for mapping the manifold into a plane. 5.382 + 5.383 +%%In the case of the cerebral cortex, it remains to be seen which method of mapping the manifold into a plane is optimal for this application. We will compare mappings which attempt to preserve size (such as the one used by Caret\cite{van_essen_integrated_2001}) with mappings which preserve angle (conformal maps). 5.384 + 5.385 + 5.386 +Often the surface of a structure serves as a natural 2-D basis for anatomical organization. Even when the shape of the surface is known, there are multiple ways to map it into a plane. We will compare mappings which attempt to preserve size (such as the one used by Caret\cite{van_essen_integrated_2001}) with mappings which preserve angle (conformal maps). Although there is much 2-D organization in anatomy, there are also structures whose anatomy is fundamentally 3-dimensional. We plan to include a statistical test that warns the user if the assumption of 2-D structure seems to be wrong. 5.387 + 5.388 +\vspace{0.3cm}**Automatic segmentation of cortical layers** 5.389 + 5.390 + 5.391 + 5.392 +\vspace{0.3cm}**Extension to probabilistic maps** 5.393 +Presently, we do not have a probabilistic atlas which is registered to the ABA space. However, in anticipation of the availability of such maps, we would like to explore extensions to our Aim 1 techniques which can handle probabilistic maps. 5.394 5.395 5.396 5.397 @@ -473,7 +479,7 @@ 5.398 \vspace{0.3cm}**Decision trees** 5.399 todo 5.400 5.401 -For each cortical area, we used the C4.5 algorithm to find a pruned decision tree and ruleset for that area. We achieved estimated classification accuracy of more than 99.6% on each cortical area (as evaluated on the __training data__ without cross-validation; so actual accuracy is expected to be lower). However, the resulting decision trees each made use of many genes. 5.402 +\footnote{Already, for each cortical area, we have used the C4.5 algorithm to find a decision tree for that area. We achieved good classification accuracy on our training set, but the number of genes that appeared in each tree was too large. We plan to implement a pruning procedure to generate trees that use fewer genes.}
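The pruning step mentioned in the footnote could take several forms; as one hedged illustration (not the planned implementation), the sketch below uses scikit-learn's CART trees with cost-complexity pruning in place of C4.5, and reports accuracy by cross-validation rather than on the training set.

\begin{verbatim}
# Illustration only: a pruned decision tree for one cortical area, with
# cross-validated accuracy and a count of how many genes the tree consults.
# scikit-learn's CART + cost-complexity pruning stands in for C4.5, which
# would be run separately (e.g. via Weka's J48).
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score

def pruned_tree_for_area(X, y_area, alpha=0.01, cv=5):
    tree = DecisionTreeClassifier(ccp_alpha=alpha)  # larger alpha = smaller tree
    acc = cross_val_score(tree, X, y_area, cv=cv).mean()
    tree.fit(X, y_area)
    genes_used = int((tree.feature_importances_ > 0).sum())
    return tree, acc, genes_used
\end{verbatim}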
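Separately, the warning about violated 2-D structure proposed in the flatmapping paragraph above could be realized in several ways; one simple possibility (our assumption, not a commitment in the text) is a local-flatness statistic on the voxel coordinates of the structure.

\begin{verbatim}
# One possible form of the 2-D sanity check (purely illustrative): fit a
# plane to each voxel's local neighbourhood and measure the fraction of
# variance lying off that plane. A sheet-like structure (even a curved one,
# like cortex) gives small values; a genuinely 3-D structure gives large ones.
import numpy as np

def mean_local_flatness(coords, k=20, threshold=0.05):
    """coords: (n_voxels x 3) positions of the voxels in one structure."""
    residuals = np.empty(coords.shape[0])
    for i in range(coords.shape[0]):
        d = np.linalg.norm(coords - coords[i], axis=1)
        nbrs = coords[np.argsort(d)[:k]]              # k nearest neighbours
        centered = nbrs - nbrs.mean(axis=0)
        s = np.linalg.svd(centered, compute_uv=False)
        residuals[i] = s[2] ** 2 / (s ** 2).sum()     # off-plane variance share
    if residuals.mean() > threshold:
        print("Warning: the 2-D (surface) assumption looks doubtful here.")
    return residuals.mean()
\end{verbatim}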
5.403 5.404 5.405 \vspace{0.3cm}**Apply these algorithms to the cortex** 5.406 @@ -502,7 +508,8 @@ 5.407 5.408 # self-organizing map 5.409 5.410 -# confirm with EMAGE, GeneAtlas, GENSAT, etc, to fight overfitting 5.411 +# confirm with EMAGE, GeneAtlas, GENSAT, etc., to fight overfitting; also check consistency between the two hemispheres 5.412 + 5.413 5.414 # compare using clustering scores 5.415 5.416 @@ -517,27 +524,11 @@ 5.417 \bibliographystyle{plain} 5.418 \bibliography{grant} 5.419 5.420 -\newpage 5.421 - 5.422 ---- 5.423 - 5.424 -stuff i dunno where to put yet (there is more scattered through grant-oldtext): 5.425 - 5.426 - 5.427 -\vspace{0.3cm}**Principle 4: Work in 2-D whenever possible** 5.428 - 5.429 - 5.430 - 5.431 +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 5.432 5.433 %%if we need citations for aim 3 significance, http://www.sciencedirect.com/science?_ob=ArticleURL&_udi=B6WSS-4V70FHY-9&_user=4429&_coverDate=12%2F26%2F2008&_rdoc=1&_fmt=full&_orig=na&_cdi=7054&_docanchor=&_acct=C000059602&_version=1&_urlVersion=0&_userid=4429&md5=551eccc743a2bfe6e992eee0c3194203#app2 has examples of genetic targeting to specific anatomical regions 5.434 5.435 ---- 5.436 - 5.437 -note: 5.438 - 5.439 - 5.440 - 5.441 -two hemis 5.442 - 5.443 - 5.444 - 5.445 + 5.446 + 5.447 + 5.448 +