# install BiocManager package if not installed yet.
# BiocManager is the package installer for Bioconductor software.
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
# install packages if not yet installed.
pkgs <- c("SingleCellExperiment",
"ExperimentHub",
"edgeR",
"biomaRt",
"DropletUtils",
"scRNAseq",
"scater",
"scuttle",
"scran",
"scry",
"BiocSingular",
"scDblFinder",
"Seurat",
"PCAtools",
"glmpca",
"genefilter",
"pheatmap",
"tidyverse",
"mclust",
"ggplot2",
"devtools",
"SingleR")
notInstalled <- pkgs[!pkgs %in% installed.packages()[,1]]
if(length(notInstalled) > 0){
BiocManager::install(notInstalled)
}
library(devtools)
install_github("immunogenomics/harmony",
dependencies = TRUE,
force = TRUE)
Here, we make use of the data from the publication of Anna Cuomo et al., which we will refer to as the iPSC dataset. The paper describing this dataset can be found via this link.
In the experiment, the authors derived induced pluripotent stem cells (iPSCs) from 125 healthy human donors to study the endoderm differentiation process, in which iPSCs differentiate to endoderm cells over the course of approximately three days. As such, the authors cultured the iPSC cell lines and allowed them to differentiate for three days. During the experiment, cells were harvested at four different time points: day0 (directly at incubation), day1, day2 and day3. Given the biology of endoderm differentiation, these time points should roughly correspond to different cell types: day0 cells are (undifferentiated) iPSCs, day1 cells are mostly mesendoderm cells, day2 cells are mostly “intermediate” cells and day3 cells are mostly fully differentiated endoderm cells.
This dataset was generated using the SMART-Seq2 scRNA-seq protocol.
The final goal of the experiment was to characterize population variation in the process of endoderm differentiation.
For this lab session, we will work with a subset of the data, i.e., the data for the first (alphabetically) 15 patients in the experiment. These are the data you already downloaded for lab session 2 using the Belnet filesender link.
The original data (125 patients) can be downloaded from Zenodo. At the bottom of that webpage, we can download the files raw_counts.csv.zip and cell_metadata_cols.tsv and store them locally. We do not recommend doing this during the lab session, to avoid overloading the WiFi network.
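For completeness, below is a minimal sketch (not run) of how one could assemble a SingleCellExperiment from these two files. Note that the name of the csv file inside the zip archive and the local paths are assumptions; check the actual contents of the download.

# Not run during the lab session. Assumes the zip contains a file named
# "raw_counts.csv" and that both downloads are in the working directory.
counts <- as.matrix(read.csv(unz("raw_counts.csv.zip", "raw_counts.csv"),
                             row.names = 1))
meta <- read.delim("cell_metadata_cols.tsv")
# rows of the metadata should correspond to the columns of the count matrix
sce_full <- SingleCellExperiment::SingleCellExperiment(
  assays = list(counts = counts),
  colData = meta)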
First we read in the count matrix:
library(SingleCellExperiment)
sce <- readRDS("/Users/jg/Desktop/sce_15_cuomo.rds")
#sce <- readRDS("/Users/koenvandenberge/Desktop/sce_15_cuomo.rds")
Exploration of the metadata is essential to get a better idea of what the experiment was about and how it was organized.
colData(sce)[1:5,1:10]
## DataFrame with 5 rows and 10 columns
## assigned auxDir cell_filter cell_name
## <integer> <character> <logical> <character>
## 21554_5#104 1 aux_info TRUE 21554_5#104
## 21554_5#110 1 aux_info TRUE 21554_5#110
## 21554_5#113 1 aux_info TRUE 21554_5#113
## 21554_5#117 1 aux_info TRUE 21554_5#117
## 21554_5#127 1 aux_info TRUE 21554_5#127
## compatible_fragment_ratio day donor expected_format
## <numeric> <character> <character> <character>
## 21554_5#104 0.999981 day2 dixh IU
## 21554_5#110 0.999964 day2 dixh IU
## 21554_5#113 0.999945 day2 dixh IU
## 21554_5#117 0.999916 day2 dixh IU
## 21554_5#127 0.999863 day2 dixh IU
## experiment frag_dist_length
## <character> <integer>
## 21554_5#104 expt_21 1001
## 21554_5#110 expt_21 1001
## 21554_5#113 expt_21 1001
## 21554_5#117 expt_21 1001
## 21554_5#127 expt_21 1001
colnames(colData(sce))
## [1] "assigned"
## [2] "auxDir"
## [3] "cell_filter"
## [4] "cell_name"
## [5] "compatible_fragment_ratio"
## [6] "day"
## [7] "donor"
## [8] "expected_format"
## [9] "experiment"
## [10] "frag_dist_length"
## [11] "gc_bias_correct"
## [12] "is_cell_control"
## [13] "is_cell_control_bulk"
## [14] "is_cell_control_control"
## [15] "library_types"
## [16] "libType"
## [17] "log10_total_counts"
## [18] "log10_total_counts_endogenous"
## [19] "log10_total_counts_ERCC"
## [20] "log10_total_counts_feature_control"
## [21] "log10_total_counts_MT"
## [22] "log10_total_features"
## [23] "log10_total_features_endogenous"
## [24] "log10_total_features_ERCC"
## [25] "log10_total_features_feature_control"
## [26] "log10_total_features_MT"
## [27] "mapping_type"
## [28] "mates1"
## [29] "mates2"
## [30] "n_alt_reads"
## [31] "n_total_reads"
## [32] "num_assigned_fragments"
## [33] "num_bias_bins"
## [34] "num_bootstraps"
## [35] "num_compatible_fragments"
## [36] "num_consistent_mappings"
## [37] "num_inconsistent_mappings"
## [38] "num_libraries"
## [39] "num_mapped"
## [40] "num_processed"
## [41] "num_targets"
## [42] "nvars_used"
## [43] "pct_counts_endogenous"
## [44] "pct_counts_ERCC"
## [45] "pct_counts_feature_control"
## [46] "pct_counts_MT"
## [47] "pct_counts_top_100_features"
## [48] "pct_counts_top_100_features_endogenous"
## [49] "pct_counts_top_100_features_feature_control"
## [50] "pct_counts_top_200_features"
## [51] "pct_counts_top_200_features_endogenous"
## [52] "pct_counts_top_50_features"
## [53] "pct_counts_top_50_features_endogenous"
## [54] "pct_counts_top_50_features_ERCC"
## [55] "pct_counts_top_50_features_feature_control"
## [56] "pct_counts_top_500_features"
## [57] "pct_counts_top_500_features_endogenous"
## [58] "percent_mapped"
## [59] "plate_id"
## [60] "plate_well_id"
## [61] "post_prob"
## [62] "public_name"
## [63] "read_files"
## [64] "salmon_version"
## [65] "samp_type"
## [66] "sample_id"
## [67] "seq_bias_correct"
## [68] "size_factor"
## [69] "start_time"
## [70] "strand_mapping_bias"
## [71] "total_counts"
## [72] "total_counts_endogenous"
## [73] "total_counts_ERCC"
## [74] "total_counts_feature_control"
## [75] "total_counts_MT"
## [76] "total_features"
## [77] "total_features_endogenous"
## [78] "total_features_ERCC"
## [79] "total_features_feature_control"
## [80] "total_features_MT"
## [81] "used_in_expt"
## [82] "well_id"
## [83] "well_type"
## [84] "donor_short_id"
## [85] "donor_long_id"
## [86] "pseudo"
## [87] "PC1_top100hvgs"
## [88] "PC1_top200hvgs"
## [89] "PC1_top500hvgs"
## [90] "PC1_top1000hvgs"
## [91] "PC1_top2000hvgs"
## [92] "princ_curve"
## [93] "princ_curve_scaled01"
As stated in the paper, cells were sampled on 4 time points. Each of these time points is roughly expected to correspond with different cell types (day0 = iPSC, day1 = mesendoderm, day2 = intermediate and day3 = endoderm).
table(colData(sce)$day)
##
## day0 day1 day2 day3
## 876 987 1124 890
As stated in the paper, cells were harvested from 125 patients. Here, we are working on a subset with 15 patients. The number of cells harvested per patient (over all time points) ranges from 31 to 637.
length(table(colData(sce)$donor)) # number of donors
## [1] 15
range(table(colData(sce)$donor)) # cells per donor
## [1] 31 637
Below, we look at how many cells were harvested per patient and per time point.
table(colData(sce)$donor,colData(sce)$day)
##
## day0 day1 day2 day3
## aowh 88 100 93 95
## aoxv 68 58 96 71
## babz 28 0 41 0
## bezi 13 11 4 3
## bima 0 0 44 31
## bokz 159 200 164 114
## cicb 42 21 75 26
## ciwj 40 27 35 39
## cuhk 41 47 39 27
## datg 185 147 136 115
## dixh 0 46 73 84
## eesb 66 106 103 195
## eipl 99 189 198 57
## eiwy 25 18 10 25
## eoxi 22 17 13 8
We see that for many patients the data are complete, i.e., cells were sampled at all four time points.
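We can verify this directly by counting, for each donor, the number of time points at which at least one cell was sampled; a quick sketch:

# number of time points with at least one cell, per donor
rowSums(table(colData(sce)$donor, colData(sce)$day) > 0)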
Practically, the cells were prepared in 28 batches. Since we only consider a subset of the data here, only 14 of these batches are represented.
length(table(colData(sce)$experiment))
## [1] 14
table(colData(sce)$experiment, colData(sce)$day)
##
## day0 day1 day2 day3
## expt_21 0 46 73 84
## expt_22 22 17 13 8
## expt_24 28 0 41 0
## expt_29 73 91 93 86
## expt_30 15 9 0 9
## expt_31 83 68 114 53
## expt_33 70 49 53 64
## expt_34 274 298 247 165
## expt_36 25 18 10 25
## expt_39 13 11 4 3
## expt_41 99 189 198 57
## expt_42 0 0 44 31
## expt_43 134 164 199 266
## expt_45 40 27 35 39
The rowData slot of a SingleCellExperiment object allows for storing information on the features, i.e., the genes, in a dataset. In our object, the rowData slot currently contains the following:
head(rowData(sce))
## DataFrame with 6 rows and 1 column
## V1
## <character>
## 1 ENSG00000000003_TSPAN6
## 2 ENSG00000000419_DPM1
## 3 ENSG00000000457_SCYL3
## 4 ENSG00000000460_C1or..
## 5 ENSG00000001036_FUCA2
## 6 ENSG00000001084_GCLC
To improve our gene-level information, we may:

- Split V1 into two columns, one with the Ensembl ID and the other with the gene symbol.
- Display on which chromosome each gene is located.

Many more options are possible, but they are not necessary for us right now.
rowData(sce) <- data.frame(Ensembl = gsub("_.*", "", rowData(sce)$V1),
Symbol = gsub("^[^_]*_", "", rowData(sce)$V1))
head(rowData(sce))
## DataFrame with 6 rows and 2 columns
## Ensembl Symbol
## <character> <character>
## 1 ENSG00000000003 TSPAN6
## 2 ENSG00000000419 DPM1
## 3 ENSG00000000457 SCYL3
## 4 ENSG00000000460 C1orf112
## 5 ENSG00000001036 FUCA2
## 6 ENSG00000001084 GCLC
# currently issues with ensembl server -> do not evaluate this chunk
library("biomaRt")
ensembl75 <- useEnsembl(biomart = 'genes',
dataset = 'hsapiens_gene_ensembl',
version = 75)
GeneInfo <- getBM(attributes = c("ensembl_gene_id", # To match with rownames SCE
"chromosome_name"), # Info on chromose
mart = ensembl75)
GeneInfo <- GeneInfo[match(rowData(sce)$Ensembl, GeneInfo$ensembl_gene_id),]
rowData(sce) <- cbind(rowData(sce), GeneInfo)
head(rowData(sce))
all(rowData(sce)$Ensembl == rowData(sce)$ensembl_gene_id)
# identical, as desired, so we could optionally remove one of the two
Let us first try the very simple and very lenient filtering criterion that we adopted for the Macosko dataset.
keep <- rowSums(assays(sce)$counts > 0) > 10
table(keep)
## keep
## TRUE
## 11231
We see that this filtering strategy does not remove any genes for this dataset. In general, data from plate-based scRNA-seq protocols have a far higher sequencing depth than data from droplet-based protocols. Requiring a minimum expression of 1 count in at least 10 cells is a very lenient criterion for the almost 4,000 deeply sequenced cells we are working with, so we should consider adopting a more stringent filtering criterion. Below we do so using the filterByExpr function from edgeR:
library(edgeR)
table(colData(sce)$day)
##
## day0 day1 day2 day3
## 876 987 1124 890
keep2 <- edgeR::filterByExpr(y=sce,
group = colData(sce)$day,
min.count = 5,
min.prop = 0.4)
table(keep2)
## keep2
## FALSE TRUE
## 857 10374
sce <- sce[keep2,]
library(scater)
## Loading required package: scuttle
## Loading required package: ggplot2
##
## Attaching package: 'scater'
## The following object is masked from 'package:limma':
##
## plotMDS
# check ERCC spike-in transcripts
sum(grepl("^ERCC-", rowData(sce)$Symbol)) # no spike-in transcripts available
## [1] 0
is.mito <- grepl("^MT", rowData(sce)$chromosome_name)
sum(is.mito) # 0 here: the biomaRt chunk above was not evaluated, so chromosome_name is missing
## [1] 0
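Since the chromosome annotation is missing here, a possible fallback (sketched below, assuming standard HGNC naming) is to flag mitochondrial genes via their symbols, which start with "MT-" in human. The resulting vector could be passed to perCellQCMetrics instead of is.mito.

# fallback: flag mitochondrial genes by symbol (human mitochondrial gene
# symbols start with "MT-", e.g. MT-CO1); the sum may still be 0 if such
# genes were not quantified in this dataset
is.mito.symbol <- grepl("^MT-", rowData(sce)$Symbol)
sum(is.mito.symbol)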
df <- perCellQCMetrics(sce, subsets=list(Mito=is.mito))
head(df)
## DataFrame with 6 rows and 6 columns
## sum detected subsets_Mito_sum subsets_Mito_detected
## <numeric> <numeric> <numeric> <numeric>
## 21554_5#104 138676.3 5305 0 0
## 21554_5#110 685123.5 5927 0 0
## 21554_5#113 1671911.4 5613 0 0
## 21554_5#117 90419.4 6066 0 0
## 21554_5#127 59463.2 6549 0 0
## 21554_5#128 416482.7 7870 0 0
## subsets_Mito_percent total
## <numeric> <numeric>
## 21554_5#104 0 138676.3
## 21554_5#110 0 685123.5
## 21554_5#113 0 1671911.4
## 21554_5#117 0 90419.4
## 21554_5#127 0 59463.2
## 21554_5#128 0 416482.7
# add the QC variables to the sce object
colData(sce) <- cbind(colData(sce), df)
In the figures below, we see that several cells have a very low number of expressed genes and a low library size. (Note that the mitochondrial percentage is zero for all cells here, since we lack mitochondrial annotation.) Cells with very few detected genes are likely damaged, presumably because of loss of cytoplasmic RNA from perforated cells, so we should remove these before the downstream analysis.
# Number of genes vs library size
plotColData(sce, x = "sum", y="detected", colour_by="day")
# Mitochondrial genes
plotColData(sce, x = "detected", y="subsets_Mito_percent", colour_by="day")
Below, we remove cells that are outlying with respect to library size, the number of genes detected, or the percentage of mitochondrial counts.
We remove a total of \(244\) cells, mainly due to low sequencing depth and a low number of genes detected.
lowLib <- isOutlier(df$sum, type="lower", log=TRUE)
lowFeatures <- isOutlier(df$detected, type="lower", log=TRUE)
highMito <- isOutlier(df$subsets_Mito_percent, type="higher")
table(lowLib)
## lowLib
## FALSE TRUE
## 3676 201
table(lowFeatures)
## lowFeatures
## FALSE TRUE
## 3813 64
table(highMito)
## highMito
## FALSE
## 3877
discardCells <- (lowLib | lowFeatures | highMito)
table(discardCells)
## discardCells
## FALSE TRUE
## 3633 244
colData(sce)$discardCells <- discardCells
# visualize cells to be removed
plotColData(sce, x = "detected", y="subsets_Mito_percent", colour_by = "discardCells")
plotColData(sce, x = "sum", y="detected", colour_by="discardCells")
# same figures, colored by donor
plotColData(sce, x = "detected", y="subsets_Mito_percent", colour_by = "donor")
plotColData(sce, x = "sum", y="detected", colour_by="donor")
# same figures, colored by experiment
plotColData(sce, x = "detected", y="subsets_Mito_percent", colour_by = "experiment")
plotColData(sce, x = "sum", y="detected", colour_by="experiment")
table(sce$donor, sce$discardCells)
##
## FALSE TRUE
## aowh 367 9
## aoxv 284 9
## babz 44 25
## bezi 30 1
## bima 73 2
## bokz 625 12
## cicb 161 3
## ciwj 135 6
## cuhk 147 7
## datg 569 14
## dixh 90 113
## eesb 452 18
## eipl 537 6
## eiwy 77 1
## eoxi 42 18
# fractions of removed cells per donor
table(sce$donor, sce$discardCells)/rowSums(table(sce$donor, sce$discardCells))
##
## FALSE TRUE
## aowh 0.97606383 0.02393617
## aoxv 0.96928328 0.03071672
## babz 0.63768116 0.36231884
## bezi 0.96774194 0.03225806
## bima 0.97333333 0.02666667
## bokz 0.98116170 0.01883830
## cicb 0.98170732 0.01829268
## ciwj 0.95744681 0.04255319
## cuhk 0.95454545 0.04545455
## datg 0.97598628 0.02401372
## dixh 0.44334975 0.55665025
## eesb 0.96170213 0.03829787
## eipl 0.98895028 0.01104972
## eiwy 0.98717949 0.01282051
## eoxi 0.70000000 0.30000000
In terms of fractions, most removed cells are from patients dixh and babz.
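As an aside, the same row-wise fractions can be obtained more idiomatically with prop.table:

# equivalent to dividing by rowSums: row-wise proportions
prop.table(table(sce$donor, sce$discardCells), margin = 1)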
table(sce$experiment, sce$discardCells)
##
## FALSE TRUE
## expt_21 90 113
## expt_22 42 18
## expt_24 44 25
## expt_29 336 7
## expt_30 31 2
## expt_31 308 10
## expt_33 227 9
## expt_34 967 17
## expt_36 77 1
## expt_39 30 1
## expt_41 537 6
## expt_42 73 2
## expt_43 736 27
## expt_45 135 6
table(sce$experiment, sce$donor)
##
## aowh aoxv babz bezi bima bokz cicb ciwj cuhk datg dixh eesb eipl eiwy
## expt_21 0 0 0 0 0 0 0 0 0 0 203 0 0 0
## expt_22 0 0 0 0 0 0 0 0 0 0 0 0 0 0
## expt_24 0 0 69 0 0 0 0 0 0 0 0 0 0 0
## expt_29 343 0 0 0 0 0 0 0 0 0 0 0 0 0
## expt_30 33 0 0 0 0 0 0 0 0 0 0 0 0 0
## expt_31 0 0 0 0 0 0 164 0 154 0 0 0 0 0
## expt_33 0 0 0 0 0 0 0 0 0 236 0 0 0 0
## expt_34 0 0 0 0 0 637 0 0 0 347 0 0 0 0
## expt_36 0 0 0 0 0 0 0 0 0 0 0 0 0 78
## expt_39 0 0 0 31 0 0 0 0 0 0 0 0 0 0
## expt_41 0 0 0 0 0 0 0 0 0 0 0 0 543 0
## expt_42 0 0 0 0 75 0 0 0 0 0 0 0 0 0
## expt_43 0 293 0 0 0 0 0 0 0 0 0 470 0 0
## expt_45 0 0 0 0 0 0 0 141 0 0 0 0 0 0
##
## eoxi
## expt_21 0
## expt_22 60
## expt_24 0
## expt_29 0
## expt_30 0
## expt_31 0
## expt_33 0
## expt_34 0
## expt_36 0
## expt_39 0
## expt_41 0
## expt_42 0
## expt_43 0
## expt_45 0
Most low library sizes seem to come from patient dixh; for patient babz the effect is less pronounced.
plotColData(sce[,sce$donor=="dixh"], x = "sum", y="detected")
plotColData(sce[,sce$donor=="babz"], x = "sum", y="detected")
As such, we are mainly removing cells from specific patients and the respective batches in which they were sequenced. However, we want to be careful: we only want to remove technical artefacts, while retaining as much of the biology as possible. In our exploratory figures, we see that the cells we are removing based on the number of genes detected lie quite far from the bulk of the data cloud; as such, these cells may be considered suspicious. For the library-size criterion, however, the removed cells are still strongly connected to the data cloud, so we may want to relax that filtering criterion a little. Recalling how the adaptive threshold strategy works, we may want to remove cells that are 4 MADs away from the center, rather than the default 3 MADs.
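To make the adaptive threshold concrete, here is a small sketch of what isOutlier computes for type = "lower" on the log scale, using the default of 3 MADs; the result should closely match the lowLib flags above.

# lower outliers: more than 3 median absolute deviations below the median,
# computed on log-transformed library sizes
logLib <- log(df$sum)
lowerBound <- median(logLib) - 3 * mad(logLib)
manualLowLib <- logLib < lowerBound
table(manualLowLib) # should closely match table(lowLib)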
# previously
lowLib <- isOutlier(df$sum, type="lower", log=TRUE)
table(lowLib)
## lowLib
## FALSE TRUE
## 3676 201
# after seeing appropriate exploratory figure
lowLib <- isOutlier(df$sum, nmads=4, type="lower", log=TRUE)
table(lowLib)
## lowLib
## FALSE TRUE
## 3783 94
discardCells <- (lowLib | lowFeatures | highMito)
table(discardCells)
## discardCells
## FALSE TRUE
## 3731 146
colData(sce)$discardCells <- discardCells
Note that these steps are not exact; different analysts will arrive at different filtering criteria for many of the steps. The key idea is that we let appropriate exploratory figures guide us to make reasonable choices; i.e., we look at the data rather than blindly following a standardized pipeline that may work well in many cases, but perhaps not for our particular dataset.
# remove cells identified using adaptive thresholds
sce <- sce[, !colData(sce)$discardCells]
For normalization, the size factors \(s_i\) computed here are simply scaled library sizes:
\[ N_i = \sum_g Y_{gi}, \qquad s_i = \frac{N_i}{\bar{N}}, \quad \text{with } \bar{N} = \frac{1}{n}\sum_{i=1}^n N_i. \]
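A quick manual check of this formula; the result should match librarySizeFactors(sce), which is computed below.

libSizes <- colSums(assays(sce)$counts) # N_i, the library sizes
sfManual <- libSizes / mean(libSizes)   # s_i, scaled to have mean 1
summary(sfManual)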
sce <- logNormCounts(sce)
# note we also returned log counts: see the additional logcounts assay.
sce
## class: SingleCellExperiment
## dim: 10374 3731
## metadata(0):
## assays(2): counts logcounts
## rownames: NULL
## rowData names(2): Ensembl Symbol
## colnames(3731): 21554_5#128 21554_5#142 ... 24947_6#91 24947_6#98
## colData names(101): assigned auxDir ... discardCells sizeFactor
## reducedDimNames(0):
## mainExpName: NULL
## altExpNames(0):
# you can extract size factors using
sf <- librarySizeFactors(sce)
mean(sf) # equal to 1 due to scaling.
## [1] 1
plot(x= log(colSums(assays(sce)$counts)),
y=sf)
— end lab session 1 —
library(scran)
rownames(sce) <- rowData(sce)$Ensembl
dec <- modelGeneVar(sce)
head(dec)
## DataFrame with 6 rows and 6 columns
## mean total tech bio p.value FDR
## <numeric> <numeric> <numeric> <numeric> <numeric> <numeric>
## ENSG00000000003 5.452309 0.863745 1.28581 -0.4220675 0.773732 0.882711
## ENSG00000000419 5.832487 1.028264 1.07766 -0.0493993 0.541773 0.882402
## ENSG00000000457 0.760778 1.175215 1.72646 -0.5512453 0.767517 0.882402
## ENSG00000000460 3.112458 1.545048 2.62346 -1.0784118 0.826574 0.888076
## ENSG00000001036 3.570987 2.180611 2.44778 -0.2671676 0.598622 0.882402
## ENSG00000001084 1.698901 2.386060 2.55583 -0.1697705 0.560411 0.882402
fit <- metadata(dec)
plot(fit$mean, fit$var,
xlab="Mean of log-expression",
ylab="Variance of log-expression")
curve(fit$trend(x), col="dodgerblue", add=TRUE, lwd=2)
# get top 1000 highly variable genes
hvg <- getTopHVGs(dec,
n=1000)
head(hvg)
## [1] "ENSG00000147869" "ENSG00000158815" "ENSG00000095596" "ENSG00000104371"
## [5] "ENSG00000185155" "ENSG00000120937"
# plot these
plot(fit$mean, fit$var,
col = c("orange", "darkseagreen3")[(names(fit$mean) %in% hvg)+1],
xlab="Mean of log-expression",
ylab="Variance of log-expression")
curve(fit$trend(x), col="dodgerblue", add=TRUE, lwd=2)
legend("topleft",
legend = c("Selected", "Not selected"),
col = c("darkseagreen3", "orange"),
pch = 16,
bty='n')
set.seed(1234)
sce <- runPCA(sce,
ncomponents=30,
subset_row=hvg)
plotPCA(sce,
colour_by = "day")
PCA has been performed, and the PCA information has been automatically stored in the reducedDim slot of the SingleCellExperiment object.
reducedDimNames(sce)
## [1] "PCA"
head(reducedDim(sce,
type="PCA"))
## PC1 PC2 PC3 PC4 PC5 PC6
## 21554_5#128 -27.364852 9.594060 11.373564 30.76916 15.394118 -27.80931
## 21554_5#142 -26.726070 8.421937 8.355765 32.85938 -6.317488 -23.51960
## 21554_5#174 -16.417046 16.397747 9.339535 29.60662 20.174990 -23.62026
## 21554_5#176 -4.164451 15.322549 23.551281 26.57112 31.728127 -19.74506
## 21554_5#181 -22.143156 7.574105 9.966680 36.27930 12.204297 -26.26973
## 21554_5#183 -15.698590 15.525954 -13.216317 33.72004 -2.447697 -30.98004
## PC7 PC8 PC9 PC10 PC11 PC12
## 21554_5#128 -9.9341013 1.1274408 1.9843292 2.089876 -6.675600 -2.5301641
## 21554_5#142 8.7769917 9.3930201 -15.5791940 -11.423582 -1.627703 -4.0035113
## 21554_5#174 10.2249020 -1.3204708 -6.4121141 1.720833 17.826228 9.1196366
## 21554_5#176 -0.6745669 -0.9984625 -9.3857501 -4.181246 22.207032 4.5883345
## 21554_5#181 -0.9321336 -0.2769346 4.6500493 2.622540 -6.233380 0.2515426
## 21554_5#183 7.3013430 6.9185899 -0.2620361 -4.045207 -3.049075 -2.0505240
## PC13 PC14 PC15 PC16 PC17 PC18
## 21554_5#128 -3.4087788 0.02235213 1.420843 0.7293154 1.641542 0.6151673
## 21554_5#142 -9.8428164 -8.27401019 -1.105121 -3.1555296 -3.133926 -1.3455544
## 21554_5#174 -5.6386833 -2.31652358 4.070325 1.2707437 5.098201 0.3038039
## 21554_5#176 0.2916532 -14.56508569 -6.339897 -2.6535029 5.029821 -0.7144418
## 21554_5#181 -4.9837082 8.00451503 4.048105 9.2870735 4.160989 1.2141390
## 21554_5#183 -2.9067374 1.20335452 5.120246 3.1371635 6.098355 -2.2775478
## PC19 PC20 PC21 PC22 PC23 PC24
## 21554_5#128 5.2011841 -6.907664 -6.2647843 3.606747 -1.7431652 -1.5744543
## 21554_5#142 -0.3342526 9.669928 -0.8871485 -4.424970 11.8442676 1.7455513
## 21554_5#174 0.3691554 2.859107 3.0277004 -2.735873 5.1592166 -5.9721144
## 21554_5#176 -5.3943048 -2.840958 2.5375172 1.622524 4.6667312 -5.6725527
## 21554_5#181 3.2186974 -3.446394 -0.1923339 -2.137403 -0.7645622 -0.3988127
## 21554_5#183 2.0889075 -2.666797 0.1332723 1.779886 3.6862353 2.0159151
## PC25 PC26 PC27 PC28 PC29 PC30
## 21554_5#128 -0.102636 0.7537463 -3.5420691 3.7249324 5.837253 -1.4452956
## 21554_5#142 -3.035680 1.2788784 4.0515276 1.7271103 -4.905858 -3.7472445
## 21554_5#174 -6.154168 -6.3600204 -0.2767746 4.4168734 7.188644 -3.2498909
## 21554_5#176 -3.992011 -4.7046345 -0.4531632 2.8712394 2.112673 -9.8468144
## 21554_5#181 2.443517 0.9264037 -0.4445118 4.1346673 -1.364736 2.1226600
## 21554_5#183 -2.209305 -3.0687859 0.1412868 0.4865902 2.936611 0.5889153
The plotPCA function of the scater package now allows us to visualize the cells in PCA space, based on the PCA information stored in our object:
plotPCA(sce,
colour_by = "day")
We see that for this dataset, PCA is able to distinguish between the different developmental stages quite well.
library(glmpca)
set.seed(211103)
poipca <- glmpca(Y = assays(sce)$counts[hvg,],
L = 2,
fam = "poi",
minibatch = "stochastic")
reducedDim(sce, "PoiPCA") <- poipca$factors
plotReducedDim(sce,
dimred="PoiPCA",
colour_by = "day")
Using glmpca, we observe a similar reduced-dimension plot as for the classical PCA approach, with reasonable separation between cells of different developmental stages.
set.seed(8778)
sce <- runTSNE(sce,
dimred = 'PCA',
external_neighbors=TRUE)
plotTSNE(sce,
colour_by = "day")
In this 2D t-SNE space, it is clear that cells of different developmental stages cluster separately. However, there appears to be some additional heterogeneity: we observe multiple clusters of cells sampled at the same time point, and some clusters of cells from different time points, while separate, still lie very close together in 2D space.
We will explore this phenomenon in more detail later.
set.seed(65187)
sce <- runUMAP(sce,
dimred = "PCA",
min_dist = 0.4,
n_dimred = 12,
external_neighbors = TRUE)
plotUMAP(sce,
colour_by = "day")
In this UMAP, we observe a pattern very similar to the t-SNE above: cells of different developmental stages cluster separately, but there seems to be an additional level of heterogeneity in the data.
— end lab session 2 —
In this experiment, our main interest is to study the endoderm differentiation process, i.e. the 4-day differentiation process of induced pluripotent stem cells (iPSCs) at day0, via mesendoderm cells (day1) and another intermediate stage (day2) to endoderm cells (day3).
However, we will need to account for the fact that the cells have been sampled from 15 different subjects, which introduces additional biological heterogeneity. There are two variables in the colData of our SingleCellExperiment object that are useful for exploring this:
table(sce$donor,sce$experiment)
##
## expt_21 expt_22 expt_24 expt_29 expt_30 expt_31 expt_33 expt_34 expt_36
## aowh 0 0 0 342 32 0 0 0 0
## aoxv 0 0 0 0 0 0 0 0 0
## babz 0 0 54 0 0 0 0 0 0
## bezi 0 0 0 0 0 0 0 0 0
## bima 0 0 0 0 0 0 0 0 0
## bokz 0 0 0 0 0 0 0 635 0
## cicb 0 0 0 0 0 164 0 0 0
## ciwj 0 0 0 0 0 0 0 0 0
## cuhk 0 0 0 0 0 152 0 0 0
## datg 0 0 0 0 0 0 235 346 0
## dixh 109 0 0 0 0 0 0 0 0
## eesb 0 0 0 0 0 0 0 0 0
## eipl 0 0 0 0 0 0 0 0 0
## eiwy 0 0 0 0 0 0 0 0 78
## eoxi 0 47 0 0 0 0 0 0 0
##
## expt_39 expt_41 expt_42 expt_43 expt_45
## aowh 0 0 0 0 0
## aoxv 0 0 0 292 0
## babz 0 0 0 0 0
## bezi 30 0 0 0 0
## bima 0 0 73 0 0
## bokz 0 0 0 0 0
## cicb 0 0 0 0 0
## ciwj 0 0 0 0 137
## cuhk 0 0 0 0 0
## datg 0 0 0 0 0
## dixh 0 0 0 0 0
## eesb 0 0 0 466 0
## eipl 0 539 0 0 0
## eiwy 0 0 0 0 0
## eoxi 0 0 0 0 0
We have cells from 15 different patients and 14 different “experiments” (= sequencing batches).
We will now assess whether this additional source of heterogeneity is also picked up in the reduced-dimension plots.
# time effect in PCA space, all time points
plotPCA(sce,
colour_by = "day")
# donor (nuisance) effect in PCA space, all time points
plotPCA(sce,
colour_by = "donor")
# experiment (nuisance) effect in PCA space, all time points
plotPCA(sce,
colour_by = "experiment")
We see that within a certain time point, cells of the same patient/experiment seem to cluster together at least to some extent. This effect becomes clearer when we visualize the data of the different time points separately.
# donor effect in PCA space, per time point
plotPCA(sce[,sce$day=="day0"],
colour_by = "donor")
plotPCA(sce[,sce$day=="day1"],
colour_by = "donor")
plotPCA(sce[,sce$day=="day2"],
colour_by = "donor")
plotPCA(sce[,sce$day=="day3"],
colour_by = "donor")
Analogously, we may inspect the effect of patient and experiment in the UMAP visualization we created earlier.
# time effect
plotUMAP(sce,
colour_by = "day")
# nuisance effects in UMAP space, all time points
plotUMAP(sce,
colour_by = "donor")
plotUMAP(sce,
colour_by = "experiment")
As expected, we see that the additional heterogeneity observed in the clusters colored based on the time points can be explained by the patient effects.
In this experiment, the primary interest is in the changes in gene expression across the four days, reflecting differentiation from induced pluripotent stem cells to endoderm cells. In contrast, the between-patient effects are not of interest here. Using batch correction, we will aim to “correct” for the donor effects, while hopefully retaining the main biological variation of interest!
We will explore two popular strategies for batch correction for scRNA-Seq data: Seurat CCA and Harmony.
We will first integrate the data across the different donors using Seurat. This procedure integrates datasets/batches in a pairwise fashion; in the case of multiple batches/datasets, it proceeds bottom-up, starting by integrating pairs of samples. Because of this, we introduce the methodology below for two batches/datasets, but note that an analogous procedure is applied in the case of multiple batches/datasets.
The Seurat method will first perform feature selection to identify features that are informative in all datasets. Using these features, it performs a canonical correlation analysis (CCA); a dimensionality reduction technique that focuses on the shared variation between datasets/batches. The CCA dimensions may be viewed as gene modules that are present in each dataset. Within this shared space, it identifies anchor cells: pairs of cells, one from each dataset/batch, that may be considered to share the same biological state, such that systematic differences between them correspond to batch/dataset-specific effects. A correction of the original gene expression matrix is then applied by considering the systematic differences across all anchor cells identified for each pair of batches/datasets.
library(Seurat)
seurat_obj <- as.Seurat(sce)
seurat_obj
## An object of class Seurat
## 10374 features across 3731 samples within 1 assay
## Active assay: originalexp (10374 features, 0 variable features)
## 4 dimensional reductions calculated: PCA, PoiPCA, TSNE, UMAP
In the code chunk below, I remove cells from patients that have 30 cells or fewer. If we do not do this, we will run into issues downstream with the Seurat functions FindIntegrationAnchors and IntegrateData, which break down when the number of cells per batch is small. This is a known issue, and the package maintainers suggest either removing small batches or manually merging them. We will here simply remove the cells of patient “bezi”.
table(seurat_obj$donor)
##
## aowh aoxv babz bezi bima bokz cicb ciwj cuhk datg dixh eesb eipl eiwy eoxi
## 374 292 54 30 73 635 164 137 152 581 109 466 539 78 47
table(seurat_obj$donor)[table(seurat_obj$donor) <= 30]
## bezi
## 30
seurat_obj <- seurat_obj[,-which(seurat_obj$donor == names(table(seurat_obj$donor)[table(seurat_obj$donor) <= 30]))]
After this, the SplitObject function is used to generate a list of Seurat objects, where each list element holds the data of one batch (patient):
seurat_obj.list <- SplitObject(seurat_obj, split.by = "donor")
nlevels(as.factor(sce$donor)) # originally 15 patients
## [1] 15
length(seurat_obj.list) # 14 patients left
## [1] 14
Next, Seurat will perform the following steps for batch correction:

- NormalizeData: by default, takes the count assay of the Seurat object and performs a log-transformation, resulting in an additional log-transformed assay. This is performed for each batch separately.
- FindVariableFeatures: feature selection, using the variance-stabilizing transformation (VST) from Seurat, which amounts to calculating Pearson residuals from a regularized negative binomial regression model with sequencing depth as a covariate. This is performed for each batch separately.
- SelectIntegrationFeatures: chooses the features to use when integrating multiple datasets or batches. This function ranks features by the number of batches in which they are deemed variable, breaking ties by the median variable-feature rank across datasets. It returns the top-scoring features by this ranking, which are then used in the steps below.
- FindIntegrationAnchors: for each pair of datasets we want to integrate, we identify anchor cells, one from each dataset, that are assumed to share a similar biological state. Anchor cells are identified as mutual nearest neighbors in the shared canonical correlation analysis space, and are scored and weighted according to their quality.
- IntegrateData: anchor cells are used to calculate a ‘corrected’ data matrix, removing systematic differences between anchor cells. This creates an integrated assay in the Seurat object containing the corrected data matrix, which may then be used as such for downstream visualization and analysis.
# Normalize and identify variable features for each dataset (patient) independently
seurat_obj.list <- lapply(X = seurat_obj.list, FUN = function(x) {
x <- NormalizeData(x,verbose = FALSE)
x <- FindVariableFeatures(x,
selection.method = "vst",
nfeatures = 1000,
verbose = FALSE)
})
# Select features that are repeatedly variable across datasets for integration
features <- SelectIntegrationFeatures(object.list = seurat_obj.list)
# Identify pairs of cells with similar biological state
anchors <- FindIntegrationAnchors(object.list = seurat_obj.list,
anchor.features = features,
verbose = FALSE)
# This command creates an 'integrated' data assay. We have set the `k.weight`
# argument, which specifies the number of neighbors to consider when weighting
# anchors, to 30 (default is 100). This was necessary to avoid errors, since we
# have many batches with fewer than 100 cells.
data.combined <- IntegrateData(anchorset = anchors,
k.weight = 30,
verbose=FALSE)
data.combined
## An object of class Seurat
## 12374 features across 3701 samples within 2 assays
## Active assay: integrated (2000 features, 2000 variable features)
## 1 other assay present: originalexp
Now we have a new Seurat object, which again is a single object (not a list), storing the expression values of all cells after batch correction. Note that the original expression values are still present in the originalexp assay of the object.
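We can confirm this by listing the assays and checking which one is now active; a quick sketch using the standard Seurat accessors:

Assays(data.combined)        # both "originalexp" and "integrated" are present
DefaultAssay(data.combined)  # "integrated" is now the active assay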
Finally, we may use Seurat functions to perform dimension reduction and visualize the batch-corrected data.
# Run the standard Seurat workflow for visualization and clustering
# Step 1: Scales and centers features in the dataset, prior step to PCA
data.combined <- ScaleData(object = data.combined,
verbose = FALSE)
# Step 2: Perform PCA to 30 dimensions
data.combined <- RunPCA(object = data.combined,
npcs = 30,
reduction.name = "PCA_SeuBatch",
verbose = FALSE)
# Step 3: Perform UMAP on the first 12 principal components
data.combined <- RunUMAP(object = data.combined,
reduction = "PCA_SeuBatch",
reduction.name = "UMAP_SeuBatch",
dims = 1:12,
verbose = FALSE)
## Warning: The default method for RunUMAP has changed from calling Python UMAP via reticulate to the R-native UWOT using the cosine metric
## To use Python UMAP via reticulate, set umap.method to 'umap-learn' and metric to 'correlation'
## This message will be shown once per session
# UMAP visualization
p1 <- DimPlot(object = data.combined,
reduction = "UMAP_SeuBatch",
group.by = "day")
p2 <- DimPlot(object = data.combined,
reduction = "UMAP_SeuBatch",
group.by = "donor")
p1 + p2
Alternatively, we can transform the Seurat object back to a SingleCellExperiment object and generate the visualizations using the Bioconductor functions that we used previously:
# Convert Seurat object to SingleCellExperiment object
sce_intSeurat <- as.SingleCellExperiment(data.combined)
# UMAP without Seurat batch correction
p1 <- plotUMAP(sce,
colour_by = "day") + ggtitle("Day - no batch")
p2 <- plotUMAP(sce,
colour_by = "donor") + ggtitle("Donor - no batch")
# UMAP with Seurat batch correction
sce_intSeurat <- runUMAP(sce_intSeurat,
dimred = 'PCA_SEUBATCH',
min_dist = 0.4,
n_dimred = 12,
external_neighbors = TRUE)
p3 <- plotUMAP(sce_intSeurat,
colour_by = "day") + ggtitle("Day - with batch")
p4 <- plotUMAP(sce_intSeurat,
colour_by = "donor") + ggtitle("Donor - with batch")
p1 + p3
p2 + p4
The batch correction seems to have worked very well. Both with and without batch correction, we observe that cells of the same time point cluster together. But when no batch correction is performed, there seems to be an additional level of variability in the data.
When we color the cells based on the donor variable, we see that without batch correction cells of the same donor cluster together. After batch correction, such donor effects seem to be no longer present.
Also note that while batch correction removes the between-donor variability, the between-day variability that is of interest is still preserved. As such, based on our visualizations, it looks like the batch correction did not overcorrect and succeeded in removing only the unwanted variation in the data.
Harmony implements a complex data integration strategy, where cells are first clustered using a ‘high-diversity clustering’ favoring clusters that consist of multiple batches/datasets, after which batch effects are corrected within each cluster using a linear correction term. It iterates over these steps until no change in clustering is observed.
Performing batch correction with harmony is very simple; we only need a single function call, and we can directly work with our SingleCellExperiment object.
The function RunHarmony takes the following inputs:

- object: the SingleCellExperiment object.
- group.by.vars: the names of the variables for which we want to perform batch correction. Conveniently, this can be multiple variables, so we can correct for both donor and experiment effects (since the correspondence between the two is not perfect).
- reduction: the name of a previously computed reduced-dimension space on which batch correction will be performed (rather than on the raw data, to improve the signal-to-noise ratio). Typically PCA is used.
- reduction.save: the name under which to store the batch-corrected dimension-reduced space.
- verbose: whether to print progress.
library(harmony)
## Loading required package: Rcpp
set.seed(684864)
sce <- harmony::RunHarmony(object = sce,
group.by.vars = c("donor", "experiment"),
reduction = "PCA",
reduction.save = "HARMONY_donor_experiment",
verbose = FALSE)
The output is an additional element in the reduced dimension space:
reducedDim(sce, type="PCA")[1:5,1:2]
## PC1 PC2
## 21554_5#128 -27.364852 9.594060
## 21554_5#142 -26.726070 8.421937
## 21554_5#174 -16.417046 16.397747
## 21554_5#176 -4.164451 15.322549
## 21554_5#181 -22.143156 7.574105
reducedDim(sce, type="HARMONY_donor_experiment")[1:5,1:2]
## HARMONY_donor_experiment_1 HARMONY_donor_experiment_2
## 21554_5#128 -25.018798 9.814387
## 21554_5#142 -24.132245 5.674046
## 21554_5#174 -15.195094 15.285123
## 21554_5#176 8.976033 6.388050
## 21554_5#181 -19.884244 7.579687
The values differ from the original PCA coordinates; this is a consequence of the batch correction.
plotReducedDim(object = sce,
dimred = "PCA",
colour_by = "day")
plotReducedDim(object = sce,
dimred = "HARMONY_donor_experiment",
colour_by = "day")
When coloring on day, both plots look very similar.
plotReducedDim(object = sce,
dimred = "PCA",
colour_by = "donor")
plotReducedDim(object = sce,
dimred = "HARMONY_donor_experiment",
colour_by = "donor")
This figure is too cluttered to see the donor effects. Below, we therefore generate a separate figure for each time point.
# without batch correction
p1 <- plotReducedDim(object = sce,
dimred = "PCA",
colour_by = "donor")
p1 + facet_wrap(~sce$day, ncol=1)
Especially for time points day0 and day1, we observe clear donor effects.
# harmony batch correction
p2 <- plotReducedDim(object = sce,
dimred = "HARMONY_donor_experiment",
colour_by = "donor")
p2 + facet_wrap(~sce$day, ncol=1)
After batch correction with harmony, the donor effects are largely removed.
When using UMAP visualization, the discrepancy between the uncorrected and batch corrected data becomes even clearer:
# make UMAP space based on batch-corrected PCA data
sce <- runUMAP(sce,
dimred = 'HARMONY_donor_experiment',
n_dimred = 12,
min_dist = 0.4,
external_neighbors=TRUE,
name = "UMAP_HARMONY_donor_experiment")
# No batch versus batch corrected, color by day
p1 <- plotReducedDim(sce,
dimred = "UMAP",
colour_by = "day") + ggtitle("Day - no batch")
p2 <- plotReducedDim(sce,
dimred = "UMAP_HARMONY_donor_experiment",
colour_by = "day") + ggtitle("Day - batch")
p1 + p2
# No batch versus batch corrected, color by donor
p3 <- plotReducedDim(sce,
dimred = "UMAP",
colour_by = "donor") + ggtitle("Donor - no batch")
p4 <- plotReducedDim(sce,
dimred = "UMAP_HARMONY_donor_experiment",
colour_by = "donor") + ggtitle("Donor - batch")
p3 + p4
# No batch versus batch corrected, color by experiment
p5 <- plotReducedDim(sce,
dimred = "UMAP",
colour_by = "experiment") + ggtitle("Experiment - no batch")
p6 <- plotReducedDim(sce,
dimred = "UMAP_HARMONY_donor_experiment",
colour_by = "experiment") + ggtitle("Experiment - batch")
p5 + p6
We here show the UMAP visualizations comparing non-corrected versus the batch corrected data, with cells colored based on day, donor and experiment. The interpretation is analogous to the interpretation above for the Seurat batch correction:
The batch correction seems to have worked very well. Both with and without batch correction, we observe that cells of the same time point cluster together. But when no batch correction is performed, there seems to be an additional level of variability in the data.
When we color the cells based on the donor/experiment variable, we see that without batch correction cells of the same donor cluster together. After batch correction, such donor/experiment effects seem to be no longer present.
Also note that while batch correction removes the between-donor/experiment variability, the between-day variability that is of interest is still preserved. As such, based on our visualizations, it looks like the batch correction did not overcorrect and succeeded in removing only the unwanted variation in the data.
In the second lab session, we discussed graph-based clustering, k-means clustering and hierarchical clustering. Here, we will perform hierarchical clustering.
We may split the hierarchical clustering process into two intuitive steps:

1. Compute the pairwise distances between all cells. By default, these are Euclidean distances and, to reduce data complexity and increase the signal-to-noise ratio, we may compute them on the top dimensions of a reduced-dimension space (below, the first 10 batch-corrected dimensions). This is implemented in the dist function.
2. Perform a hierarchical clustering analysis on these distances. Initially, each cell is assigned to its own cluster; the algorithm then proceeds iteratively, at each stage joining the two most similar clusters, continuing until there is just a single cluster. This is implemented in the hclust function.
Note that the hclust function allows for specifying a method argument. The differences between the methods are beyond the scope of this session, but a brief description is provided in the function’s help file. In the context of scRNA-seq, we have mostly seen the “ward.D2” method being used.
distsce <- dist(reducedDim(sce, "HARMONY_donor_experiment")[,1:10])
hcl <- hclust(distsce, method = "ward.D2")
plot(hcl, labels = FALSE)
Next, we need to “cut the tree”, i.e., choose the resolution at which we want to report the (cell-type) clusters. This can be achieved with the cutree function. As input, cutree takes the dendrogram from the hclust function and a threshold for cutting the tree: either k, the number of clusters we want to report, or h, the height in the dendrogram at which we want to cut the tree.
Here we choose k = 4, since we know in advance that we expect four clusters of cells (four time points).
clust_hcl_k4 <- cutree(hcl, k = 4)
table(clust_hcl_k4)
## clust_hcl_k4
## 1 2 3 4
## 832 980 1001 918
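For illustration, we could instead cut at a fixed dendrogram height; the height used below is an arbitrary example value, not a recommendation.

clust_hcl_h <- cutree(hcl, h = 100) # cut at height 100 (arbitrary illustration)
table(clust_hcl_h)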
Next, we visualize the data in PCA space colored based on time point and based on our inferred cluster labels.
sce$clust_hcl_k4 <- as.factor(clust_hcl_k4)
plotReducedDim(sce,
dimred = "HARMONY_donor_experiment",
colour_by ="day")
plotReducedDim(sce,
dimred = "HARMONY_donor_experiment",
colour_by="clust_hcl_k4")
We see that our inferred clustering largely corresponds with the original day variable.
table(sce$day, sce$clust_hcl_k4)
##
## 1 2 3 4
## day0 31 0 826 0
## day1 28 0 43 877
## day2 730 215 89 38
## day3 43 765 43 3
Mapping clusters to time points, we note that most day0 cells are in hierarchical cluster 3 (the cluster labels are arbitrary), day1 cells are in cluster 4, day2 cells are mainly in cluster 1, and day3 cells are mostly found in cluster 2.
Trajectory inference is a computational procedure that attempts to summarize a dynamic process in a ‘trajectory’. The trajectory tries to find a sensible ordering of the cells according to their progression through this dynamic process. Trajectories can be linear, diverging, converging or cyclic. If there are multiple differentiation paths, each path is called a lineage, and the combination of lineages is called a trajectory.
Based on the trajectory, one may define a pseudotime for each cell, which defines that cell’s progression along one of the differentiation paths: if the pseudotime of a cell is close to zero, then that cell is close to the starting point of the trajectory.
Many methods for estimating trajectories exist. Here, we will use slingshot to create a trajectory for the Cuomo dataset.
library(slingshot)
sce <- slingshot(sce, # target object
start.clus = "3", # cluster that corresponds to day0 cells!
end.clus = "2", # cluster that corresponds to day3 cells!
clusterLabels = "clust_hcl_k4", # clustering to use
reducedDim = "HARMONY_donor_experiment") # dimred to use
plot(reducedDims(sce)$HARMONY_donor_experiment,
col = as.factor(sce$clust_hcl_k4), # colored according to inferred clusters
pch=16,
asp = 1)
lines(SlingshotDataSet(sce),
lwd=3,
type = 'lineages',
col = 'orange')
plot(reducedDims(sce)$HARMONY_donor_experiment,
col = as.factor(sce$day), # colored according to original time points
pch=16,
asp = 1)
lines(SlingshotDataSet(sce),
lwd=3,
col = 'orange')
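With the trajectory fitted, each cell’s pseudotime can be extracted directly from the object; a quick look:

# pseudotime values: one column per lineage (here a single lineage)
head(slingPseudotime(sce))
summary(slingPseudotime(sce)[, 1])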
We can also use ggplot2 functions to obtain prettier figures that are easier to annotate:
suppressPackageStartupMessages(library(dplyr))
rd <- reducedDim(sce, type="HARMONY_donor_experiment")[,1:2] # extract first two PCs
colnames(rd) <- c("Dim1", "Dim2") # give column names
cl <- sce$clust_hcl_k4 # store clustering
df <- data.frame(rd, "cl" = as.character(cl)) # PCs and clustering together in 1 data frame
sds <- slingshot(rd, cl) # re-fit slingshot on the reduced dimensions and cluster labels
curves <- slingCurves(sds, as.df = TRUE) # obtain smooth curve for lineages (here only one lineage)
# plot cells as dots in dimension reduced space
p <- ggplot(df, aes(x = Dim1, y = Dim2)) +
geom_point(aes(fill = cl), col = "grey70", shape = 21) +
theme_classic()
# add smooth curves
p + geom_path(data = curves %>% arrange(Order),
aes(group = Lineage), size = 1.5)
# compute and add minimum spanning tree
mst <- slingMST(sds, as.df = TRUE)
p + geom_point(data = mst, size = 4) +
geom_path(data = mst %>% arrange(Order), aes(group = Lineage), size = 2)
We see that the trajectory inferred with slingshot nicely captures the expected developmental process.
As a next step, we may be interested in identifying the genes that are involved in the developmental process. To this end, our group has developed the Bioconductor R package tradeSeq. tradeSeq is a flexible tool that allows for studying differential gene expression along such a trajectory, within or between different lineages. The functionalities of the package are summarized in the following figure:
knitr::include_graphics("./../tradeseq_summary.jpeg")
library(tradeSeq)
tradeSeq uses a negative binomial generalized additive model (NB-GAM) framework to smooth each gene’s expression in each lineage. Smoothers can be decomposed into a set of basis functions, which are joined together at knot points, often simply called knots (k).
Ideally, the number of knots should be selected to reach an optimal bias-variance trade-off for the smoother, where one explains as much variability in the expression data as possible with only a few regression coefficients. In order to guide that choice, we developed diagnostic plots using the Akaike Information Criterion (AIC). This is implemented in the evaluateK function in tradeSeq.
However, in the tradeSeq publication the authors show that the algorithm is not too sensitive to the number of knots, and that values between 5 and 9 often work well. The code below would allow you to find a good value for k, but we will simply use the default value of k = 6 downstream.
### Find knots
# We first need to decide on the number of knots. This is done using the
# `evaluateK` function, which takes a little time (about 9 minutes here).
set.seed(5)
icMat <- evaluateK(counts = assays(sce)$counts,
                   sds = SlingshotDataSet(sce),
                   k = 3:10,
                   nGenes = 500,
                   verbose = TRUE)
We fit a negative binomial generalized additive model (NB-GAM), smoothing each gene’s expression along the developmental trajectory. When modeling all the genes (~10,000), this function takes about 20 minutes to complete. For this lab session, we will therefore randomly sample 1,000 genes. In addition, we make sure that three specific genes are included: “ENSG00000111704”, “ENSG00000164458” and “ENSG00000141448”. These genes are known markers of the different developmental stages and were used in the publication of Cuomo et al.
The tradeSeq model allows us to incorporate fixed effects. While we estimated the trajectory on the integrated data, we model the raw gene expression data, since tradeSeq uses a count model and requires raw counts as input. The raw counts therefore still contain substantial variability that may be explained by patient effects, so we incorporate a patient fixed effect in the design matrix.
set.seed(7)
subset_genes <- sample(rownames(sce), 1000, replace = FALSE)
# genes from paper
markers <- c("ENSG00000111704", "ENSG00000164458", "ENSG00000141448")
# make sure the genes from the paper are in there
subset_genes <- c(subset_genes, markers[!markers %in% subset_genes])
# takes about 7 minutes for 1,000 genes
pseudotime <- slingPseudotime(sce, na = FALSE)
cellWeights <- slingCurveWeights(sce)
patient <- colData(sce)$donor
U <- model.matrix(~ 0 + patient)
sce_fit <- fitGAM(counts = assays(sce)$counts[subset_genes,],
pseudotime = pseudotime,
cellWeights = cellWeights,
nknots = 6,
U = U,
verbose = TRUE)
To assess whether all models have converged:
table(rowData(sce_fit)$tradeSeq$converged)
##
## TRUE
## 1003
Next, we can perform different types of differential expression testing along the trajectory.
The associationTest assesses whether the average expression of a gene is associated with pseudotime. To prioritize genes that are also biologically relevant, we may test against a log-fold-change cut-off:
assoRes2 <- associationTest(sce_fit, l2fc=log2(2))
sum(p.adjust(assoRes2$pvalue, method = "BH") < 0.05, na.rm=T)/nrow(assoRes2)
## [1] 0.219342
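To see which genes show the strongest association with pseudotime, we can rank the results by the Wald statistic returned by associationTest:

# top genes by Wald statistic from the association test
head(assoRes2[order(assoRes2$waldStat, decreasing = TRUE), ])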
Another type of test compares the average expression of each gene between the start point and the end point of a lineage. This is implemented in the startVsEndTest function.
startRes <- startVsEndTest(sce_fit, l2fc=log2(2))
We can then visualize the gene expression profile along pseudotime of the top 5 genes for which differential expression between start and end point was identified.
oStart <- order(startRes$waldStat, decreasing = TRUE)
for (i in 1:5) {
sigGeneStart <- oStart[i] # i-th most significant gene in the start vs. end test
print(plotSmoothers(sce_fit,
assays(sce_fit)$counts,
gene = sigGeneStart) +
ggtitle(rownames(sce)[sigGeneStart]))
}
In the Cuomo paper, the authors highlighted the following genes: “ENSG00000111704”, “ENSG00000164458” and “ENSG00000141448”. In gene symbols, these are NANOG, T (yes, there is a gene called “T”) and GATA6. These genes are markers of day0, day1 and day2/day3 cells, respectively.
We may now visualize the expression of these genes along pseudotime.
plotSmoothers(sce_fit,
assays(sce_fit)$counts,
gene = which(rownames(sce_fit) == "ENSG00000111704")) +
ggtitle("ENSG00000111704")
plotSmoothers(sce_fit,
assays(sce_fit)$counts,
gene = which(rownames(sce_fit) == "ENSG00000164458")) +
ggtitle("ENSG00000164458")
plotSmoothers(sce_fit,
assays(sce_fit)$counts,
gene = which(rownames(sce_fit) == "ENSG00000141448")) +
ggtitle("ENSG00000141448")
A very nice correspondence with the results presented in the paper!!
knitr::include_graphics("./../cuomo_traj1.jpeg")
knitr::include_graphics("./../cuomo_traj2.jpeg")
knitr::include_graphics("./../cuomo_traj3.jpeg")
We also inspect the results in our differential testing output.
Association test:
assoRes2[markers,]
## waldStat df pvalue meanLogFC
## ENSG00000111704 4.596703e+13 5 0 1.514278
## ENSG00000164458 1.651328e+14 5 0 4.718686
## ENSG00000141448 NA NA NA 10.946757
Start versus end test:
startRes[markers,]
## waldStat df pvalue logFClineage1
## ENSG00000111704 0.1264224 1 7.221706e-01 -0.7887447
## ENSG00000164458 15.3192211 1 9.078804e-05 2.7371488
## ENSG00000141448 223.4040585 1 0.000000e+00 13.0634051
Another interesting visualization is implemented in the plotGeneCount function, which colors the cells based on the log-transformed expression values of the target genes.
plotGeneCount(sce$slingshot,
assays(sce_fit)$counts,
gene = which(rownames(sce_fit) == "ENSG00000111704"))
plotGeneCount(sce$slingshot,
assays(sce_fit)$counts,
gene = which(rownames(sce_fit) == "ENSG00000164458"))
plotGeneCount(sce$slingshot,
assays(sce_fit)$counts,
gene = which(rownames(sce_fit) == "ENSG00000141448"))