# HG changeset patch
# User davidvanzessen
# Date 1444297576 14400
# Node ID 7658e9f3d416d35f7a1f13c72a389cfa2c6d8298
# Parent 1b5b862b055b9042b9c4dc233def5a6b73582f71
Uploaded
diff -r 1b5b862b055b -r 7658e9f3d416 RScript.r
--- a/RScript.r Mon Sep 28 08:08:33 2015 -0400
+++ b/RScript.r Thu Oct 08 05:46:16 2015 -0400
@@ -54,26 +54,6 @@
dat$paste = paste(dat$Sample, dat$Clone_Sequence)
-#remove duplicate V+J+CDR3, add together numerical values
-if(mergeOn != "Clone_Sequence"){
-	cat("
-Adding duplicate V+J+CDR3 sequences |
-", file=logfile, append=T)
- dat= data.frame(data.table(dat)[, list(Receptor=unique(.SD$Receptor),
- Cell_Count=unique(.SD$Cell_Count),
- Clone_Molecule_Count_From_Spikes=sum(.SD$Clone_Molecule_Count_From_Spikes),
- Total_Read_Count=sum(.SD$Total_Read_Count),
- dsPerM=ifelse("dsPerM" %in% names(dat), sum(.SD$dsPerM), 0),
- Related_to_leukemia_clone=all(.SD$Related_to_leukemia_clone),
- Frequency=sum(.SD$Frequency),
- locus_V=unique(.SD$locus_V),
- locus_J=unique(.SD$locus_J),
- min_cell_count=unique(.SD$min_cell_count),
- normalized_read_count=sum(.SD$normalized_read_count),
- Log10_Frequency=sum(.SD$Log10_Frequency),
- Clone_Sequence=.SD$Clone_Sequence[1],
- min_cell_paste=.SD$min_cell_paste[1],
- paste=unique(.SD$paste)), by=c("Patient", "Sample", "V_Segment_Major_Gene", "J_Segment_Major_Gene", "CDR3_Sense_Sequence")])
-}
-
patients = split(dat, dat$Patient, drop=T)
intervalReads = rev(c(0,10,25,50,100,250,500,750,1000,10000))
intervalFreq = rev(c(0,0.01,0.05,0.1,0.5,1,5))
@@ -146,16 +126,22 @@
scatterplot_data$type = factor(x=oneSample, levels=c(oneSample, twoSample, "In Both"))
scatterplot_data$on = onShort
- patientMerge = merge(patient1, patient2, by.x="merge", by.y="merge")
-
+	#patientMerge = merge(patient1, patient2, by.x="merge", by.y="merge") #merge everything 'fuzzy'
+	patientMerge = merge(patient1, patient2, by.x="merge", by.y="merge")[NULL,] #keep only the merged column structure; matched rows are appended during fuzzy matching below
+
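+	#Clone_Sequence values present verbatim in both samples (currently only used by the commented-out exact-match filter further down)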
+ cs.exact.matches = patient1[patient1$Clone_Sequence %in% patient2$Clone_Sequence,]$Clone_Sequence
+
#fuzzy matching here...
if(mergeOn == "Clone_Sequence"){
- merge.list = patientMerge$merge
+ #merge.list = patientMerge$merge
- patient1.fuzzy = patient1[!(patient1$merge %in% merge.list),]
- patient2.fuzzy = patient2[!(patient2$merge %in% merge.list),]
-
+ #patient1.fuzzy = patient1[!(patient1$merge %in% merge.list),]
+ #patient2.fuzzy = patient2[!(patient2$merge %in% merge.list),]
+
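+	#fuzzy matching now starts from all rows of both samples; exact matches are no longer filtered out beforehand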
+ patient1.fuzzy = patient1
+ patient2.fuzzy = patient2
+
#patient1.fuzzy$merge = paste(patient1.fuzzy$V_Segment_Major_Gene, patient1.fuzzy$J_Segment_Major_Gene, patient1.fuzzy$CDR3_Sense_Sequence)
#patient2.fuzzy$merge = paste(patient2.fuzzy$V_Segment_Major_Gene, patient2.fuzzy$J_Segment_Major_Gene, patient2.fuzzy$CDR3_Sense_Sequence)
@@ -165,98 +151,108 @@
patient1.fuzzy$merge = paste(patient1.fuzzy$locus_V, patient1.fuzzy$locus_J)
patient2.fuzzy$merge = paste(patient2.fuzzy$locus_V, patient2.fuzzy$locus_J)
- merge.freq.table = data.frame(table(c(patient1.fuzzy[!duplicated(patient1.fuzzy$merge),"merge"], patient2.fuzzy[!duplicated(patient2.fuzzy$merge),"merge"])))
- merge.freq.table.gt.1 = merge.freq.table[merge.freq.table$Freq > 1,]
+ #merge.freq.table = data.frame(table(c(patient1.fuzzy[!duplicated(patient1.fuzzy$merge),"merge"], patient2.fuzzy[!duplicated(patient2.fuzzy$merge),"merge"]))) #also remove?
+ #merge.freq.table.gt.1 = merge.freq.table[merge.freq.table$Freq > 1,]
- patient1.fuzzy = patient1.fuzzy[patient1.fuzzy$merge %in% merge.freq.table.gt.1$Var1,]
- patient2.fuzzy = patient2.fuzzy[patient2.fuzzy$merge %in% merge.freq.table.gt.1$Var1,]
+ #patient1.fuzzy = patient1.fuzzy[patient1.fuzzy$merge %in% merge.freq.table.gt.1$Var1,]
+ #patient2.fuzzy = patient2.fuzzy[patient2.fuzzy$merge %in% merge.freq.table.gt.1$Var1,]
patient.fuzzy = rbind(patient1.fuzzy, patient2.fuzzy)
patient.fuzzy = patient.fuzzy[order(nchar(patient.fuzzy$Clone_Sequence)),]
-
+
+ merge.list = list()
+
+ merge.list[["second"]] = vector()
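+	#records second-sample Clone_Sequences that are identical to a matched first-sample sequence, so they can be removed from the scatterplot data later on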
+
+
while(nrow(patient.fuzzy) > 1){
first.merge = patient.fuzzy[1,"merge"]
first.clone.sequence = patient.fuzzy[1,"Clone_Sequence"]
-
+ first.sample = patient.fuzzy[1,"Sample"]
merge.filter = first.merge == patient.fuzzy$merge
- length.filter = nchar(patient.fuzzy$Clone_Sequence) - nchar(first.clone.sequence) <= 9
+ #length.filter = nchar(patient.fuzzy$Clone_Sequence) - nchar(first.clone.sequence) <= 9
- sample.filter = patient.fuzzy[1,"Sample"] != patient.fuzzy$Sample
+ first.sample.filter = first.sample == patient.fuzzy$Sample
+ second.sample.filter = first.sample != patient.fuzzy$Sample
+
+ #first match same sample, sum to a single row, same for other sample
+ #then merge rows like 'normal'
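+		#e.g. every sample-A row whose Clone_Sequence starts with the current (shortest) sequence is summed into one row, every matching sample-B row into another, and the two summary rows are then merged on that sequence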
sequence.filter = grepl(paste("^", first.clone.sequence, sep=""), patient.fuzzy$Clone_Sequence)
-
+
+
+
#match.filter = merge.filter & grepl(first.clone.sequence, patient.fuzzy$Clone_Sequence) & length.filter & sample.filter
- match.filter = merge.filter & sequence.filter & sample.filter
-
- if(sum(match.filter) == 1){
- second.match = which(match.filter)[1]
- second.clone.sequence = patient.fuzzy[second.match,"Clone_Sequence"]
- first.sample = patient.fuzzy[1,"Sample"]
- second.sample = patient.fuzzy[second.match,"Sample"]
-
- first.match.row = patient.fuzzy[1,]
- second.match.row = patient.fuzzy[second.match,]
- print(paste(first.merge, first.match.row$normalized_read_count, second.match.row$normalized_read_count, first.clone.sequence, second.clone.sequence))
- patientMerge.new.row = data.frame(merge=first.clone.sequence,
- min_cell_paste.x=first.match.row[1,"min_cell_paste"],
- Patient.x=first.match.row[1,"Patient"],
- Receptor.x=first.match.row[1,"Receptor"],
- Sample.x=first.match.row[1,"Sample"],
- Cell_Count.x=first.match.row[1,"Cell_Count"],
- Clone_Molecule_Count_From_Spikes.x=first.match.row[1,"Clone_Molecule_Count_From_Spikes"],
- Log10_Frequency.x=first.match.row[1,"Log10_Frequency"],
- Total_Read_Count.x=first.match.row[1,"Total_Read_Count"],
- dsPerM.x=first.match.row[1,"dsPerM"],
- J_Segment_Major_Gene.x=first.match.row[1,"J_Segment_Major_Gene"],
- V_Segment_Major_Gene.x=first.match.row[1,"V_Segment_Major_Gene"],
- Clone_Sequence.x=first.match.row[1,"Clone_Sequence"],
- CDR3_Sense_Sequence.x=first.match.row[1,"CDR3_Sense_Sequence"],
- Related_to_leukemia_clone.x=first.match.row[1,"Related_to_leukemia_clone"],
- Frequency.x=first.match.row[1,"Frequency"],
- locus_V.x=first.match.row[1,"locus_V"],
- locus_J.x=first.match.row[1,"locus_J"],
- min_cell_count.x=first.match.row[1,"min_cell_count"],
- normalized_read_count.x=first.match.row[1,"normalized_read_count"],
- paste.x=first.match.row[1,"paste"],
- min_cell_paste.y=second.match.row[1,"min_cell_paste"],
- Patient.y=second.match.row[1,"Patient"],
- Receptor.y=second.match.row[1,"Receptor"],
- Sample.y=second.match.row[1,"Sample"],
- Cell_Count.y=second.match.row[1,"Cell_Count"],
- Clone_Molecule_Count_From_Spikes.y=second.match.row[1,"Clone_Molecule_Count_From_Spikes"],
- Log10_Frequency.y=second.match.row[1,"Log10_Frequency"],
- Total_Read_Count.y=second.match.row[1,"Total_Read_Count"],
- dsPerM.y=second.match.row[1,"dsPerM"],
- J_Segment_Major_Gene.y=second.match.row[1,"J_Segment_Major_Gene"],
- V_Segment_Major_Gene.y=second.match.row[1,"V_Segment_Major_Gene"],
- Clone_Sequence.y=second.match.row[1,"Clone_Sequence"],
- CDR3_Sense_Sequence.y=second.match.row[1,"CDR3_Sense_Sequence"],
- Related_to_leukemia_clone.y=second.match.row[1,"Related_to_leukemia_clone"],
- Frequency.y=second.match.row[1,"Frequency"],
- locus_V.y=second.match.row[1,"locus_V"],
- locus_J.y=second.match.row[1,"locus_J"],
- min_cell_count.y=second.match.row[1,"min_cell_count"],
- normalized_read_count.y=second.match.row[1,"normalized_read_count"],
- paste.y=first.match.row[1,"paste"])
-
-
- patientMerge = rbind(patientMerge, patientMerge.new.row)
- patient.fuzzy = patient.fuzzy[-match.filter,]
-
- patient1 = patient1[!(patient1$Clone_Sequence %in% c(first.clone.sequence, second.clone.sequence)),]
- patient2 = patient2[!(patient2$Clone_Sequence %in% c(first.clone.sequence, second.clone.sequence)),]
-
- scatterplot_data = scatterplot_data[scatterplot_data$merge != second.clone.sequence,]
-
- } else if (sum(match.filter) > 1){
-			cat(paste("", "Multiple matches (", sum(match.filter), ") found for", first.merge, "in", patient, " |
-", sep=" "), file=logfile, append=T)
- patient.fuzzy = patient.fuzzy[-1,]
+ first.match.filter = merge.filter & sequence.filter & first.sample.filter
+ second.match.filter = merge.filter & sequence.filter & second.sample.filter
+
+ first.rows = patient.fuzzy[first.match.filter,]
+ second.rows = patient.fuzzy[second.match.filter,]
+
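+			#collapse the matching first-sample rows into a single row: counts and frequencies are summed, descriptive fields come from the first row, and the most frequent V/J gene label is kept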
+ first.sum = data.frame(merge = first.clone.sequence,
+ Patient = patient,
+ Receptor = first.rows[1,"Receptor"],
+ Sample = first.rows[1,"Sample"],
+ Cell_Count = first.rows[1,"Cell_Count"],
+ Clone_Molecule_Count_From_Spikes = sum(first.rows$Clone_Molecule_Count_From_Spikes),
+ Log10_Frequency = log10(sum(first.rows$Frequency)),
+ Total_Read_Count = sum(first.rows$Total_Read_Count),
+ dsPerM = sum(first.rows$dsPerM),
+						J_Segment_Major_Gene = names(sort(table(first.rows$J_Segment_Major_Gene),decreasing=TRUE))[1],
+						V_Segment_Major_Gene = names(sort(table(first.rows$V_Segment_Major_Gene),decreasing=TRUE))[1],
+ Clone_Sequence = first.clone.sequence,
+ CDR3_Sense_Sequence = first.rows[1,"CDR3_Sense_Sequence"],
+ Related_to_leukemia_clone = F,
+ Frequency = sum(first.rows$Frequency),
+ locus_V = first.rows[1,"locus_V"],
+ locus_J = first.rows[1,"locus_J"],
+ min_cell_count = first.rows[1,"min_cell_count"],
+ normalized_read_count = sum(first.rows$normalized_read_count),
+ paste = first.rows[1,"paste"],
+ min_cell_paste = first.rows[1,"min_cell_paste"])
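+			#e.g. names(sort(table(c("IGHV3-23","IGHV3-23","IGHV1-2")),decreasing=TRUE))[1] yields "IGHV3-23", the most frequent gene label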
+
+ if(nrow(second.rows) > 0){
+ second.sum = data.frame(merge = first.clone.sequence,
+ Patient = patient,
+ Receptor = second.rows[1,"Receptor"],
+ Sample = second.rows[1,"Sample"],
+ Cell_Count = second.rows[1,"Cell_Count"],
+ Clone_Molecule_Count_From_Spikes = sum(second.rows$Clone_Molecule_Count_From_Spikes),
+ Log10_Frequency = log10(sum(second.rows$Frequency)),
+ Total_Read_Count = sum(second.rows$Total_Read_Count),
+ dsPerM = sum(second.rows$dsPerM),
+							J_Segment_Major_Gene = names(sort(table(second.rows$J_Segment_Major_Gene),decreasing=TRUE))[1],
+							V_Segment_Major_Gene = names(sort(table(second.rows$V_Segment_Major_Gene),decreasing=TRUE))[1],
+ Clone_Sequence = first.clone.sequence,
+ CDR3_Sense_Sequence = second.rows[1,"CDR3_Sense_Sequence"],
+ Related_to_leukemia_clone = F,
+ Frequency = sum(second.rows$Frequency),
+ locus_V = second.rows[1,"locus_V"],
+ locus_J = second.rows[1,"locus_J"],
+ min_cell_count = second.rows[1,"min_cell_count"],
+ normalized_read_count = sum(second.rows$normalized_read_count),
+ paste = second.rows[1,"paste"],
+ min_cell_paste = second.rows[1,"min_cell_paste"])
+
+ patientMerge = rbind(patientMerge, merge(first.sum, second.sum, by="merge"))
+ patient.fuzzy = patient.fuzzy[!(first.match.filter | second.match.filter),]
+
+
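+				#when exactly one row of each sample matched and both sequences have the same length, they are identical (one is a prefix of the other); remember the second-sample sequence so it can be excluded from the scatterplot data later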
+ if(sum(first.match.filter) == 1 & sum(second.match.filter) == 1){
+					second.clone.sequence = second.rows[1,"Clone_Sequence"] #second.rows was taken before patient.fuzzy was filtered, so it still holds the matched row
+ if(nchar(first.clone.sequence) == nchar(second.clone.sequence)){
+ merge.list[["second"]] = append(merge.list[["second"]], second.clone.sequence)
+ }
+ }
+
+ if(nrow(first.rows) > 1 | nrow(second.rows) > 1){
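+					#no additional handling yet when several rows of one sample were collapsed; they are already summed into first.sum / second.sum above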
+
+ }
+
} else {
patient.fuzzy = patient.fuzzy[-1,]
}
-
-
}
}
@@ -303,16 +299,19 @@
}
} else {
scatterplot_locus_data = scatterplot_data[grepl(V_Segment, scatterplot_data$V_Segment_Major_Gene) & grepl(J_Segment, scatterplot_data$J_Segment_Major_Gene),]
+ #scatterplot_locus_data = scatterplot_locus_data[!(scatterplot_locus_data$merge %in% merge.list[[twoSample]]),]
+ scatterplot_locus_data = scatterplot_locus_data[!(scatterplot_locus_data$merge %in% merge.list[["second"]]),]
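+			#drop rows whose merge value is in merge.list[["second"]] (second-sample sequences that were identical to a first-sample sequence)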
if(nrow(scatterplot_locus_data) > 0){
scatterplot_locus_data$Rearrangement = product[iter, titleIndex]
}
in_one = (scatterplot_locus_data$merge %in% patient1$merge)
in_two = (scatterplot_locus_data$merge %in% patient2$merge)
- not_in_one = !in_one
if(any(in_two)){
- scatterplot_locus_data[not_in_one,]$type = twoSample
+ scatterplot_locus_data[in_two,]$type = twoSample
}
- in_both = (scatterplot_locus_data$merge %in% patientMerge[both,]$merge)
+ in_both = (scatterplot_locus_data$merge %in% patientMerge$merge)
+ #merge.list.filter = (scatterplot_locus_data$merge %in% merge.list[[oneSample]])
+ #exact.matches.filter = (scatterplot_locus_data$merge %in% cs.exact.matches)
if(any(in_both)){
scatterplot_locus_data[in_both,]$type = "In Both"
}
@@ -323,9 +322,9 @@
if(nrow(scatterplot_locus_data) != 0){
if(on == "normalized_read_count"){
scales = 10^(0:6) #(0:ceiling(log10(max(scatterplot_locus_data$normalized_read_count))))
- p = ggplot(scatterplot_locus_data, aes(type, normalized_read_count)) + scale_y_log10(breaks=scales,labels=scales) + expand_limits(y=10^6)
+ p = ggplot(scatterplot_locus_data, aes(type, normalized_read_count)) + scale_y_log10(breaks=scales,labels=scales) + expand_limits(y=10^6) + scale_x_discrete(breaks=levels(scatterplot_data$type), labels=levels(scatterplot_data$type), drop=FALSE)
} else {
- p = ggplot(scatterplot_locus_data, aes(type, Frequency)) + scale_y_continuous(limits = c(0, 100)) + expand_limits(y=c(0,100))
+ p = ggplot(scatterplot_locus_data, aes(type, Frequency)) + scale_y_continuous(limits = c(0, 100)) + expand_limits(y=c(0,100)) + scale_x_discrete(breaks=levels(scatterplot_data$type), labels=levels(scatterplot_data$type), drop=FALSE)
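+				#scale_x_discrete(..., drop=FALSE) keeps all three type levels on the x-axis even when a group has no points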
}
p = p + geom_point(aes(colour=type), position="jitter")
p = p + xlab("In one or both samples") + ylab(onShort) + ggtitle(paste(patient1[1,patientIndex], patient1[1,sampleIndex], patient2[1,sampleIndex], onShort, product[iter, titleIndex]))