# HG changeset patch
# User davidvanzessen
# Date 1441110867 14400
# Node ID 1f1640608245adc0ef6b31460122480b3b712a71
# Parent  a3c4e3e62e1054e38e2e7b68f050022527f1da8b
Uploaded

diff -r a3c4e3e62e10 -r 1f1640608245 extract_duplicates.r
--- a/extract_duplicates.r	Tue Sep 01 07:49:47 2015 -0400
+++ b/extract_duplicates.r	Tue Sep 01 08:34:27 2015 -0400
@@ -3,14 +3,21 @@
 input=args[1]
 column=as.numeric(args[2])
 header=(args[3] == "yes")
-out_file=args[4]
+regex_filter=args[4]
+out_file=args[5]
+
+print(regex_filter)
 
 dat = read.table(input, header=header, sep="\t", fill=T, stringsAsFactors=F, quote="")
 
-duplicates = dat[duplicated(dat[,column]),column]
+dat.names = names(dat)
 
-dat = dat[dat[,column] %in% duplicates,]
+dat$filtered = gsub("\\(.*", "", dat[,column])
+
+duplicates = dat[duplicated(dat$filtered),"filtered"]
 
-dat = dat[order(dat[,column]),]
+dat = dat[dat[,"filtered"] %in% duplicates,]
 
-write.table(dat, out_file, sep="\t", row.names=F, col.names=header, quote=F)
+dat = dat[order(dat[,"filtered"]),]
+
+write.table(dat[,dat.names], out_file, sep="\t", row.names=F, col.names=header, quote=F)
diff -r a3c4e3e62e10 -r 1f1640608245 extract_duplicates.sh
--- a/extract_duplicates.sh	Tue Sep 01 07:49:47 2015 -0400
+++ b/extract_duplicates.sh	Tue Sep 01 08:34:27 2015 -0400
@@ -1,7 +1,8 @@
 input=$1
 column=$2
 header=$3
-out_file=$4
+regex_filter=$4
+out_file=$5
 
 dir="$(cd "$(dirname "$0")" && pwd)"
-Rscript --verbose $dir/extract_duplicates.r ${input} ${column} ${header} ${out_file} 2>&1
+Rscript --verbose $dir/extract_duplicates.r ${input} ${column} ${header} "${regex_filter}" ${out_file} 2>&1
diff -r a3c4e3e62e10 -r 1f1640608245 extract_duplicates.xml
--- a/extract_duplicates.xml	Tue Sep 01 07:49:47 2015 -0400
+++ b/extract_duplicates.xml	Tue Sep 01 08:34:27 2015 -0400
@@ -1,11 +1,12 @@
 to a new dataset
-	extract_duplicates.sh $input $column $header $out_file
+	extract_duplicates.sh $input $column $header "$regex_filter" $out_file
+