Repository 'mqppep_anova'
hg clone https://eddie.galaxyproject.org/repos/eschen42/mqppep_anova

Changeset 13:b41a077af3aa (2022-03-22)
Previous changeset 12:4deacfee76ef (2022-03-15) Next changeset 14:6679616d0c18 (2022-03-22)
Commit message:
"planemo upload for repository https://github.com/galaxyproteomics/tools-galaxyp/tree/master/tools/mqppep commit 040e4945da00a279cb60daae799fce9489f99c50"
modified:
macros.xml
mqppep_anova.R
mqppep_anova.xml
mqppep_anova_script.Rmd
added:
workflow/ppenrich_suite_wf.ga
diff -r 4deacfee76ef -r b41a077af3aa macros.xml
--- a/macros.xml Tue Mar 15 18:17:55 2022 +0000
+++ b/macros.xml Tue Mar 22 20:47:40 2022 +0000
@@ -1,5 +1,5 @@
 <macros>
-    <token name="@TOOL_VERSION@">0.1.3</token>
+    <token name="@TOOL_VERSION@">0.1.4</token>
     <token name="@VERSION_SUFFIX@">0</token>
     <xml name="requirements">
         <requirements>
diff -r 4deacfee76ef -r b41a077af3aa mqppep_anova.R
--- a/mqppep_anova.R Tue Mar 15 18:17:55 2022 +0000
+++ b/mqppep_anova.R Tue Mar 22 20:47:40 2022 +0000
@@ -1,207 +1,243 @@
The script is rewritten end to end (line endings change from CRLF to LF); the hunk is truncated in this capture. Changes visible in it:
  - a new --imputedQNLTDataFile option is parsed into imp_qn_lt_data_filenm and passed to the R Markdown report as the imputedQNLTDataFile parameter
  - the --imputationMethod value is checked against c("group-median", "median", "mean", "random"); an unrecognized value is reported as a "bad imputationMethod argument" and the script returns -1
  - meanPercentile and sdPercentile are echoed after parsing, as are the trimmed sample-name and sample-grouping regular expressions
  - the location_of_this_script() helper (adapted from the molgenis-pipelines wiki) resolves the script's own directory so that mqppep_anova_script.Rmd can be found beside it
  - rmarkdown_params gains imputedQNLTDataFile, and set.seed(28571) freezes the random number generator so repeated runs produce the same results
  - the report is rendered with rmarkdown::render(..., output_format = rmarkdown::pdf_document(toc = TRUE), ...); a comment notes that PDF rendering requires the conda package r-texlive until conda-forge texlive-core-feedstock issues 19 and 61 are resolved, and tinytex::install_tinytex() is called as a workaround
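The imputation-method guard noted above can be exercised on its own. A minimal sketch of the same grepl-based check; the assigned value is illustrative, and stop() stands in for the script's return(-1):

    # Guard added in this changeset: the supplied method, used as a pattern,
    # must match at least one of the recognized imputation methods.
    imputation_method <- "group-median"   # illustrative value
    allowed <- c("group-median", "median", "mean", "random")
    if (sum(grepl(pattern = imputation_method, x = allowed)) < 1) {
      stop(sprintf("bad imputationMethod argument: %s", imputation_method))
    }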
diff -r 4deacfee76ef -r b41a077af3aa mqppep_anova.xml
--- a/mqppep_anova.xml Tue Mar 15 18:17:55 2022 +0000
+++ b/mqppep_anova.xml Tue Mar 22 20:47:40 2022 +0000
@@ -19,21 +19,24 @@
       cd \$TEMP;
       cp '$__tool_directory__/mqppep_anova_script.Rmd' . || exit 0;
       cp '$__tool_directory__/mqppep_anova.R' . || exit 0;
-      \${CONDA_PREFIX}/bin/Rscript \$TEMP/mqppep_anova.R 
-      --inputFile '$input_file' 
-      --alphaFile $alpha_file
-      --firstDataColumn $first_data_column
-      --imputationMethod $imputation.imputation_method
-      #if '$imputation_method' == 'random':
-        --meanPercentile '$meanPercentile'
-        --sdPercentile   '$sdPercentile'
-      #end if
-      --regexSampleNames $sample_names_regex_f
-      --regexSampleGrouping $sample_grouping_regex_f
-      --imputedDataFile $imputed_data_file
-      --reportFile $report_file;
+      \${CONDA_PREFIX}/bin/Rscript \$TEMP/mqppep_anova.R
+        --inputFile '$input_file'
+        --alphaFile '$alpha_file'
+        --firstDataColumn $first_data_column
+        --imputationMethod $imputation.imputation_method
+        #if $imputation.imputation_method == "random"
+          --meanPercentile '$imputation.meanPercentile'
+          --sdPercentile   '$imputation.sdPercentile'
+        #end if
+        --regexSampleNames $sample_names_regex_f
+        --regexSampleGrouping $sample_grouping_regex_f
+        --imputedDataFile $imputed_data_file
+        --imputedQNLTDataFile '$imp_qn_lt_file'
+        --reportFile '$report_file';
+      export RESULT=\$?;
       cd \${OLD_PWD};
-      rm -rf \$HOME
+      rm -rf \$HOME;
+      exit \${RESULT}
     ]]></command>
     <configfiles>
       <configfile name="sample_names_regex_f">
@@ -98,7 +101,8 @@
         </param>
     </inputs>
     <outputs>
-        <data name="imputed_data_file" format="tabular" label="${input_file.name}.${imputation.imputation_method}-imputed_QN_LT_intensities" ></data>
+        <data name="imputed_data_file" format="tabular" label="${input_file.name}.${imputation.imputation_method}-imputed_intensities" ></data>
+        <data name="imp_qn_lt_file" format="tabular" label="${input_file.name}.${imputation.imputation_method}-imputed_QN_LT_intensities" ></data>
         <!--
         <data name="report_file" format="html" label="${input_file.name}.${imputation.imputation_method}-imputed_report (download/unzip to view)" ></data>
         -->
@@ -112,7 +116,7 @@
             <param name="imputation_method" value="group-median"/>
             <param name="sample_names_regex" value="\.\d+[A-Z]$"/>
             <param name="sample_grouping_regex" value="\d+"/>
-            <output name="imputed_data_file">
+            <output name="imp_qn_lt_file">
                 <assert_contents>
                     <has_text text="Phosphopeptide" />
                     <has_text text="AAAITDMADLEELSRLpSPLPPGpSPGSAAR" />
@@ -130,11 +134,11 @@
             <param name="sdPercentile" value="0.2" />
             <param name="sample_names_regex" value="\.\d+[A-Z]$"/>
             <param name="sample_grouping_regex" value="\d+"/>
-            <output name="imputed_data_file">
+            <output name="imp_qn_lt_file">
                 <assert_contents>
                     <has_text text="Phosphopeptide" />
                     <has_text text="AAAITDMADLEELSRLpSPLPPGpSPGSAAR" />
-                    <has_text text="0.82258" />
+                    <has_text text="8.392287" />
                     <has_text text="pSQKQEEENPAEETGEEK" />
                 </assert_contents>
             </output>
@@ -192,11 +196,14 @@
 
 **Outputs**
 
-``intensities_*-imputed_QN_LT``
+``imputed_intensities``
+  Phosphopeptide MS intensities where missing values have been **imputed** by the chosen method, in tabular format.
+
+``imputed_QN_LT_intensities``
   Phosphopeptide MS intensities where missing values have been **imputed** by the chosen method, quantile-normalized (**QN**), and log10-transformed (**LT**), in tabular format.
 
 ``report_file``
-  Summary report for normalization, imputation, and ANOVA, in PDF format.
+  Summary report for normalization, imputation, and **ANOVA**, in PDF format.
 
 **Authors**
 
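The revised help text above separates the merely imputed intensities from the quantile-normalized, log10-transformed (QN, LT) intensities. A minimal sketch of that QN + LT step, assuming the preprocessCore package named in mqppep_anova.R's dependency comment and a toy two-peptide matrix; the tool's actual ordering and bookkeeping live in mqppep_anova_script.Rmd:

    # Toy intensity matrix: two phosphopeptides across three samples.
    library(preprocessCore)
    intensities <- matrix(
      c(1e6, 2e6, 4.0e6,
        3e5, 6e5, 1.2e6),
      nrow = 2, byrow = TRUE,
      dimnames = list(c("pep1", "pep2"), c("s1", "s2", "s3"))
    )
    qn <- normalize.quantiles(intensities)   # QN: equalize column distributions
    dimnames(qn) <- dimnames(intensities)    # normalize.quantiles drops dimnames
    qn_lt <- log10(qn)                       # LT: log10 transform
    print(round(qn_lt, 3))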
diff -r 4deacfee76ef -r b41a077af3aa mqppep_anova_script.Rmd
--- a/mqppep_anova_script.Rmd Tue Mar 15 18:17:55 2022 +0000
+++ b/mqppep_anova_script.Rmd Tue Mar 22 20:47:40 2022 +0000
The report is extensively revised; the diff is truncated in this capture. Changes visible in its hunks:
  - front matter: the date becomes "May 28, 2018; Mar 16, 2022"; pdf_document (and a latex_document alternative) get toc: true; the sdPercentile default changes from 0.2 to 1.0; imputedDataFilename now defaults to a test-data path; new imputedQNLTDataFile and show_toc parameters are added
  - the setup chunk seeds the random number generator (set.seed(28571)) and defines styling constants (boxplot fill "grey94", stripchart symbol size and jitter) plus a const_write_debug_files switch
  - new helpers: write_debug_file(), latex_collapsed_vector(), latex_itemized_list(), latex_enumerated_list(), latex_table_row(), and print_data_frame_latex(), an adaptation of print.data.frame that emits a LaTeX table with optional caption, justification, centering, and float anchor
  - the "Imputed, normalized intensities" boxplot gets horizontal axis labels, the grey fill, and an overlaid jittered goldenrod stripchart of the individual values
  - single-row edge cases are handled: row filtering uses drop = FALSE, heatmap margins are computed from the (at most 50) peptides actually plotted, and a one-peptide result prints a message instead of drawing a heatmap
  - the heatmap title is shortened to "Unimputed, unnormalized intensities", the "no peptides passed the cutoff adjusted p-value" message is reworded, and the merge() calls feeding the output table are reformatted
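The p-value extraction in the report's ANOVA filter is untouched (it appears above as diff context): a one-way aov is fitted and the first Pr(>F) entry kept. A standalone sketch of that extraction; the intensities and grouping below are illustrative, not taken from the test data:

    # Per-peptide ANOVA p-value, extracted the same way as the report's filter.
    set.seed(28571)                      # the same seed the pipeline fixes
    y <- c(rnorm(3, mean = 6.0), rnorm(3, mean = 6.8))   # toy log10 intensities
    grouping <- factor(c(1, 1, 1, 2, 2, 2))              # two sample groups
    x_aov <- aov(y ~ grouping)
    pvalue <- summary(x_aov)[[1]][["Pr(>F)"]][1]         # first Pr(>F) entry
    print(pvalue)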
diff -r 4deacfee76ef -r b41a077af3aa workflow/ppenrich_suite_wf.ga
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/workflow/ppenrich_suite_wf.ga Tue Mar 22 20:47:40 2022 +0000
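The added file (summarized below) is an ordinary Galaxy .ga workflow, i.e. JSON, so its steps can be listed without loading it into Galaxy. A sketch assuming the jsonlite package and the checkout path from the header above:

    # List the step ids, names, and labels recorded in the new workflow.
    library(jsonlite)
    wf <- fromJSON("workflow/ppenrich_suite_wf.ga", simplifyVector = FALSE)
    cat(wf$name, "-", wf$annotation, "\n")
    for (step in wf$steps) {
      label <- if (is.null(step$label)) "(unlabelled)" else step$label
      cat(sprintf("step %s: %s [%s]\n", step$id, step$name, label))
    }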
@@ -0,0 +1,652 @@
New Galaxy workflow "ppenrich_suite_wf" (format-version 0.1, MIT license, workflow version 3, tag "ppenrich"; creator Art Eschenlauer, ORCID 0000-0002-2882-0508), annotated as phosphoproteomic enrichment data pre-processing and ANOVA. The JSON is truncated in this capture; visible steps include:
  - step 0: input dataset "Phospho (STY)Sites.txt", the tabular file produced by MaxQuant (found in the txt folder)
  - step 1: input dataset "SwissProt_Human_Canonical_Isoform.fasta", a merge of the UniProt human canonical and varsplic Swiss-Prot FASTA files
  - step 2: input dataset derived from the NetworKIN human predictions 3.1 download (free for non-commercial use; see https://networkin.info/ for the required citation)
  - step 9: "MaxQuant Phosphopeptide ANOVA randomly imputed", running mqppep_anova with imputation_method "random" (meanPercentile 1, sdPercentile 0.2), first_data_column "Intensity", sample_names_regex \.(\d+)[A-Z]$, and sample_grouping_regex (\d+); post-job actions rename its datasets, and the workflow publishes them as intensities_randomly-imputed_QN_LT and intensities_randomly-imputed_report
  - the step just before it publishes intensities_group-mean-imputed_QN_LT and intensities_group-mean-imputed_report