bgruening/flexynesis_plot: flexynesis_utils.py @ 0:bb91bf19eb40 (draft)
planemo upload for repository https://github.com/bgruening/galaxytools/tree/master/tools/flexynesis commit b2463fb68d0ae54864d87718ee72f5e063aa4587
| field | value |
|---|---|
| author | bgruening |
| date | Tue, 24 Jun 2025 05:55:50 +0000 |
| parents | |
| children | 3c5d82bf6e8a |
```python
#!/usr/bin/env python

import argparse
import os
import sys
from pathlib import Path

import pandas as pd


def read_data(data_input, index=False):
    """Load CSV or TSV data file."""
    try:
        file_ext = Path(data_input).suffix.lower()
        sep = ',' if file_ext == '.csv' else '\t'
        index_col = 0 if index else None

        if file_ext in ['.csv', '.tsv', '.txt', '.tab', '.tabular']:
            return pd.read_csv(data_input, sep=sep, index_col=index_col)
        else:
            raise ValueError(f"Unsupported file extension: {file_ext}")
    except Exception as e:
        raise ValueError(f"Error loading data from {data_input}: {e}") from e


def binarize_mutations(df, gene_idx=1, sample_idx=2):
    """
    Binarize mutation data by creating a matrix of gene x sample with 1/0 values.

    gene_idx and sample_idx are 1-based column indices (Galaxy convention).
    """
    # galaxy index is 1-based, convert to zero-based
    gene_idx -= 1
    sample_idx -= 1
    # check idx
    if gene_idx < 0 or sample_idx < 0:
        raise ValueError("Gene and sample column indices are 1-based and must be >= 1")
    if gene_idx >= len(df.columns) or sample_idx >= len(df.columns):
        raise ValueError(f"Column indices out of bounds. DataFrame has {len(df.columns)} columns, "
                         f"but requested indices are {gene_idx} and {sample_idx}")
    if gene_idx == sample_idx:
        raise ValueError("Gene and sample column indices must be different")

    # Get column names by index
    gene_col = df.columns[gene_idx]
    print(f"Using gene column: {gene_col} (index {gene_idx})")
    sample_col = df.columns[sample_idx]
    print(f"Using sample column: {sample_col} (index {sample_idx})")

    # Check if columns contain data
    if df[gene_col].isna().all():
        raise ValueError(f"Gene column (index {gene_idx}) contains only NaN values.")
    if df[sample_col].isna().all():
        raise ValueError(f"Sample column (index {sample_idx}) contains only NaN values.")

    # Group by gene and sample, count mutations
    mutation_counts = df.groupby([gene_col, sample_col]).size().reset_index(name='count')

    # Create pivot table
    mutation_matrix = mutation_counts.pivot(index=gene_col, columns=sample_col, values='count').fillna(0)

    # Binarize: convert any count > 0 to 1
    mutation_matrix[mutation_matrix > 0] = 1

    return mutation_matrix


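# Illustration of binarize_mutations() on hypothetical long-format input
# (column names and values are made up for the example):
#
#     gene    sample    effect
#     TP53    S1        missense
#     TP53    S1        nonsense
#     KRAS    S2        missense
#
# With gene_idx=1 and sample_idx=2 this yields the binarized gene x sample matrix:
#
#             S1    S2
#     KRAS     0     1
#     TP53     1     0

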
def make_data_dict(clin_path, omics_paths):
    """Read clinical and omics data files into a dictionary."""
    data = {}

    # Read clinical data
    print(f"Reading clinical data from {clin_path}")
    try:
        clin = read_data(clin_path, index=True)

        if clin.empty:
            raise ValueError(f"Clinical file {clin_path} is empty")
        data['clin'] = clin
        print(f"Loaded clinical data: {clin.shape[0]} samples, {clin.shape[1]} features")
    except Exception as e:
        raise ValueError(f"Error reading clinical file {clin_path}: {e}")

    # Read omics data
    print(f"Reading omics data from {', '.join(omics_paths)}")
    for path in omics_paths:
        try:
            name = os.path.splitext(os.path.basename(path))[0]
            df = read_data(path, index=True)
            if df.empty:
                print(f"Warning: Omics file {path} is empty, skipping")
                continue
            data[name] = df
            print(f"Loaded {name}: {df.shape[0]} features, {df.shape[1]} samples")
        except Exception as e:
            print(f"Warning: Error reading omics file {path}: {e}")
            continue

    if len(data) == 1:  # Only clinical data loaded
        raise ValueError("No omics data was successfully loaded")

    return data


def validate_data_consistency(data):
    """Validate that clinical and omics data have consistent samples."""
    clin_samples = set(data['clin'].index)

    for name, df in data.items():
        if name == 'clin':
            continue

        omics_samples = set(df.columns)

        # Check for sample overlap
        common_samples = clin_samples.intersection(omics_samples)
        if len(common_samples) == 0:
            raise ValueError(f"No common samples between clinical data and {name}")

        missing_in_omics = clin_samples - omics_samples
        missing_in_clin = omics_samples - clin_samples

        if missing_in_omics:
            print(f"Warning: {len(missing_in_omics)} clinical samples not found in {name}")
        if missing_in_clin:
            print(f"Warning: {len(missing_in_clin)} samples in {name} not found in clinical data")

    return True


def split_and_save_data(data, ratio=0.7, output_dir='.'):
    """Split data into train/test sets and save to files."""
    # Validate data consistency first
    validate_data_consistency(data)

    samples = data['clin'].index.tolist()

    train_samples = list(pd.Series(samples).sample(frac=ratio, random_state=42))
    test_samples = list(set(samples) - set(train_samples))

    train_data = {}
    test_data = {}

    for key, df in data.items():
        try:
            if key == 'clin':
                train_data[key] = df.loc[df.index.intersection(train_samples)]
                test_data[key] = df.loc[df.index.intersection(test_samples)]
            else:
                train_data[key] = df.loc[:, df.columns.intersection(train_samples)]
                test_data[key] = df.loc[:, df.columns.intersection(test_samples)]
        except Exception as e:
            print(f"Error splitting data {key}: {e}")
            continue

    # Create output directories
    os.makedirs(os.path.join(output_dir, 'train'), exist_ok=True)
    os.makedirs(os.path.join(output_dir, 'test'), exist_ok=True)

    # Save train and test data
    for key in data.keys():
        try:
            train_data[key].to_csv(os.path.join(output_dir, 'train', f'{key}.csv'))
            test_data[key].to_csv(os.path.join(output_dir, 'test', f'{key}.csv'))
        except Exception as e:
            print(f"Error saving {key}: {e}")
            continue


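# Resulting layout under output_dir after split_and_save_data()
# (omics file names follow the input base names; 'gex' is a hypothetical example):
#
#     output_dir/
#         train/
#             clin.csv
#             gex.csv
#         test/
#             clin.csv
#             gex.csv

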
def main():
    parser = argparse.ArgumentParser(description='Flexynesis extra utilities')

    parser.add_argument("--util", type=str, required=True,
                        choices=['split', 'binarize'],
                        help="Utility function: 'split' to split data into train and test sets, "
                             "'binarize' to create a binarized matrix from mutation data")

    # Arguments for split
    parser.add_argument('--clin', required=False,
                        help='Path to clinical data CSV file (samples in rows)')
    parser.add_argument('--omics', required=False,
                        help='Comma-separated list of omics CSV files (samples in columns)')
    parser.add_argument('--split', type=float, default=0.7,
                        help='Train split ratio (default: 0.7)')

    # Arguments for binarize
    parser.add_argument('--mutations', type=str, required=False,
                        help='Path to mutation data CSV/TSV file (long format: one row per mutation, '
                             'with gene and sample columns)')
    parser.add_argument('--gene_idx', type=int, default=1,
                        help='1-based column index for genes in mutation data (default: 1)')
    parser.add_argument('--sample_idx', type=int, default=2,
                        help='1-based column index for samples in mutation data (default: 2)')

    # common arguments
    parser.add_argument('--out', default='.',
                        help='Output directory (default: current directory)')

    args = parser.parse_args()

    try:
        # validate utility function
        if not args.util:
            raise ValueError("Utility function must be specified")
        if args.util not in ['split', 'binarize']:
            raise ValueError(f"Invalid utility function: {args.util}")

        if args.util == 'split':
            # Validate inputs
            if not args.clin:
                raise ValueError("Clinical data file must be provided")
            if not args.omics:
                raise ValueError("At least one omics file must be provided")
            if not os.path.isfile(args.clin):
                raise FileNotFoundError(f"Clinical file not found: {args.clin}")
            # Validate split ratio
            if not 0 < args.split < 1:
                raise ValueError(f"Split ratio must be between 0 and 1, got {args.split}")

        elif args.util == 'binarize':
            # Validate mutation data file
            if not args.mutations:
                raise ValueError("Mutation data file must be provided")
            if not os.path.isfile(args.mutations):
                raise FileNotFoundError(f"Mutation data file not found: {args.mutations}")
            # Validate gene and sample indices (1-based, see binarize_mutations)
            if args.gene_idx < 1 or args.sample_idx < 1:
                raise ValueError("Gene and sample indices must be positive (1-based) integers")

        # Create output directory if it doesn't exist
        if not os.path.exists(args.out):
            os.makedirs(args.out)

        if args.util == 'split':
            # Parse omics files
            omics_files = [f.strip() for f in args.omics.split(',') if f.strip()]
            if not omics_files:
                raise ValueError("At least one omics file must be provided")
            # Check omics files exist
            for f in omics_files:
                if not os.path.isfile(f):
                    raise FileNotFoundError(f"Omics file not found: {f}")
            data = make_data_dict(args.clin, omics_files)
            split_and_save_data(data, ratio=args.split, output_dir=args.out)

        elif args.util == 'binarize':
            mutations_df = read_data(args.mutations, index=False)
            if mutations_df.empty:
                raise ValueError("Mutation data file is empty")

            binarized_matrix = binarize_mutations(mutations_df, gene_idx=args.gene_idx, sample_idx=args.sample_idx)
            # Save binarized matrix
            output_file = os.path.join(args.out, 'binarized_mutations.csv')
            binarized_matrix.to_csv(output_file)
            print(f"Binarized mutation matrix saved to {output_file}")

    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
```
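
For orientation, a minimal sketch of driving the two utilities from Python rather than the command line, assuming the script is importable as `flexynesis_utils` and using hypothetical file names:

```python
# Minimal usage sketch (hypothetical file names; adjust to your data).
from flexynesis_utils import binarize_mutations, make_data_dict, read_data, split_and_save_data

# 'split': read clinical + omics tables, then write train/ and test/ CSVs under 'out/'.
data = make_data_dict("clin.csv", ["gex.csv", "cnv.csv"])
split_and_save_data(data, ratio=0.7, output_dir="out")

# 'binarize': read a long-format mutation table and build the 1/0 gene x sample matrix.
mutations = read_data("mutations.tsv", index=False)
matrix = binarize_mutations(mutations, gene_idx=1, sample_idx=2)
matrix.to_csv("binarized_mutations.csv")
```

The equivalent command-line calls go through `--util split` / `--util binarize` as parsed in `main()`.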
