comparison: search_model_validation.py @ 3:0a1812986bc3 (draft)
planemo upload for repository https://github.com/bgruening/galaxytools/tree/master/tools/sklearn commit 9981e25b00de29ed881b2229a173a8c812ded9bb
| author | bgruening |
|---|---|
| date | Wed, 09 Aug 2023 11:10:37 +0000 |
| parents | 38c4f8a98038 |
| children | |
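
Besides the black-style reformatting to double quotes, the substantive changes in this comparison are: model persistence moves from pickle to galaxy_ml's `dump_model_to_h5`/`load_model_from_h5`, `skopt.BayesSearchCV` becomes a selectable search algorithm, a new `_set_memory` helper wires the joblib disk cache into IRAPS estimators, labels are integer-encoded for `XGBClassifier`, the imblearn samplers are updated to their current signatures, and the Keras `outfile_weights` export path is dropped. Short illustrative sketches of these patterns follow the diff.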
| 2:38c4f8a98038 | 3:0a1812986bc3 |
|---|---|
| 1 import argparse | 1 import argparse |
| 2 import collections | 2 import json |
| 3 import os | |
| 4 import sys | |
| 5 import warnings | |
| 6 from distutils.version import LooseVersion as Version | |
| 7 | |
| 3 import imblearn | 8 import imblearn |
| 4 import joblib | 9 import joblib |
| 5 import json | |
| 6 import numpy as np | 10 import numpy as np |
| 7 import os | |
| 8 import pandas as pd | 11 import pandas as pd |
| 9 import pickle | |
| 10 import skrebate | 12 import skrebate |
| 11 import sys | 13 from galaxy_ml import __version__ as galaxy_ml_version |
| 12 import warnings | 14 from galaxy_ml.binarize_target import IRAPSClassifier |
| 15 from galaxy_ml.model_persist import dump_model_to_h5, load_model_from_h5 | |
| 16 from galaxy_ml.utils import ( | |
| 17 clean_params, | |
| 18 get_cv, | |
| 19 get_main_estimator, | |
| 20 get_module, | |
| 21 get_scoring, | |
| 22 read_columns, | |
| 23 SafeEval, | |
| 24 try_get_attr | |
| 25 ) | |
| 13 from scipy.io import mmread | 26 from scipy.io import mmread |
| 14 from sklearn import (cluster, decomposition, feature_selection, | 27 from sklearn import ( |
| 15 kernel_approximation, model_selection, preprocessing) | 28 cluster, |
| 29 decomposition, | |
| 30 feature_selection, | |
| 31 kernel_approximation, | |
| 32 model_selection, | |
| 33 preprocessing, | |
| 34 ) | |
| 16 from sklearn.exceptions import FitFailedWarning | 35 from sklearn.exceptions import FitFailedWarning |
| 36 from sklearn.model_selection import _search, _validation | |
| 17 from sklearn.model_selection._validation import _score, cross_validate | 37 from sklearn.model_selection._validation import _score, cross_validate |
| 18 from sklearn.model_selection import _search, _validation | 38 from sklearn.preprocessing import LabelEncoder |
| 19 from sklearn.pipeline import Pipeline | 39 from skopt import BayesSearchCV |
| 20 | 40 |
| 21 from galaxy_ml.utils import (SafeEval, get_cv, get_scoring, load_model, | 41 N_JOBS = int(os.environ.get("GALAXY_SLOTS", 1)) |
| 22 read_columns, try_get_attr, get_module, | |
| 23 clean_params, get_main_estimator) | |
| 24 | |
| 25 | |
| 26 _fit_and_score = try_get_attr('galaxy_ml.model_validations', '_fit_and_score') | |
| 27 setattr(_search, '_fit_and_score', _fit_and_score) | |
| 28 setattr(_validation, '_fit_and_score', _fit_and_score) | |
| 29 | |
| 30 N_JOBS = int(os.environ.get('GALAXY_SLOTS', 1)) | |
| 31 # handle disk cache | 42 # handle disk cache |
| 32 CACHE_DIR = os.path.join(os.getcwd(), 'cached') | 43 CACHE_DIR = os.path.join(os.getcwd(), "cached") |
| 33 del os | 44 NON_SEARCHABLE = ( |
| 34 NON_SEARCHABLE = ('n_jobs', 'pre_dispatch', 'memory', '_path', | 45 "n_jobs", |
| 35 'nthread', 'callbacks') | 46 "pre_dispatch", |
| 47 "memory", | |
| 48 "_path", | |
| 49 "_dir", | |
| 50 "nthread", | |
| 51 "callbacks", | |
| 52 ) | |
| 36 | 53 |
| 37 | 54 |
| 38 def _eval_search_params(params_builder): | 55 def _eval_search_params(params_builder): |
| 39 search_params = {} | 56 search_params = {} |
| 40 | 57 |
| 41 for p in params_builder['param_set']: | 58 for p in params_builder["param_set"]: |
| 42 search_list = p['sp_list'].strip() | 59 search_list = p["sp_list"].strip() |
| 43 if search_list == '': | 60 if search_list == "": |
| 44 continue | 61 continue |
| 45 | 62 |
| 46 param_name = p['sp_name'] | 63 param_name = p["sp_name"] |
| 47 if param_name.lower().endswith(NON_SEARCHABLE): | 64 if param_name.lower().endswith(NON_SEARCHABLE): |
| 48 print("Warning: `%s` is not eligible for search and was " | 65 print( |
| 49 "omitted!" % param_name) | 66 "Warning: `%s` is not eligible for search and was " |
| 67 "omitted!" % param_name | |
| 68 ) | |
| 50 continue | 69 continue |
| 51 | 70 |
| 52 if not search_list.startswith(':'): | 71 if not search_list.startswith(":"): |
| 53 safe_eval = SafeEval(load_scipy=True, load_numpy=True) | 72 safe_eval = SafeEval(load_scipy=True, load_numpy=True) |
| 54 ev = safe_eval(search_list) | 73 ev = safe_eval(search_list) |
| 55 search_params[param_name] = ev | 74 search_params[param_name] = ev |
| 56 else: | 75 else: |
| 57 # Have `:` before search list, asks for estimator evaluation | 76 # Have `:` before search list, asks for estimator evaluation |
| 58 safe_eval_es = SafeEval(load_estimators=True) | 77 safe_eval_es = SafeEval(load_estimators=True) |
| 59 search_list = search_list[1:].strip() | 78 search_list = search_list[1:].strip() |
| 60 # TODO maybe add regular expression check | 79 # TODO maybe add regular expression check |
| 61 ev = safe_eval_es(search_list) | 80 ev = safe_eval_es(search_list) |
| 62 preprocessings = ( | 81 preprocessings = ( |
| 63 preprocessing.StandardScaler(), preprocessing.Binarizer(), | 82 preprocessing.StandardScaler(), |
| 83 preprocessing.Binarizer(), | |
| 64 preprocessing.MaxAbsScaler(), | 84 preprocessing.MaxAbsScaler(), |
| 65 preprocessing.Normalizer(), preprocessing.MinMaxScaler(), | 85 preprocessing.Normalizer(), |
| 86 preprocessing.MinMaxScaler(), | |
| 66 preprocessing.PolynomialFeatures(), | 87 preprocessing.PolynomialFeatures(), |
| 67 preprocessing.RobustScaler(), feature_selection.SelectKBest(), | 88 preprocessing.RobustScaler(), |
| 89 feature_selection.SelectKBest(), | |
| 68 feature_selection.GenericUnivariateSelect(), | 90 feature_selection.GenericUnivariateSelect(), |
| 69 feature_selection.SelectPercentile(), | 91 feature_selection.SelectPercentile(), |
| 70 feature_selection.SelectFpr(), feature_selection.SelectFdr(), | 92 feature_selection.SelectFpr(), |
| 93 feature_selection.SelectFdr(), | |
| 71 feature_selection.SelectFwe(), | 94 feature_selection.SelectFwe(), |
| 72 feature_selection.VarianceThreshold(), | 95 feature_selection.VarianceThreshold(), |
| 73 decomposition.FactorAnalysis(random_state=0), | 96 decomposition.FactorAnalysis(random_state=0), |
| 74 decomposition.FastICA(random_state=0), | 97 decomposition.FastICA(random_state=0), |
| 75 decomposition.IncrementalPCA(), | 98 decomposition.IncrementalPCA(), |
| 76 decomposition.KernelPCA(random_state=0, n_jobs=N_JOBS), | 99 decomposition.KernelPCA(random_state=0, n_jobs=N_JOBS), |
| 77 decomposition.LatentDirichletAllocation( | 100 decomposition.LatentDirichletAllocation(random_state=0, n_jobs=N_JOBS), |
| 78 random_state=0, n_jobs=N_JOBS), | |
| 79 decomposition.MiniBatchDictionaryLearning( | 101 decomposition.MiniBatchDictionaryLearning( |
| 80 random_state=0, n_jobs=N_JOBS), | 102 random_state=0, n_jobs=N_JOBS |
| 81 decomposition.MiniBatchSparsePCA( | 103 ), |
| 82 random_state=0, n_jobs=N_JOBS), | 104 decomposition.MiniBatchSparsePCA(random_state=0, n_jobs=N_JOBS), |
| 83 decomposition.NMF(random_state=0), | 105 decomposition.NMF(random_state=0), |
| 84 decomposition.PCA(random_state=0), | 106 decomposition.PCA(random_state=0), |
| 85 decomposition.SparsePCA(random_state=0, n_jobs=N_JOBS), | 107 decomposition.SparsePCA(random_state=0, n_jobs=N_JOBS), |
| 86 decomposition.TruncatedSVD(random_state=0), | 108 decomposition.TruncatedSVD(random_state=0), |
| 87 kernel_approximation.Nystroem(random_state=0), | 109 kernel_approximation.Nystroem(random_state=0), |
| 92 skrebate.ReliefF(n_jobs=N_JOBS), | 114 skrebate.ReliefF(n_jobs=N_JOBS), |
| 93 skrebate.SURF(n_jobs=N_JOBS), | 115 skrebate.SURF(n_jobs=N_JOBS), |
| 94 skrebate.SURFstar(n_jobs=N_JOBS), | 116 skrebate.SURFstar(n_jobs=N_JOBS), |
| 95 skrebate.MultiSURF(n_jobs=N_JOBS), | 117 skrebate.MultiSURF(n_jobs=N_JOBS), |
| 96 skrebate.MultiSURFstar(n_jobs=N_JOBS), | 118 skrebate.MultiSURFstar(n_jobs=N_JOBS), |
| 97 imblearn.under_sampling.ClusterCentroids( | 119 imblearn.under_sampling.ClusterCentroids(random_state=0, n_jobs=N_JOBS), |
| 98 random_state=0, n_jobs=N_JOBS), | |
| 99 imblearn.under_sampling.CondensedNearestNeighbour( | 120 imblearn.under_sampling.CondensedNearestNeighbour( |
| 100 random_state=0, n_jobs=N_JOBS), | 121 random_state=0, n_jobs=N_JOBS |
| 101 imblearn.under_sampling.EditedNearestNeighbours( | 122 ), |
| 102 random_state=0, n_jobs=N_JOBS), | 123 imblearn.under_sampling.EditedNearestNeighbours(n_jobs=N_JOBS), |
| 103 imblearn.under_sampling.RepeatedEditedNearestNeighbours( | 124 imblearn.under_sampling.RepeatedEditedNearestNeighbours(n_jobs=N_JOBS), |
| 104 random_state=0, n_jobs=N_JOBS), | 125 imblearn.under_sampling.AllKNN(n_jobs=N_JOBS), |
| 105 imblearn.under_sampling.AllKNN(random_state=0, n_jobs=N_JOBS), | |
| 106 imblearn.under_sampling.InstanceHardnessThreshold( | 126 imblearn.under_sampling.InstanceHardnessThreshold( |
| 107 random_state=0, n_jobs=N_JOBS), | 127 random_state=0, n_jobs=N_JOBS |
| 108 imblearn.under_sampling.NearMiss( | 128 ), |
| 109 random_state=0, n_jobs=N_JOBS), | 129 imblearn.under_sampling.NearMiss(n_jobs=N_JOBS), |
| 110 imblearn.under_sampling.NeighbourhoodCleaningRule( | 130 imblearn.under_sampling.NeighbourhoodCleaningRule(n_jobs=N_JOBS), |
| 111 random_state=0, n_jobs=N_JOBS), | |
| 112 imblearn.under_sampling.OneSidedSelection( | 131 imblearn.under_sampling.OneSidedSelection( |
| 113 random_state=0, n_jobs=N_JOBS), | 132 random_state=0, n_jobs=N_JOBS |
| 114 imblearn.under_sampling.RandomUnderSampler( | 133 ), |
| 115 random_state=0), | 134 imblearn.under_sampling.RandomUnderSampler(random_state=0), |
| 116 imblearn.under_sampling.TomekLinks( | 135 imblearn.under_sampling.TomekLinks(n_jobs=N_JOBS), |
| 117 random_state=0, n_jobs=N_JOBS), | |
| 118 imblearn.over_sampling.ADASYN(random_state=0, n_jobs=N_JOBS), | 136 imblearn.over_sampling.ADASYN(random_state=0, n_jobs=N_JOBS), |
| 137 imblearn.over_sampling.BorderlineSMOTE(random_state=0, n_jobs=N_JOBS), | |
| 138 imblearn.over_sampling.KMeansSMOTE(random_state=0, n_jobs=N_JOBS), | |
| 119 imblearn.over_sampling.RandomOverSampler(random_state=0), | 139 imblearn.over_sampling.RandomOverSampler(random_state=0), |
| 120 imblearn.over_sampling.SMOTE(random_state=0, n_jobs=N_JOBS), | 140 imblearn.over_sampling.SMOTE(random_state=0, n_jobs=N_JOBS), |
| 141 imblearn.over_sampling.SMOTEN(random_state=0, n_jobs=N_JOBS), | |
| 142 imblearn.over_sampling.SMOTENC( | |
| 143 categorical_features=[], random_state=0, n_jobs=N_JOBS | |
| 144 ), | |
| 121 imblearn.over_sampling.SVMSMOTE(random_state=0, n_jobs=N_JOBS), | 145 imblearn.over_sampling.SVMSMOTE(random_state=0, n_jobs=N_JOBS), |
| 122 imblearn.over_sampling.BorderlineSMOTE( | |
| 123 random_state=0, n_jobs=N_JOBS), | |
| 124 imblearn.over_sampling.SMOTENC( | |
| 125 categorical_features=[], random_state=0, n_jobs=N_JOBS), | |
| 126 imblearn.combine.SMOTEENN(random_state=0), | 146 imblearn.combine.SMOTEENN(random_state=0), |
| 127 imblearn.combine.SMOTETomek(random_state=0)) | 147 imblearn.combine.SMOTETomek(random_state=0), |
| 148 ) | |
| 128 newlist = [] | 149 newlist = [] |
| 129 for obj in ev: | 150 for obj in ev: |
| 130 if obj is None: | 151 if obj is None: |
| 131 newlist.append(None) | 152 newlist.append(None) |
| 132 elif obj == 'all_0': | 153 elif obj == "all_0": |
| 133 newlist.extend(preprocessings[0:35]) | 154 newlist.extend(preprocessings[0:35]) |
| 134 elif obj == 'sk_prep_all': # no KernelCenterer() | 155 elif obj == "sk_prep_all": # no KernelCenterer() |
| 135 newlist.extend(preprocessings[0:7]) | 156 newlist.extend(preprocessings[0:7]) |
| 136 elif obj == 'fs_all': | 157 elif obj == "fs_all": |
| 137 newlist.extend(preprocessings[7:14]) | 158 newlist.extend(preprocessings[7:14]) |
| 138 elif obj == 'decomp_all': | 159 elif obj == "decomp_all": |
| 139 newlist.extend(preprocessings[14:25]) | 160 newlist.extend(preprocessings[14:25]) |
| 140 elif obj == 'k_appr_all': | 161 elif obj == "k_appr_all": |
| 141 newlist.extend(preprocessings[25:29]) | 162 newlist.extend(preprocessings[25:29]) |
| 142 elif obj == 'reb_all': | 163 elif obj == "reb_all": |
| 143 newlist.extend(preprocessings[30:35]) | 164 newlist.extend(preprocessings[30:35]) |
| 144 elif obj == 'imb_all': | 165 elif obj == "imb_all": |
| 145 newlist.extend(preprocessings[35:54]) | 166 newlist.extend(preprocessings[35:54]) |
| 146 elif type(obj) is int and -1 < obj < len(preprocessings): | 167 elif type(obj) is int and -1 < obj < len(preprocessings): |
| 147 newlist.append(preprocessings[obj]) | 168 newlist.append(preprocessings[obj]) |
| 148 elif hasattr(obj, 'get_params'): # user uploaded object | 169 elif hasattr(obj, "get_params"): # user uploaded object |
| 149 if 'n_jobs' in obj.get_params(): | 170 if "n_jobs" in obj.get_params(): |
| 150 newlist.append(obj.set_params(n_jobs=N_JOBS)) | 171 newlist.append(obj.set_params(n_jobs=N_JOBS)) |
| 151 else: | 172 else: |
| 152 newlist.append(obj) | 173 newlist.append(obj) |
| 153 else: | 174 else: |
| 154 sys.exit("Unsupported estimator type: %r" % (obj)) | 175 sys.exit("Unsupported estimator type: %r" % (obj)) |
| 156 search_params[param_name] = newlist | 177 search_params[param_name] = newlist |
| 157 | 178 |
| 158 return search_params | 179 return search_params |
| 159 | 180 |
| 160 | 181 |
| 161 def _handle_X_y(estimator, params, infile1, infile2, loaded_df={}, | 182 def _handle_X_y( |
| 162 ref_seq=None, intervals=None, targets=None, | 183 estimator, |
| 163 fasta_path=None): | 184 params, |
| 185 infile1, | |
| 186 infile2, | |
| 187 loaded_df={}, | |
| 188 ref_seq=None, | |
| 189 intervals=None, | |
| 190 targets=None, | |
| 191 fasta_path=None, | |
| 192 ): | |
| 164 """read inputs | 193 """read inputs |
| 165 | 194 |
| 166 Params | 195 Params |
| 167 ------- | 196 ------- |
| 168 estimator : estimator object | 197 estimator : estimator object |
| 190 X : numpy array | 219 X : numpy array |
| 191 y : numpy array | 220 y : numpy array |
| 192 """ | 221 """ |
| 193 estimator_params = estimator.get_params() | 222 estimator_params = estimator.get_params() |
| 194 | 223 |
| 195 input_type = params['input_options']['selected_input'] | 224 input_type = params["input_options"]["selected_input"] |
| 196 # tabular input | 225 # tabular input |
| 197 if input_type == 'tabular': | 226 if input_type == "tabular": |
| 198 header = 'infer' if params['input_options']['header1'] else None | 227 header = "infer" if params["input_options"]["header1"] else None |
| 199 column_option = (params['input_options']['column_selector_options_1'] | 228 column_option = params["input_options"]["column_selector_options_1"][ |
| 200 ['selected_column_selector_option']) | 229 "selected_column_selector_option" |
| 201 if column_option in ['by_index_number', 'all_but_by_index_number', | 230 ] |
| 202 'by_header_name', 'all_but_by_header_name']: | 231 if column_option in [ |
| 203 c = params['input_options']['column_selector_options_1']['col1'] | 232 "by_index_number", |
| 233 "all_but_by_index_number", | |
| 234 "by_header_name", | |
| 235 "all_but_by_header_name", | |
| 236 ]: | |
| 237 c = params["input_options"]["column_selector_options_1"]["col1"] | |
| 204 else: | 238 else: |
| 205 c = None | 239 c = None |
| 206 | 240 |
| 207 df_key = infile1 + repr(header) | 241 df_key = infile1 + repr(header) |
| 208 | 242 |
| 209 if df_key in loaded_df: | 243 if df_key in loaded_df: |
| 210 infile1 = loaded_df[df_key] | 244 infile1 = loaded_df[df_key] |
| 211 | 245 |
| 212 df = pd.read_csv(infile1, sep='\t', header=header, | 246 df = pd.read_csv(infile1, sep="\t", header=header, parse_dates=True) |
| 213 parse_dates=True) | |
| 214 loaded_df[df_key] = df | 247 loaded_df[df_key] = df |
| 215 | 248 |
| 216 X = read_columns(df, c=c, c_option=column_option).astype(float) | 249 X = read_columns(df, c=c, c_option=column_option).astype(float) |
| 217 # sparse input | 250 # sparse input |
| 218 elif input_type == 'sparse': | 251 elif input_type == "sparse": |
| 219 X = mmread(open(infile1, 'r')) | 252 X = mmread(open(infile1, "r")) |
| 220 | 253 |
| 221 # fasta_file input | 254 # fasta_file input |
| 222 elif input_type == 'seq_fasta': | 255 elif input_type == "seq_fasta": |
| 223 pyfaidx = get_module('pyfaidx') | 256 pyfaidx = get_module("pyfaidx") |
| 224 sequences = pyfaidx.Fasta(fasta_path) | 257 sequences = pyfaidx.Fasta(fasta_path) |
| 225 n_seqs = len(sequences.keys()) | 258 n_seqs = len(sequences.keys()) |
| 226 X = np.arange(n_seqs)[:, np.newaxis] | 259 X = np.arange(n_seqs)[:, np.newaxis] |
| 227 for param in estimator_params.keys(): | 260 for param in estimator_params.keys(): |
| 228 if param.endswith('fasta_path'): | 261 if param.endswith("fasta_path"): |
| 229 estimator.set_params( | 262 estimator.set_params(**{param: fasta_path}) |
| 230 **{param: fasta_path}) | |
| 231 break | 263 break |
| 232 else: | 264 else: |
| 233 raise ValueError( | 265 raise ValueError( |
| 234 "The selected estimator doesn't support " | 266 "The selected estimator doesn't support " |
| 235 "fasta file input! Please consider using " | 267 "fasta file input! Please consider using " |
| 236 "KerasGBatchClassifier with " | 268 "KerasGBatchClassifier with " |
| 237 "FastaDNABatchGenerator/FastaProteinBatchGenerator " | 269 "FastaDNABatchGenerator/FastaProteinBatchGenerator " |
| 238 "or having GenomeOneHotEncoder/ProteinOneHotEncoder " | 270 "or having GenomeOneHotEncoder/ProteinOneHotEncoder " |
| 239 "in pipeline!") | 271 "in pipeline!" |
| 240 | 272 ) |
| 241 elif input_type == 'refseq_and_interval': | 273 |
| 274 elif input_type == "refseq_and_interval": | |
| 242 path_params = { | 275 path_params = { |
| 243 'data_batch_generator__ref_genome_path': ref_seq, | 276 "data_batch_generator__ref_genome_path": ref_seq, |
| 244 'data_batch_generator__intervals_path': intervals, | 277 "data_batch_generator__intervals_path": intervals, |
| 245 'data_batch_generator__target_path': targets | 278 "data_batch_generator__target_path": targets, |
| 246 } | 279 } |
| 247 estimator.set_params(**path_params) | 280 estimator.set_params(**path_params) |
| 248 n_intervals = sum(1 for line in open(intervals)) | 281 n_intervals = sum(1 for line in open(intervals)) |
| 249 X = np.arange(n_intervals)[:, np.newaxis] | 282 X = np.arange(n_intervals)[:, np.newaxis] |
| 250 | 283 |
| 251 # Get target y | 284 # Get target y |
| 252 header = 'infer' if params['input_options']['header2'] else None | 285 header = "infer" if params["input_options"]["header2"] else None |
| 253 column_option = (params['input_options']['column_selector_options_2'] | 286 column_option = params["input_options"]["column_selector_options_2"][ |
| 254 ['selected_column_selector_option2']) | 287 "selected_column_selector_option2" |
| 255 if column_option in ['by_index_number', 'all_but_by_index_number', | 288 ] |
| 256 'by_header_name', 'all_but_by_header_name']: | 289 if column_option in [ |
| 257 c = params['input_options']['column_selector_options_2']['col2'] | 290 "by_index_number", |
| 291 "all_but_by_index_number", | |
| 292 "by_header_name", | |
| 293 "all_but_by_header_name", | |
| 294 ]: | |
| 295 c = params["input_options"]["column_selector_options_2"]["col2"] | |
| 258 else: | 296 else: |
| 259 c = None | 297 c = None |
| 260 | 298 |
| 261 df_key = infile2 + repr(header) | 299 df_key = infile2 + repr(header) |
| 262 if df_key in loaded_df: | 300 if df_key in loaded_df: |
| 263 infile2 = loaded_df[df_key] | 301 infile2 = loaded_df[df_key] |
| 264 else: | 302 else: |
| 265 infile2 = pd.read_csv(infile2, sep='\t', | 303 infile2 = pd.read_csv(infile2, sep="\t", header=header, parse_dates=True) |
| 266 header=header, parse_dates=True) | |
| 267 loaded_df[df_key] = infile2 | 304 loaded_df[df_key] = infile2 |
| 268 | 305 |
| 269 y = read_columns( | 306 y = read_columns( |
| 270 infile2, | 307 infile2, |
| 271 c=c, | 308 c=c, |
| 272 c_option=column_option, | 309 c_option=column_option, |
| 273 sep='\t', | 310 sep="\t", |
| 274 header=header, | 311 header=header, |
| 275 parse_dates=True) | 312 parse_dates=True, |
| 313 ) | |
| 276 if len(y.shape) == 2 and y.shape[1] == 1: | 314 if len(y.shape) == 2 and y.shape[1] == 1: |
| 277 y = y.ravel() | 315 y = y.ravel() |
| 278 if input_type == 'refseq_and_interval': | 316 if input_type == "refseq_and_interval": |
| 279 estimator.set_params( | 317 estimator.set_params(data_batch_generator__features=y.ravel().tolist()) |
| 280 data_batch_generator__features=y.ravel().tolist()) | |
| 281 y = None | 318 y = None |
| 282 # end y | 319 # end y |
| 283 | 320 |
| 284 return estimator, X, y | 321 return estimator, X, y |
| 285 | 322 |
| 286 | 323 |
| 287 def _do_outer_cv(searcher, X, y, outer_cv, scoring, error_score='raise', | 324 def _do_outer_cv(searcher, X, y, outer_cv, scoring, error_score="raise", outfile=None): |
| 288 outfile=None): | |
| 289 """Do outer cross-validation for nested CV | 325 """Do outer cross-validation for nested CV |
| 290 | 326 |
| 291 Parameters | 327 Parameters |
| 292 ---------- | 328 ---------- |
| 293 searcher : object | 329 searcher : object |
| 303 error_score: str, float or numpy float | 339 error_score: str, float or numpy float |
| 304 Whether to raise fit error or return a value | 340 Whether to raise fit error or return a value |
| 305 outfile : str | 341 outfile : str |
| 306 File path to store the results | 342 File path to store the results |
| 307 """ | 343 """ |
| 308 if error_score == 'raise': | 344 if error_score == "raise": |
| 309 rval = cross_validate( | 345 rval = cross_validate( |
| 310 searcher, X, y, scoring=scoring, | 346 searcher, |
| 311 cv=outer_cv, n_jobs=N_JOBS, verbose=0, | 347 X, |
| 312 error_score=error_score) | 348 y, |
| 349 scoring=scoring, | |
| 350 cv=outer_cv, | |
| 351 n_jobs=N_JOBS, | |
| 352 verbose=0, | |
| 353 error_score=error_score, | |
| 354 ) | |
| 313 else: | 355 else: |
| 314 warnings.simplefilter('always', FitFailedWarning) | 356 warnings.simplefilter("always", FitFailedWarning) |
| 315 with warnings.catch_warnings(record=True) as w: | 357 with warnings.catch_warnings(record=True) as w: |
| 316 try: | 358 try: |
| 317 rval = cross_validate( | 359 rval = cross_validate( |
| 318 searcher, X, y, | 360 searcher, |
| 361 X, | |
| 362 y, | |
| 319 scoring=scoring, | 363 scoring=scoring, |
| 320 cv=outer_cv, n_jobs=N_JOBS, | 364 cv=outer_cv, |
| 365 n_jobs=N_JOBS, | |
| 321 verbose=0, | 366 verbose=0, |
| 322 error_score=error_score) | 367 error_score=error_score, |
| 368 ) | |
| 323 except ValueError: | 369 except ValueError: |
| 324 pass | 370 pass |
| 325 for warning in w: | 371 for warning in w: |
| 326 print(repr(warning.message)) | 372 print(repr(warning.message)) |
| 327 | 373 |
| 328 keys = list(rval.keys()) | 374 keys = list(rval.keys()) |
| 329 for k in keys: | 375 for k in keys: |
| 330 if k.startswith('test'): | 376 if k.startswith("test"): |
| 331 rval['mean_' + k] = np.mean(rval[k]) | 377 rval["mean_" + k] = np.mean(rval[k]) |
| 332 rval['std_' + k] = np.std(rval[k]) | 378 rval["std_" + k] = np.std(rval[k]) |
| 333 if k.endswith('time'): | 379 if k.endswith("time"): |
| 334 rval.pop(k) | 380 rval.pop(k) |
| 335 rval = pd.DataFrame(rval) | 381 rval = pd.DataFrame(rval) |
| 336 rval = rval[sorted(rval.columns)] | 382 rval = rval[sorted(rval.columns)] |
| 337 rval.to_csv(path_or_buf=outfile, sep='\t', header=True, index=False) | 383 rval.to_csv(path_or_buf=outfile, sep="\t", header=True, index=False) |
| 338 | 384 |
| 339 | 385 |
| 340 def _do_train_test_split_val(searcher, X, y, params, error_score='raise', | 386 def _do_train_test_split_val( |
| 341 primary_scoring=None, groups=None, | 387 searcher, |
| 342 outfile=None): | 388 X, |
| 343 """ do train test split, searchCV validates on the train and then use | 389 y, |
| 390 params, | |
| 391 error_score="raise", | |
| 392 primary_scoring=None, | |
| 393 groups=None, | |
| 394 outfile=None, | |
| 395 ): | |
| 396 """do train test split, searchCV validates on the train and then use | |
| 344 the best_estimator_ to evaluate on the test | 397 the best_estimator_ to evaluate on the test |
| 345 | 398 |
| 346 Returns | 399 Returns |
| 347 -------- | 400 -------- |
| 348 Fitted SearchCV object | 401 Fitted SearchCV object |
| 349 """ | 402 """ |
| 350 train_test_split = try_get_attr( | 403 train_test_split = try_get_attr("galaxy_ml.model_validations", "train_test_split") |
| 351 'galaxy_ml.model_validations', 'train_test_split') | 404 split_options = params["outer_split"] |
| 352 split_options = params['outer_split'] | |
| 353 | 405 |
| 354 # splits | 406 # splits |
| 355 if split_options['shuffle'] == 'stratified': | 407 if split_options["shuffle"] == "stratified": |
| 356 split_options['labels'] = y | 408 split_options["labels"] = y |
| 357 X, X_test, y, y_test = train_test_split(X, y, **split_options) | 409 X, X_test, y, y_test = train_test_split(X, y, **split_options) |
| 358 elif split_options['shuffle'] == 'group': | 410 elif split_options["shuffle"] == "group": |
| 359 if groups is None: | 411 if groups is None: |
| 360 raise ValueError("No group based CV option was choosen for " | 412 raise ValueError( |
| 361 "group shuffle!") | 413 "No group based CV option was choosen for " "group shuffle!" |
| 362 split_options['labels'] = groups | 414 ) |
| 415 split_options["labels"] = groups | |
| 363 if y is None: | 416 if y is None: |
| 364 X, X_test, groups, _ =\ | 417 X, X_test, groups, _ = train_test_split(X, groups, **split_options) |
| 365 train_test_split(X, groups, **split_options) | |
| 366 else: | 418 else: |
| 367 X, X_test, y, y_test, groups, _ =\ | 419 X, X_test, y, y_test, groups, _ = train_test_split( |
| 368 train_test_split(X, y, groups, **split_options) | 420 X, y, groups, **split_options |
| 421 ) | |
| 369 else: | 422 else: |
| 370 if split_options['shuffle'] == 'None': | 423 if split_options["shuffle"] == "None": |
| 371 split_options['shuffle'] = None | 424 split_options["shuffle"] = None |
| 372 X, X_test, y, y_test =\ | 425 X, X_test, y, y_test = train_test_split(X, y, **split_options) |
| 373 train_test_split(X, y, **split_options) | 426 |
| 374 | 427 if error_score == "raise": |
| 375 if error_score == 'raise': | |
| 376 searcher.fit(X, y, groups=groups) | 428 searcher.fit(X, y, groups=groups) |
| 377 else: | 429 else: |
| 378 warnings.simplefilter('always', FitFailedWarning) | 430 warnings.simplefilter("always", FitFailedWarning) |
| 379 with warnings.catch_warnings(record=True) as w: | 431 with warnings.catch_warnings(record=True) as w: |
| 380 try: | 432 try: |
| 381 searcher.fit(X, y, groups=groups) | 433 searcher.fit(X, y, groups=groups) |
| 382 except ValueError: | 434 except ValueError: |
| 383 pass | 435 pass |
| 384 for warning in w: | 436 for warning in w: |
| 385 print(repr(warning.message)) | 437 print(repr(warning.message)) |
| 386 | 438 |
| 387 scorer_ = searcher.scorer_ | 439 scorer_ = searcher.scorer_ |
| 388 if isinstance(scorer_, collections.Mapping): | 440 |
| 389 is_multimetric = True | 441 best_estimator_ = getattr(searcher, "best_estimator_") |
| 442 | |
| 443 # TODO Solve deep learning models in pipeline | |
| 444 if best_estimator_.__class__.__name__ == "KerasGBatchClassifier": | |
| 445 test_score = best_estimator_.evaluate( | |
| 446 X_test, | |
| 447 scorer=scorer_, | |
| 448 ) | |
| 390 else: | 449 else: |
| 391 is_multimetric = False | 450 test_score = _score(best_estimator_, X_test, y_test, scorer_) |
| 392 | 451 |
| 393 best_estimator_ = getattr(searcher, 'best_estimator_') | 452 if not isinstance(scorer_, dict): |
| 394 | |
| 395 # TODO Solve deep learning models in pipeline | |
| 396 if best_estimator_.__class__.__name__ == 'KerasGBatchClassifier': | |
| 397 test_score = best_estimator_.evaluate( | |
| 398 X_test, scorer=scorer_, is_multimetric=is_multimetric) | |
| 399 else: | |
| 400 test_score = _score(best_estimator_, X_test, | |
| 401 y_test, scorer_, | |
| 402 is_multimetric=is_multimetric) | |
| 403 | |
| 404 if not is_multimetric: | |
| 405 test_score = {primary_scoring: test_score} | 453 test_score = {primary_scoring: test_score} |
| 406 for key, value in test_score.items(): | 454 for key, value in test_score.items(): |
| 407 test_score[key] = [value] | 455 test_score[key] = [value] |
| 408 result_df = pd.DataFrame(test_score) | 456 result_df = pd.DataFrame(test_score) |
| 409 result_df.to_csv(path_or_buf=outfile, sep='\t', header=True, | 457 result_df.to_csv(path_or_buf=outfile, sep="\t", header=True, index=False) |
| 410 index=False) | |
| 411 | 458 |
| 412 return searcher | 459 return searcher |
| 413 | 460 |
| 414 | 461 |
| 415 def main(inputs, infile_estimator, infile1, infile2, | 462 def _set_memory(estimator, memory): |
| 416 outfile_result, outfile_object=None, | 463 """set memory cache |
| 417 outfile_weights=None, groups=None, | 464 |
| 418 ref_seq=None, intervals=None, targets=None, | 465 Parameters |
| 419 fasta_path=None): | 466 ---------- |
| 467 estimator : python object | |
| 468 memory : joblib.Memory object | |
| 469 | |
| 470 Returns | |
| 471 ------- | |
| 472 estimator : estimator object after setting new attributes | |
| 473 """ | |
| 474 if isinstance(estimator, IRAPSClassifier): | |
| 475 estimator.set_params(memory=memory) | |
| 476 return estimator | |
| 477 | |
| 478 estimator_params = estimator.get_params() | |
| 479 | |
| 480 new_params = {} | |
| 481 for k in estimator_params.keys(): | |
| 482 if k.endswith("irapsclassifier__memory"): | |
| 483 new_params[k] = memory | |
| 484 | |
| 485 estimator.set_params(**new_params) | |
| 486 | |
| 487 return estimator | |
| 488 | |
| 489 | |
| 490 def main( | |
| 491 inputs, | |
| 492 infile_estimator, | |
| 493 infile1, | |
| 494 infile2, | |
| 495 outfile_result, | |
| 496 outfile_object=None, | |
| 497 groups=None, | |
| 498 ref_seq=None, | |
| 499 intervals=None, | |
| 500 targets=None, | |
| 501 fasta_path=None, | |
| 502 ): | |
| 420 """ | 503 """ |
| 421 Parameter | 504 Parameter |
| 422 --------- | 505 --------- |
| 423 inputs : str | 506 inputs : str |
| 424 File path to galaxy tool parameter | 507 File path to galaxy tool parameter. |
| 425 | 508 |
| 426 infile_estimator : str | 509 infile_estimator : str |
| 427 File path to estimator | 510 File path to estimator. |
| 428 | 511 |
| 429 infile1 : str | 512 infile1 : str |
| 430 File path to dataset containing features | 513 File path to dataset containing features |
| 431 | 514 |
| 432 infile2 : str | 515 infile2 : str |
| 435 outfile_result : str | 518 outfile_result : str |
| 436 File path to save the results, either cv_results or test result | 519 File path to save the results, either cv_results or test result |
| 437 | 520 |
| 438 outfile_object : str, optional | 521 outfile_object : str, optional |
| 439 File path to save searchCV object | 522 File path to save searchCV object |
| 440 | |
| 441 outfile_weights : str, optional | |
| 442 File path to save model weights | |
| 443 | 523 |
| 444 groups : str | 524 groups : str |
| 445 File path to dataset containing groups labels | 525 File path to dataset containing groups labels |
| 446 | 526 |
| 447 ref_seq : str | 527 ref_seq : str |
| 454 File path to dataset compressed target bed file | 534 File path to dataset compressed target bed file |
| 455 | 535 |
| 456 fasta_path : str | 536 fasta_path : str |
| 457 File path to dataset containing fasta file | 537 File path to dataset containing fasta file |
| 458 """ | 538 """ |
| 459 warnings.simplefilter('ignore') | 539 warnings.simplefilter("ignore") |
| 460 | 540 |
| 461 # store read dataframe object | 541 # store read dataframe object |
| 462 loaded_df = {} | 542 loaded_df = {} |
| 463 | 543 |
| 464 with open(inputs, 'r') as param_handler: | 544 with open(inputs, "r") as param_handler: |
| 465 params = json.load(param_handler) | 545 params = json.load(param_handler) |
| 466 | 546 |
| 467 # Override the refit parameter | 547 # Override the refit parameter |
| 468 params['search_schemes']['options']['refit'] = True \ | 548 params["options"]["refit"] = ( |
| 469 if params['save'] != 'nope' else False | 549 True |
| 470 | 550 if ( |
| 471 with open(infile_estimator, 'rb') as estimator_handler: | 551 params["save"] != "nope" |
| 472 estimator = load_model(estimator_handler) | 552 or params["outer_split"]["split_mode"] == "nested_cv" |
| 473 | 553 ) |
| 474 optimizer = params['search_schemes']['selected_search_scheme'] | 554 else False |
| 475 optimizer = getattr(model_selection, optimizer) | 555 ) |
| 556 | |
| 557 estimator = load_model_from_h5(infile_estimator) | |
| 558 | |
| 559 estimator = clean_params(estimator) | |
| 560 | |
| 561 if estimator.__class__.__name__ == "KerasGBatchClassifier": | |
| 562 _fit_and_score = try_get_attr( | |
| 563 "galaxy_ml.model_validations", | |
| 564 "_fit_and_score", | |
| 565 ) | |
| 566 | |
| 567 setattr(_search, "_fit_and_score", _fit_and_score) | |
| 568 setattr(_validation, "_fit_and_score", _fit_and_score) | |
| 569 | |
| 570 search_algos_and_options = params["search_algos"] | |
| 571 optimizer = search_algos_and_options.pop("selected_search_algo") | |
| 572 if optimizer == "skopt.BayesSearchCV": | |
| 573 optimizer = BayesSearchCV | |
| 574 else: | |
| 575 optimizer = getattr(model_selection, optimizer) | |
| 476 | 576 |
| 477 # handle gridsearchcv options | 577 # handle gridsearchcv options |
| 478 options = params['search_schemes']['options'] | 578 options = params["options"] |
| 579 options.update(search_algos_and_options) | |
| 479 | 580 |
| 480 if groups: | 581 if groups: |
| 481 header = 'infer' if (options['cv_selector']['groups_selector'] | 582 header = ( |
| 482 ['header_g']) else None | 583 "infer" if (options["cv_selector"]["groups_selector"]["header_g"]) else None |
| 483 column_option = (options['cv_selector']['groups_selector'] | 584 ) |
| 484 ['column_selector_options_g'] | 585 column_option = options["cv_selector"]["groups_selector"][ |
| 485 ['selected_column_selector_option_g']) | 586 "column_selector_options_g" |
| 486 if column_option in ['by_index_number', 'all_but_by_index_number', | 587 ]["selected_column_selector_option_g"] |
| 487 'by_header_name', 'all_but_by_header_name']: | 588 if column_option in [ |
| 488 c = (options['cv_selector']['groups_selector'] | 589 "by_index_number", |
| 489 ['column_selector_options_g']['col_g']) | 590 "all_but_by_index_number", |
| 591 "by_header_name", | |
| 592 "all_but_by_header_name", | |
| 593 ]: | |
| 594 c = options["cv_selector"]["groups_selector"]["column_selector_options_g"][ | |
| 595 "col_g" | |
| 596 ] | |
| 490 else: | 597 else: |
| 491 c = None | 598 c = None |
| 492 | 599 |
| 493 df_key = groups + repr(header) | 600 df_key = groups + repr(header) |
| 494 | 601 |
| 495 groups = pd.read_csv(groups, sep='\t', header=header, | 602 groups = pd.read_csv(groups, sep="\t", header=header, parse_dates=True) |
| 496 parse_dates=True) | |
| 497 loaded_df[df_key] = groups | 603 loaded_df[df_key] = groups |
| 498 | 604 |
| 499 groups = read_columns( | 605 groups = read_columns( |
| 500 groups, | 606 groups, |
| 501 c=c, | 607 c=c, |
| 502 c_option=column_option, | 608 c_option=column_option, |
| 503 sep='\t', | 609 sep="\t", |
| 504 header=header, | 610 header=header, |
| 505 parse_dates=True) | 611 parse_dates=True, |
| 612 ) | |
| 506 groups = groups.ravel() | 613 groups = groups.ravel() |
| 507 options['cv_selector']['groups_selector'] = groups | 614 options["cv_selector"]["groups_selector"] = groups |
| 508 | 615 |
| 509 splitter, groups = get_cv(options.pop('cv_selector')) | 616 cv_selector = options.pop("cv_selector") |
| 510 options['cv'] = splitter | 617 if Version(galaxy_ml_version) < Version("0.8.3"): |
| 511 primary_scoring = options['scoring']['primary_scoring'] | 618 cv_selector.pop("n_stratification_bins", None) |
| 512 options['scoring'] = get_scoring(options['scoring']) | 619 splitter, groups = get_cv(cv_selector) |
| 513 if options['error_score']: | 620 options["cv"] = splitter |
| 514 options['error_score'] = 'raise' | 621 primary_scoring = options["scoring"]["primary_scoring"] |
| 622 options["scoring"] = get_scoring(options["scoring"]) | |
| 623 # TODO make BayesSearchCV support multiple scoring | |
| 624 if optimizer == "skopt.BayesSearchCV" and isinstance(options["scoring"], dict): | |
| 625 options["scoring"] = options["scoring"][primary_scoring] | |
| 626 warnings.warn( | |
| 627 "BayesSearchCV doesn't support multiple " | |
| 628 "scorings! Primary scoring is used." | |
| 629 ) | |
| 630 if options["error_score"]: | |
| 631 options["error_score"] = "raise" | |
| 515 else: | 632 else: |
| 516 options['error_score'] = np.NaN | 633 options["error_score"] = np.NaN |
| 517 if options['refit'] and isinstance(options['scoring'], dict): | 634 if options["refit"] and isinstance(options["scoring"], dict): |
| 518 options['refit'] = primary_scoring | 635 options["refit"] = primary_scoring |
| 519 if 'pre_dispatch' in options and options['pre_dispatch'] == '': | 636 if "pre_dispatch" in options and options["pre_dispatch"] == "": |
| 520 options['pre_dispatch'] = None | 637 options["pre_dispatch"] = None |
| 521 | 638 |
| 522 params_builder = params['search_schemes']['search_params_builder'] | 639 params_builder = params["search_params_builder"] |
| 523 param_grid = _eval_search_params(params_builder) | 640 param_grid = _eval_search_params(params_builder) |
| 524 | 641 |
| 525 estimator = clean_params(estimator) | |
| 526 | |
| 527 # save the SearchCV object without fit | 642 # save the SearchCV object without fit |
| 528 if params['save'] == 'save_no_fit': | 643 if params["save"] == "save_no_fit": |
| 529 searcher = optimizer(estimator, param_grid, **options) | 644 searcher = optimizer(estimator, param_grid, **options) |
| 530 print(searcher) | 645 dump_model_to_h5(searcher, outfile_object) |
| 531 with open(outfile_object, 'wb') as output_handler: | |
| 532 pickle.dump(searcher, output_handler, | |
| 533 pickle.HIGHEST_PROTOCOL) | |
| 534 return 0 | 646 return 0 |
| 535 | 647 |
| 536 # read inputs and loads new attributes, like paths | 648 # read inputs and loads new attributes, like paths |
| 537 estimator, X, y = _handle_X_y(estimator, params, infile1, infile2, | 649 estimator, X, y = _handle_X_y( |
| 538 loaded_df=loaded_df, ref_seq=ref_seq, | 650 estimator, |
| 539 intervals=intervals, targets=targets, | 651 params, |
| 540 fasta_path=fasta_path) | 652 infile1, |
| 653 infile2, | |
| 654 loaded_df=loaded_df, | |
| 655 ref_seq=ref_seq, | |
| 656 intervals=intervals, | |
| 657 targets=targets, | |
| 658 fasta_path=fasta_path, | |
| 659 ) | |
| 660 | |
| 661 label_encoder = LabelEncoder() | |
| 662 if get_main_estimator(estimator).__class__.__name__ == "XGBClassifier": | |
| 663 y = label_encoder.fit_transform(y) | |
| 541 | 664 |
| 542 # cache iraps_core fits could increase search speed significantly | 665 # cache iraps_core fits could increase search speed significantly |
| 543 memory = joblib.Memory(location=CACHE_DIR, verbose=0) | 666 memory = joblib.Memory(location=CACHE_DIR, verbose=0) |
| 544 main_est = get_main_estimator(estimator) | 667 estimator = _set_memory(estimator, memory) |
| 545 if main_est.__class__.__name__ == 'IRAPSClassifier': | |
| 546 main_est.set_params(memory=memory) | |
| 547 | 668 |
| 548 searcher = optimizer(estimator, param_grid, **options) | 669 searcher = optimizer(estimator, param_grid, **options) |
| 549 | 670 |
| 550 split_mode = params['outer_split'].pop('split_mode') | 671 split_mode = params["outer_split"].pop("split_mode") |
| 551 | 672 |
| 552 if split_mode == 'nested_cv': | 673 # Nested CV |
| 553 # make sure refit is choosen | 674 if split_mode == "nested_cv": |
| 554 # this could be True for sklearn models, but not the case for | 675 cv_selector = params["outer_split"]["cv_selector"] |
| 555 # deep learning models | 676 if Version(galaxy_ml_version) < Version("0.8.3"): |
| 556 if not options['refit'] and \ | 677 cv_selector.pop("n_stratification_bins", None) |
| 557 not all(hasattr(estimator, attr) | 678 outer_cv, _ = get_cv(cv_selector) |
| 558 for attr in ('config', 'model_type')): | |
| 559 warnings.warn("Refit is change to `True` for nested validation!") | |
| 560 setattr(searcher, 'refit', True) | |
| 561 | |
| 562 outer_cv, _ = get_cv(params['outer_split']['cv_selector']) | |
| 563 # nested CV, outer cv using cross_validate | 679 # nested CV, outer cv using cross_validate |
| 564 if options['error_score'] == 'raise': | 680 if options["error_score"] == "raise": |
| 565 rval = cross_validate( | 681 rval = cross_validate( |
| 566 searcher, X, y, scoring=options['scoring'], | 682 searcher, |
| 567 cv=outer_cv, n_jobs=N_JOBS, | 683 X, |
| 568 verbose=options['verbose'], | 684 y, |
| 569 return_estimator=(params['save'] == 'save_estimator'), | 685 groups=groups, |
| 570 error_score=options['error_score'], | 686 scoring=options["scoring"], |
| 571 return_train_score=True) | 687 cv=outer_cv, |
| 688 n_jobs=N_JOBS, | |
| 689 verbose=options["verbose"], | |
| 690 fit_params={"groups": groups}, | |
| 691 return_estimator=(params["save"] == "save_estimator"), | |
| 692 error_score=options["error_score"], | |
| 693 return_train_score=True, | |
| 694 ) | |
| 572 else: | 695 else: |
| 573 warnings.simplefilter('always', FitFailedWarning) | 696 warnings.simplefilter("always", FitFailedWarning) |
| 574 with warnings.catch_warnings(record=True) as w: | 697 with warnings.catch_warnings(record=True) as w: |
| 575 try: | 698 try: |
| 576 rval = cross_validate( | 699 rval = cross_validate( |
| 577 searcher, X, y, | 700 searcher, |
| 578 scoring=options['scoring'], | 701 X, |
| 579 cv=outer_cv, n_jobs=N_JOBS, | 702 y, |
| 580 verbose=options['verbose'], | 703 groups=groups, |
| 581 return_estimator=(params['save'] == 'save_estimator'), | 704 scoring=options["scoring"], |
| 582 error_score=options['error_score'], | 705 cv=outer_cv, |
| 583 return_train_score=True) | 706 n_jobs=N_JOBS, |
| 707 verbose=options["verbose"], | |
| 708 fit_params={"groups": groups}, | |
| 709 return_estimator=(params["save"] == "save_estimator"), | |
| 710 error_score=options["error_score"], | |
| 711 return_train_score=True, | |
| 712 ) | |
| 584 except ValueError: | 713 except ValueError: |
| 585 pass | 714 pass |
| 586 for warning in w: | 715 for warning in w: |
| 587 print(repr(warning.message)) | 716 print(repr(warning.message)) |
| 588 | 717 |
| 589 fitted_searchers = rval.pop('estimator', []) | 718 fitted_searchers = rval.pop("estimator", []) |
| 590 if fitted_searchers: | 719 if fitted_searchers: |
| 591 import os | 720 import os |
| 721 | |
| 592 pwd = os.getcwd() | 722 pwd = os.getcwd() |
| 593 save_dir = os.path.join(pwd, 'cv_results_in_folds') | 723 save_dir = os.path.join(pwd, "cv_results_in_folds") |
| 594 try: | 724 try: |
| 595 os.mkdir(save_dir) | 725 os.mkdir(save_dir) |
| 596 for idx, obj in enumerate(fitted_searchers): | 726 for idx, obj in enumerate(fitted_searchers): |
| 597 target_name = 'cv_results_' + '_' + 'split%d' % idx | 727 target_name = "cv_results_" + "_" + "split%d" % idx |
| 598 target_path = os.path.join(pwd, save_dir, target_name) | 728 target_path = os.path.join(pwd, save_dir, target_name) |
| 599 cv_results_ = getattr(obj, 'cv_results_', None) | 729 cv_results_ = getattr(obj, "cv_results_", None) |
| 600 if not cv_results_: | 730 if not cv_results_: |
| 601 print("%s is not available" % target_name) | 731 print("%s is not available" % target_name) |
| 602 continue | 732 continue |
| 603 cv_results_ = pd.DataFrame(cv_results_) | 733 cv_results_ = pd.DataFrame(cv_results_) |
| 604 cv_results_ = cv_results_[sorted(cv_results_.columns)] | 734 cv_results_ = cv_results_[sorted(cv_results_.columns)] |
| 605 cv_results_.to_csv(target_path, sep='\t', header=True, | 735 cv_results_.to_csv(target_path, sep="\t", header=True, index=False) |
| 606 index=False) | |
| 607 except Exception as e: | 736 except Exception as e: |
| 608 print(e) | 737 print(e) |
| 609 finally: | |
| 610 del os | |
| 611 | 738 |
| 612 keys = list(rval.keys()) | 739 keys = list(rval.keys()) |
| 613 for k in keys: | 740 for k in keys: |
| 614 if k.startswith('test'): | 741 if k.startswith("test"): |
| 615 rval['mean_' + k] = np.mean(rval[k]) | 742 rval["mean_" + k] = np.mean(rval[k]) |
| 616 rval['std_' + k] = np.std(rval[k]) | 743 rval["std_" + k] = np.std(rval[k]) |
| 617 if k.endswith('time'): | 744 if k.endswith("time"): |
| 618 rval.pop(k) | 745 rval.pop(k) |
| 619 rval = pd.DataFrame(rval) | 746 rval = pd.DataFrame(rval) |
| 620 rval = rval[sorted(rval.columns)] | 747 rval = rval[sorted(rval.columns)] |
| 621 rval.to_csv(path_or_buf=outfile_result, sep='\t', header=True, | 748 rval.to_csv(path_or_buf=outfile_result, sep="\t", header=True, index=False) |
| 622 index=False) | |
| 623 | 749 |
| 624 return 0 | 750 return 0 |
| 625 | 751 |
| 626 # deprecate train test split mode | 752 # deprecate train test split mode |
| 627 """searcher = _do_train_test_split_val( | 753 """searcher = _do_train_test_split_val( |
| 632 outfile=outfile_result)""" | 758 outfile=outfile_result)""" |
| 633 | 759 |
| 634 # no outer split | 760 # no outer split |
| 635 else: | 761 else: |
| 636 searcher.set_params(n_jobs=N_JOBS) | 762 searcher.set_params(n_jobs=N_JOBS) |
| 637 if options['error_score'] == 'raise': | 763 if options["error_score"] == "raise": |
| 638 searcher.fit(X, y, groups=groups) | 764 searcher.fit(X, y, groups=groups) |
| 639 else: | 765 else: |
| 640 warnings.simplefilter('always', FitFailedWarning) | 766 warnings.simplefilter("always", FitFailedWarning) |
| 641 with warnings.catch_warnings(record=True) as w: | 767 with warnings.catch_warnings(record=True) as w: |
| 642 try: | 768 try: |
| 643 searcher.fit(X, y, groups=groups) | 769 searcher.fit(X, y, groups=groups) |
| 644 except ValueError: | 770 except ValueError: |
| 645 pass | 771 pass |
| 646 for warning in w: | 772 for warning in w: |
| 647 print(repr(warning.message)) | 773 print(repr(warning.message)) |
| 648 | 774 |
| 649 cv_results = pd.DataFrame(searcher.cv_results_) | 775 cv_results = pd.DataFrame(searcher.cv_results_) |
| 650 cv_results = cv_results[sorted(cv_results.columns)] | 776 cv_results = cv_results[sorted(cv_results.columns)] |
| 651 cv_results.to_csv(path_or_buf=outfile_result, sep='\t', | 777 cv_results.to_csv( |
| 652 header=True, index=False) | 778 path_or_buf=outfile_result, sep="\t", header=True, index=False |
| 779 ) | |
| 653 | 780 |
| 654 memory.clear(warn=False) | 781 memory.clear(warn=False) |
| 655 | 782 |
| 656 # output best estimator, and weights if applicable | 783 # output best estimator, and weights if applicable |
| 657 if outfile_object: | 784 if outfile_object: |
| 658 best_estimator_ = getattr(searcher, 'best_estimator_', None) | 785 best_estimator_ = getattr(searcher, "best_estimator_", None) |
| 659 if not best_estimator_: | 786 if not best_estimator_: |
| 660 warnings.warn("GridSearchCV object has no attribute " | 787 warnings.warn( |
| 661 "'best_estimator_', because either it's " | 788 "GridSearchCV object has no attribute " |
| 662 "nested gridsearch or `refit` is False!") | 789 "'best_estimator_', because either it's " |
| 790 "nested gridsearch or `refit` is False!" | |
| 791 ) | |
| 663 return | 792 return |
| 664 | 793 |
| 665 # clean params | 794 dump_model_to_h5(best_estimator_, outfile_object) |
| 666 best_estimator_ = clean_params(best_estimator_) | 795 |
| 667 | 796 |
| 668 main_est = get_main_estimator(best_estimator_) | 797 if __name__ == "__main__": |
| 669 | |
| 670 if hasattr(main_est, 'model_') \ | |
| 671 and hasattr(main_est, 'save_weights'): | |
| 672 if outfile_weights: | |
| 673 main_est.save_weights(outfile_weights) | |
| 674 del main_est.model_ | |
| 675 del main_est.fit_params | |
| 676 del main_est.model_class_ | |
| 677 del main_est.validation_data | |
| 678 if getattr(main_est, 'data_generator_', None): | |
| 679 del main_est.data_generator_ | |
| 680 | |
| 681 with open(outfile_object, 'wb') as output_handler: | |
| 682 print("Best estimator is saved: %s " % repr(best_estimator_)) | |
| 683 pickle.dump(best_estimator_, output_handler, | |
| 684 pickle.HIGHEST_PROTOCOL) | |
| 685 | |
| 686 | |
| 687 if __name__ == '__main__': | |
| 688 aparser = argparse.ArgumentParser() | 798 aparser = argparse.ArgumentParser() |
| 689 aparser.add_argument("-i", "--inputs", dest="inputs", required=True) | 799 aparser.add_argument("-i", "--inputs", dest="inputs", required=True) |
| 690 aparser.add_argument("-e", "--estimator", dest="infile_estimator") | 800 aparser.add_argument("-e", "--estimator", dest="infile_estimator") |
| 691 aparser.add_argument("-X", "--infile1", dest="infile1") | 801 aparser.add_argument("-X", "--infile1", dest="infile1") |
| 692 aparser.add_argument("-y", "--infile2", dest="infile2") | 802 aparser.add_argument("-y", "--infile2", dest="infile2") |
| 693 aparser.add_argument("-O", "--outfile_result", dest="outfile_result") | 803 aparser.add_argument("-O", "--outfile_result", dest="outfile_result") |
| 694 aparser.add_argument("-o", "--outfile_object", dest="outfile_object") | 804 aparser.add_argument("-o", "--outfile_object", dest="outfile_object") |
| 695 aparser.add_argument("-w", "--outfile_weights", dest="outfile_weights") | |
| 696 aparser.add_argument("-g", "--groups", dest="groups") | 805 aparser.add_argument("-g", "--groups", dest="groups") |
| 697 aparser.add_argument("-r", "--ref_seq", dest="ref_seq") | 806 aparser.add_argument("-r", "--ref_seq", dest="ref_seq") |
| 698 aparser.add_argument("-b", "--intervals", dest="intervals") | 807 aparser.add_argument("-b", "--intervals", dest="intervals") |
| 699 aparser.add_argument("-t", "--targets", dest="targets") | 808 aparser.add_argument("-t", "--targets", dest="targets") |
| 700 aparser.add_argument("-f", "--fasta_path", dest="fasta_path") | 809 aparser.add_argument("-f", "--fasta_path", dest="fasta_path") |
| 701 args = aparser.parse_args() | 810 args = aparser.parse_args() |
| 702 | 811 |
| 703 main(args.inputs, args.infile_estimator, args.infile1, args.infile2, | 812 main(**vars(args)) |
| 704 args.outfile_result, outfile_object=args.outfile_object, | |
| 705 outfile_weights=args.outfile_weights, groups=args.groups, | |
| 706 ref_seq=args.ref_seq, intervals=args.intervals, | |
| 707 targets=args.targets, fasta_path=args.fasta_path) |
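
A few sketches of the patterns above, hedged rather than authoritative. First, `_eval_search_params` turns the Galaxy form's search lists into a dict any SearchCV accepts; a leading `:` switches to estimator evaluation so whole preprocessing steps can be swapped. The sketch below writes an equivalent dict by hand instead of going through galaxy_ml's `SafeEval`; the step names `prep`/`clf` are illustrative, not from the tool.

```python
import numpy as np
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline

search_params = {
    # a plain search list such as "np.logspace(-2, 2, 5)" in the form
    "clf__C": np.logspace(-2, 2, 5),
    # a ':'-prefixed list such as ": [StandardScaler(), None]",
    # which swaps out (or disables) an entire pipeline step
    "prep": [preprocessing.StandardScaler(), None],
}

pipe = Pipeline([("prep", preprocessing.StandardScaler()),
                 ("clf", LogisticRegression())])
searcher = GridSearchCV(pipe, search_params, cv=3)
```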
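
`_handle_X_y`'s tabular branch parses a TSV once and memoizes the frame in `loaded_df` under a key built from the path plus header flag, so a file shared between features, targets, and groups is read a single time. A minimal sketch, assuming a hypothetical `features.tsv`; plain positional indexing stands in for galaxy_ml's `read_columns`:

```python
import pandas as pd

loaded_df = {}                      # shared cache, as in the script
infile1 = "features.tsv"            # hypothetical input path
header = "infer"                    # header1 was set in the tool form

df_key = infile1 + repr(header)     # same cache key the script builds
if df_key not in loaded_df:
    loaded_df[df_key] = pd.read_csv(infile1, sep="\t", header=header,
                                    parse_dates=True)
df = loaded_df[df_key]

# stand-in for read_columns(df, c=c, c_option=column_option):
# take every column but the last as the float feature matrix
X = df.iloc[:, :-1].to_numpy().astype(float)
```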
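
In the `nested_cv` branch, the whole inner search is treated as one estimator and scored by an outer `cross_validate`; afterwards the `test_*` keys are summarized and the timing keys dropped before the table is written. A self-contained sketch of that pattern on a toy dataset (the grid and splitters are illustrative):

```python
import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV, KFold, cross_validate
from sklearn.svm import SVC

X, y = make_classification(n_samples=200, random_state=0)
inner_cv = KFold(n_splits=3, shuffle=True, random_state=0)
outer_cv = KFold(n_splits=5, shuffle=True, random_state=0)

# the inner GridSearchCV is just another estimator to the outer loop
searcher = GridSearchCV(SVC(), {"C": [0.1, 1, 10]}, cv=inner_cv)
rval = cross_validate(searcher, X, y, cv=outer_cv, scoring="accuracy",
                      error_score="raise")

# same post-processing the script applies before writing the TSV
for k in list(rval):
    if k.startswith("test"):
        rval["mean_" + k] = np.mean(rval[k])
        rval["std_" + k] = np.std(rval[k])
    if k.endswith("time"):
        rval.pop(k)
print(pd.DataFrame(rval))
```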
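
`_set_memory` points any IRAPSClassifier at a `joblib.Memory` rooted in `CACHE_DIR`, so repeated fits during the search can be replayed from disk. The same mechanism is exposed by an ordinary scikit-learn `Pipeline` through its `memory` argument, which is what this sketch shows:

```python
import os
import joblib
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

CACHE_DIR = os.path.join(os.getcwd(), "cached")
memory = joblib.Memory(location=CACHE_DIR, verbose=0)

# transformers fitted for one parameter setting are reused from disk
# when the search revisits the same setting
pipe = Pipeline([("scale", StandardScaler()),
                 ("clf", LogisticRegression())],
                memory=memory)

# ... fit / search as usual, then clear the cache as the script does
memory.clear(warn=False)
```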
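
When `skopt.BayesSearchCV` is the selected algorithm, the script collapses a multi-metric scoring dict down to the primary scorer, since BayesSearchCV optimizes a single objective. A minimal sketch, assuming scikit-optimize is installed; the search space is illustrative:

```python
from skopt import BayesSearchCV
from sklearn.datasets import load_iris
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
searcher = BayesSearchCV(
    SVC(),
    {"C": (1e-3, 1e3, "log-uniform")},  # a sampled space, not a fixed grid
    n_iter=10,                          # number of sampled settings
    scoring="accuracy",                 # single scorer only
    cv=3,
    random_state=0,
)
searcher.fit(X, y)
print(searcher.best_params_)
```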
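
Finally, the `LabelEncoder` step in `main`: recent xgboost releases expect integer class labels in `[0, n_classes)`, so `y` is transformed whenever the main estimator is an `XGBClassifier`. A tiny sketch of what that encoding does:

```python
import numpy as np
from sklearn.preprocessing import LabelEncoder

y_raw = np.array(["mut", "wt", "wt", "mut"])
y = LabelEncoder().fit_transform(y_raw)   # classes sorted, then indexed
print(y)                                  # -> [0 1 1 0]
```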
