comparison: keras_train_and_eval.py @ 6:81ab4951f2a3 (draft)
"planemo upload for repository https://github.com/bgruening/galaxytools/tree/master/tools/sklearn commit ca87db9c038a6fcf96aa39da50f384865fd932ff"
| author | bgruening |
|---|---|
| date | Tue, 20 Apr 2021 17:09:29 +0000 |
| parents | c0ed68e280a7 |
| children | 82f89e379413 |
| 5:c0ed68e280a7 (old) | 6:81ab4951f2a3 (new) |
|---|---|
| 9 import numpy as np | 9 import numpy as np |
| 10 import pandas as pd | 10 import pandas as pd |
| 11 from galaxy_ml.externals.selene_sdk.utils import compute_score | 11 from galaxy_ml.externals.selene_sdk.utils import compute_score |
| 12 from galaxy_ml.keras_galaxy_models import _predict_generator | 12 from galaxy_ml.keras_galaxy_models import _predict_generator |
| 13 from galaxy_ml.model_validations import train_test_split | 13 from galaxy_ml.model_validations import train_test_split |
| 14 from galaxy_ml.utils import ( | 14 from galaxy_ml.utils import (clean_params, get_main_estimator, |
| 15 clean_params, | 15 get_module, get_scoring, load_model, read_columns, |
| 16 get_main_estimator, | 16 SafeEval, try_get_attr) |
| 17 get_module, | |
| 18 get_scoring, | |
| 19 load_model, | |
| 20 read_columns, | |
| 21 SafeEval, | |
| 22 try_get_attr, | |
| 23 ) | |
| 24 from scipy.io import mmread | 17 from scipy.io import mmread |
| 25 from sklearn.metrics.scorer import _check_multimetric_scoring | 18 from sklearn.metrics.scorer import _check_multimetric_scoring |
| 26 from sklearn.model_selection import _search, _validation | 19 from sklearn.model_selection import _search, _validation |
| 27 from sklearn.model_selection._validation import _score | 20 from sklearn.model_selection._validation import _score |
| 28 from sklearn.pipeline import Pipeline | 21 from sklearn.pipeline import Pipeline |
| 29 from sklearn.utils import indexable, safe_indexing | 22 from sklearn.utils import indexable, safe_indexing |
| 30 | |
| 31 | 23 |
| 32 _fit_and_score = try_get_attr("galaxy_ml.model_validations", "_fit_and_score") | 24 _fit_and_score = try_get_attr("galaxy_ml.model_validations", "_fit_and_score") |
| 33 setattr(_search, "_fit_and_score", _fit_and_score) | 25 setattr(_search, "_fit_and_score", _fit_and_score) |
| 34 setattr(_validation, "_fit_and_score", _fit_and_score) | 26 setattr(_validation, "_fit_and_score", _fit_and_score) |
| 35 | 27 |
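
The hunk above swaps sklearn's private `_fit_and_score` for galaxy_ml's variant by rebinding the attribute on the `_search` and `_validation` modules. A minimal sketch of that monkey-patching pattern, using a synthetic stand-in module rather than sklearn itself (all names here are illustrative):

```python
import types

# Stand-in for the module whose internal helper gets replaced; in the
# script above it is sklearn.model_selection._validation.
lib = types.ModuleType("lib")
lib._fit_and_score = lambda estimator, X, y: "original"

def patched_fit_and_score(estimator, X, y):
    # Replacement behavior; galaxy_ml supplies its own _fit_and_score.
    return "patched"

# Rebind the module-level attribute so every caller that resolves the
# name through the module now reaches the replacement.
setattr(lib, "_fit_and_score", patched_fit_and_score)

print(lib._fit_and_score(None, None, None))  # -> "patched"
```

| 5:c0ed68e280a7 (old) | 6:81ab4951f2a3 (new) |
|---|---|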
| 54 if swap_value == "": | 46 if swap_value == "": |
| 55 continue | 47 continue |
| 56 | 48 |
| 57 param_name = p["sp_name"] | 49 param_name = p["sp_name"] |
| 58 if param_name.lower().endswith(NON_SEARCHABLE): | 50 if param_name.lower().endswith(NON_SEARCHABLE): |
| 59 warnings.warn("Warning: `%s` is not eligible for search and was " "omitted!" % param_name) | 51 warnings.warn( |
| 52 "Warning: `%s` is not eligible for search and was " | |
| 53 "omitted!" % param_name | |
| 54 ) | |
| 60 continue | 55 continue |
| 61 | 56 |
| 62 if not swap_value.startswith(":"): | 57 if not swap_value.startswith(":"): |
| 63 safe_eval = SafeEval(load_scipy=True, load_numpy=True) | 58 safe_eval = SafeEval(load_scipy=True, load_numpy=True) |
| 64 ev = safe_eval(swap_value) | 59 ev = safe_eval(swap_value) |
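
The `swap_value` branch above routes non-column parameter values through `SafeEval`, galaxy_ml's restricted expression evaluator with numpy/scipy names whitelisted. As a rough stand-in for the idea (assumption: this sketch covers only plain Python literals, not the numpy/scipy expressions the real class accepts), `ast.literal_eval` gives the same evaluate-without-executing guarantee:

```python
import ast

def safe_eval_literal(swap_value):
    """Evaluate a user-supplied hyperparameter string without running
    arbitrary code; only Python literals are accepted."""
    try:
        return ast.literal_eval(swap_value)
    except (ValueError, SyntaxError):
        raise ValueError("unsafe or malformed value: %r" % swap_value)

print(safe_eval_literal("[0.1, 0.01, 0.001]"))  # -> [0.1, 0.01, 0.001]
```

| 5:c0ed68e280a7 (old) | 6:81ab4951f2a3 (new) |
|---|---|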
| 97 groups = kwargs["labels"] | 92 groups = kwargs["labels"] |
| 98 n_samples = new_arrays[0].shape[0] | 93 n_samples = new_arrays[0].shape[0] |
| 99 index_arr = np.arange(n_samples) | 94 index_arr = np.arange(n_samples) |
| 100 test = index_arr[np.isin(groups, group_names)] | 95 test = index_arr[np.isin(groups, group_names)] |
| 101 train = index_arr[~np.isin(groups, group_names)] | 96 train = index_arr[~np.isin(groups, group_names)] |
| 102 rval = list(chain.from_iterable((safe_indexing(a, train), safe_indexing(a, test)) for a in new_arrays)) | 97 rval = list( |
| 98 chain.from_iterable( | |
| 99 (safe_indexing(a, train), safe_indexing(a, test)) for a in new_arrays | |
| 100 ) | |
| 101 ) | |
| 103 else: | 102 else: |
| 104 rval = train_test_split(*new_arrays, **kwargs) | 103 rval = train_test_split(*new_arrays, **kwargs) |
| 105 | 104 |
| 106 for pos in nones: | 105 for pos in nones: |
| 107 rval[pos * 2: 2] = [None, None] | 106 rval[pos * 2: 2] = [None, None] |
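
When explicit group names are supplied, the split above sends every row whose group is in `group_names` to the test side and the rest to train, then slices each array with the two index sets. A self-contained sketch of the same logic, with plain numpy fancy indexing standing in for `safe_indexing` (the sample data is made up):

```python
from itertools import chain

import numpy as np

X = np.arange(12).reshape(6, 2)
y = np.array([0, 1, 0, 1, 0, 1])
groups = np.array(["a", "a", "b", "b", "c", "c"])
group_names = ["b"]  # groups routed to the test split

index_arr = np.arange(X.shape[0])
test = index_arr[np.isin(groups, group_names)]
train = index_arr[~np.isin(groups, group_names)]

# Interleave (train, test) slices per array, mirroring the script:
# rval == [X_train, X_test, y_train, y_test]
rval = list(chain.from_iterable((a[train], a[test]) for a in (X, y)))
X_train, X_test, y_train, y_test = rval
print(X_test)  # the two rows whose group is "b"
```

| 5:c0ed68e280a7 (old) | 6:81ab4951f2a3 (new) |
|---|---|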
| 125 if y_true.ndim == 1 or y_true.shape[-1] == 1: | 124 if y_true.ndim == 1 or y_true.shape[-1] == 1: |
| 126 pred_probas = pred_probas.ravel() | 125 pred_probas = pred_probas.ravel() |
| 127 pred_labels = (pred_probas > 0.5).astype("int32") | 126 pred_labels = (pred_probas > 0.5).astype("int32") |
| 128 targets = y_true.ravel().astype("int32") | 127 targets = y_true.ravel().astype("int32") |
| 129 if not is_multimetric: | 128 if not is_multimetric: |
| 130 preds = pred_labels if scorer.__class__.__name__ == "_PredictScorer" else pred_probas | 129 preds = ( |
| 130 pred_labels | |
| 131 if scorer.__class__.__name__ == "_PredictScorer" | |
| 132 else pred_probas | |
| 133 ) | |
| 131 score = scorer._score_func(targets, preds, **scorer._kwargs) | 134 score = scorer._score_func(targets, preds, **scorer._kwargs) |
| 132 | 135 |
| 133 return score | 136 return score |
| 134 else: | 137 else: |
| 135 scores = {} | 138 scores = {} |
| 136 for name, one_scorer in scorer.items(): | 139 for name, one_scorer in scorer.items(): |
| 137 preds = pred_labels if one_scorer.__class__.__name__ == "_PredictScorer" else pred_probas | 140 preds = ( |
| 141 pred_labels | |
| 142 if one_scorer.__class__.__name__ == "_PredictScorer" | |
| 143 else pred_probas | |
| 144 ) | |
| 138 score = one_scorer._score_func(targets, preds, **one_scorer._kwargs) | 145 score = one_scorer._score_func(targets, preds, **one_scorer._kwargs) |
| 139 scores[name] = score | 146 scores[name] = score |
| 140 | 147 |
| 141 # TODO: multi-class metrics | 148 # TODO: multi-class metrics |
| 142 # multi-label | 149 # multi-label |
| 143 else: | 150 else: |
| 144 pred_labels = (pred_probas > 0.5).astype("int32") | 151 pred_labels = (pred_probas > 0.5).astype("int32") |
| 145 targets = y_true.astype("int32") | 152 targets = y_true.astype("int32") |
| 146 if not is_multimetric: | 153 if not is_multimetric: |
| 147 preds = pred_labels if scorer.__class__.__name__ == "_PredictScorer" else pred_probas | 154 preds = ( |
| 155 pred_labels | |
| 156 if scorer.__class__.__name__ == "_PredictScorer" | |
| 157 else pred_probas | |
| 158 ) | |
| 148 score, _ = compute_score(preds, targets, scorer._score_func) | 159 score, _ = compute_score(preds, targets, scorer._score_func) |
| 149 return score | 160 return score |
| 150 else: | 161 else: |
| 151 scores = {} | 162 scores = {} |
| 152 for name, one_scorer in scorer.items(): | 163 for name, one_scorer in scorer.items(): |
| 153 preds = pred_labels if one_scorer.__class__.__name__ == "_PredictScorer" else pred_probas | 164 preds = ( |
| 165 pred_labels | |
| 166 if one_scorer.__class__.__name__ == "_PredictScorer" | |
| 167 else pred_probas | |
| 168 ) | |
| 154 score, _ = compute_score(preds, targets, one_scorer._score_func) | 169 score, _ = compute_score(preds, targets, one_scorer._score_func) |
| 155 scores[name] = score | 170 scores[name] = score |
| 156 | 171 |
| 157 return scores | 172 return scores |
| 158 | 173 |
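
Both halves of `_evaluate` share one dispatch rule: probabilities above 0.5 become hard labels, label-based scorers (`_PredictScorer`) get the labels, and everything else gets the raw probabilities. The same rule expressed through public sklearn metrics instead of scorer internals (the metric choice here is illustrative):

```python
import numpy as np
from sklearn.metrics import accuracy_score, roc_auc_score

y_true = np.array([0, 1, 1, 0, 1])
pred_probas = np.array([0.2, 0.9, 0.6, 0.4, 0.8])

pred_labels = (pred_probas > 0.5).astype("int32")  # hard labels
targets = y_true.ravel().astype("int32")

scores = {
    "accuracy": accuracy_score(targets, pred_labels),  # needs labels
    "roc_auc": roc_auc_score(targets, pred_probas),    # needs probabilities
}
print(scores)
```

| 5:c0ed68e280a7 (old) | 6:81ab4951f2a3 (new) |
|---|---|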
| 241 | 256 |
| 242 input_type = params["input_options"]["selected_input"] | 257 input_type = params["input_options"]["selected_input"] |
| 243 # tabular input | 258 # tabular input |
| 244 if input_type == "tabular": | 259 if input_type == "tabular": |
| 245 header = "infer" if params["input_options"]["header1"] else None | 260 header = "infer" if params["input_options"]["header1"] else None |
| 246 column_option = params["input_options"]["column_selector_options_1"]["selected_column_selector_option"] | 261 column_option = params["input_options"]["column_selector_options_1"][ |
| 262 "selected_column_selector_option" | |
| 263 ] | |
| 247 if column_option in [ | 264 if column_option in [ |
| 248 "by_index_number", | 265 "by_index_number", |
| 249 "all_but_by_index_number", | 266 "all_but_by_index_number", |
| 250 "by_header_name", | 267 "by_header_name", |
| 251 "all_but_by_header_name", | 268 "all_but_by_header_name", |
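
The tabular branch maps the Galaxy form straight onto pandas: the `header1` checkbox toggles `header="infer"` versus headerless parsing, and the column selector decides which columns feed the feature matrix. A minimal sketch of that mapping (the inline TSV and column names are made up):

```python
import io

import pandas as pd

tsv = io.StringIO("f1\tf2\tlabel\n1\t2\t0\n3\t4\t1\n")  # stand-in upload

has_header = True  # the tool's header1 checkbox
header = "infer" if has_header else None
df = pd.read_csv(tsv, sep="\t", header=header, parse_dates=True)

# "by_header_name"-style selection: keep the named feature columns.
X = df[["f1", "f2"]].values
print(X)
```

| 5:c0ed68e280a7 (old) | 6:81ab4951f2a3 (new) |
|---|---|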
| 293 n_intervals = sum(1 for line in open(intervals)) | 310 n_intervals = sum(1 for line in open(intervals)) |
| 294 X = np.arange(n_intervals)[:, np.newaxis] | 311 X = np.arange(n_intervals)[:, np.newaxis] |
| 295 | 312 |
| 296 # Get target y | 313 # Get target y |
| 297 header = "infer" if params["input_options"]["header2"] else None | 314 header = "infer" if params["input_options"]["header2"] else None |
| 298 column_option = params["input_options"]["column_selector_options_2"]["selected_column_selector_option2"] | 315 column_option = params["input_options"]["column_selector_options_2"][ |
| 316 "selected_column_selector_option2" | |
| 317 ] | |
| 299 if column_option in [ | 318 if column_option in [ |
| 300 "by_index_number", | 319 "by_index_number", |
| 301 "all_but_by_index_number", | 320 "all_but_by_index_number", |
| 302 "by_header_name", | 321 "by_header_name", |
| 303 "all_but_by_header_name", | 322 "all_but_by_header_name", |
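
For `refseq_and_interval` input the real features come later from a batch generator, so `X` is just one placeholder row index per line of the intervals file, counted without loading the file into memory. A small sketch (the BED-like content is made up):

```python
import io

import numpy as np

intervals = io.StringIO("chr1\t100\t200\nchr1\t300\t400\nchr2\t50\t150\n")

# One placeholder index per interval; features are generated on the fly.
n_intervals = sum(1 for line in intervals)
X = np.arange(n_intervals)[:, np.newaxis]
print(X.shape)  # (3, 1)
```

| 5:c0ed68e280a7 (old) | 6:81ab4951f2a3 (new) |
|---|---|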
| 311 infile2 = loaded_df[df_key] | 330 infile2 = loaded_df[df_key] |
| 312 else: | 331 else: |
| 313 infile2 = pd.read_csv(infile2, sep="\t", header=header, parse_dates=True) | 332 infile2 = pd.read_csv(infile2, sep="\t", header=header, parse_dates=True) |
| 314 loaded_df[df_key] = infile2 | 333 loaded_df[df_key] = infile2 |
| 315 | 334 |
| 316 y = read_columns(infile2, | 335 y = read_columns( |
| 317 c=c, | 336 infile2, c=c, c_option=column_option, sep="\t", header=header, parse_dates=True |
| 318 c_option=column_option, | 337 ) |
| 319 sep='\t', | |
| 320 header=header, | |
| 321 parse_dates=True) | |
| 322 if len(y.shape) == 2 and y.shape[1] == 1: | 338 if len(y.shape) == 2 and y.shape[1] == 1: |
| 323 y = y.ravel() | 339 y = y.ravel() |
| 324 if input_type == "refseq_and_interval": | 340 if input_type == "refseq_and_interval": |
| 325 estimator.set_params(data_batch_generator__features=y.ravel().tolist()) | 341 estimator.set_params(data_batch_generator__features=y.ravel().tolist()) |
| 326 y = None | 342 y = None |
| 327 # end y | 343 # end y |
| 328 | 344 |
| 329 # load groups | 345 # load groups |
| 330 if groups: | 346 if groups: |
| 331 groups_selector = (params["experiment_schemes"]["test_split"]["split_algos"]).pop("groups_selector") | 347 groups_selector = ( |
| 348 params["experiment_schemes"]["test_split"]["split_algos"] | |
| 349 ).pop("groups_selector") | |
| 332 | 350 |
| 333 header = "infer" if groups_selector["header_g"] else None | 351 header = "infer" if groups_selector["header_g"] else None |
| 334 column_option = groups_selector["column_selector_options_g"]["selected_column_selector_option_g"] | 352 column_option = groups_selector["column_selector_options_g"][ |
| 353 "selected_column_selector_option_g" | |
| 354 ] | |
| 335 if column_option in [ | 355 if column_option in [ |
| 336 "by_index_number", | 356 "by_index_number", |
| 337 "all_but_by_index_number", | 357 "all_but_by_index_number", |
| 338 "by_header_name", | 358 "by_header_name", |
| 339 "all_but_by_header_name", | 359 "all_but_by_header_name", |
| 344 | 364 |
| 345 df_key = groups + repr(header) | 365 df_key = groups + repr(header) |
| 346 if df_key in loaded_df: | 366 if df_key in loaded_df: |
| 347 groups = loaded_df[df_key] | 367 groups = loaded_df[df_key] |
| 348 | 368 |
| 349 groups = read_columns(groups, | 369 groups = read_columns( |
| 350 c=c, | 370 groups, |
| 351 c_option=column_option, | 371 c=c, |
| 352 sep='\t', | 372 c_option=column_option, |
| 353 header=header, | 373 sep="\t", |
| 354 parse_dates=True) | 374 header=header, |
| 375 parse_dates=True, | |
| 376 ) | |
| 355 groups = groups.ravel() | 377 groups = groups.ravel() |
| 356 | 378 |
| 357 # del loaded_df | 379 # del loaded_df |
| 358 del loaded_df | 380 del loaded_df |
| 359 | 381 |
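
Group labels are read through the same `loaded_df` cache used for the target file: frames are keyed by path plus `repr(header)`, so the same file parsed with and without a header row occupies two distinct entries and is never re-parsed under the wrong setting. A sketch of that memoization (the function name is illustrative):

```python
import pandas as pd

loaded_df = {}  # cache: path + repr(header) -> DataFrame

def load_tabular(path, header):
    """Parse a TSV once per (path, header) combination; repeat calls
    with the same key return the cached frame."""
    df_key = path + repr(header)
    if df_key not in loaded_df:
        loaded_df[df_key] = pd.read_csv(
            path, sep="\t", header=header, parse_dates=True
        )
    return loaded_df[df_key]

# load_tabular("groups.tsv", None) and load_tabular("groups.tsv", "infer")
# would cache separately under "groups.tsvNone" and "groups.tsv'infer'".
```

| 5:c0ed68e280a7 (old) | 6:81ab4951f2a3 (new) |
|---|---|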
| 362 main_est = get_main_estimator(estimator) | 384 main_est = get_main_estimator(estimator) |
| 363 if main_est.__class__.__name__ == "IRAPSClassifier": | 385 if main_est.__class__.__name__ == "IRAPSClassifier": |
| 364 main_est.set_params(memory=memory) | 386 main_est.set_params(memory=memory) |
| 365 | 387 |
| 366 # handle scorer, convert to scorer dict | 388 # handle scorer, convert to scorer dict |
| 367 scoring = params['experiment_schemes']['metrics']['scoring'] | 389 scoring = params["experiment_schemes"]["metrics"]["scoring"] |
| 368 if scoring is not None: | 390 if scoring is not None: |
| 369 # get_scoring() expects secondary_scoring to be a comma separated string (not a list) | 391 # get_scoring() expects secondary_scoring to be a comma separated string (not a list) |
| 370 # Check if secondary_scoring is specified | 392 # Check if secondary_scoring is specified |
| 371 secondary_scoring = scoring.get("secondary_scoring", None) | 393 secondary_scoring = scoring.get("secondary_scoring", None) |
| 372 if secondary_scoring is not None: | 394 if secondary_scoring is not None: |
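
The comment in this hunk notes that `get_scoring()` expects `secondary_scoring` as one comma-separated string, not a list. Assuming the Galaxy form can deliver a list, the normalization is a single join:

```python
secondary_scoring = ["precision", "recall"]  # hypothetical form value

# Collapse to the comma-separated string get_scoring() expects.
if isinstance(secondary_scoring, list):
    secondary_scoring = ",".join(secondary_scoring)

print(secondary_scoring)  # -> "precision,recall"
```

| 5:c0ed68e280a7 (old) | 6:81ab4951f2a3 (new) |
|---|---|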
| 383 test_split_options["labels"] = groups | 405 test_split_options["labels"] = groups |
| 384 if test_split_options["shuffle"] == "stratified": | 406 if test_split_options["shuffle"] == "stratified": |
| 385 if y is not None: | 407 if y is not None: |
| 386 test_split_options["labels"] = y | 408 test_split_options["labels"] = y |
| 387 else: | 409 else: |
| 388 raise ValueError("Stratified shuffle split is not " "applicable on empty target values!") | 410 raise ValueError( |
| 411 "Stratified shuffle split is not " "applicable on empty target values!" | |
| 412 ) | |
| 389 | 413 |
| 390 ( | 414 ( |
| 391 X_train, | 415 X_train, |
| 392 X_test, | 416 X_test, |
| 393 y_train, | 417 y_train, |
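
The guard above rejects a stratified shuffle when there is no target to stratify on; with targets available, stratification keeps class proportions equal across both halves. The same behavior through sklearn's public API (the sample data is made up):

```python
import numpy as np
from sklearn.model_selection import train_test_split

X = np.arange(20).reshape(10, 2)
y = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])

# stratify=y preserves the 50/50 class balance in each split; with no
# target there is nothing to stratify on, hence the ValueError above.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.4, stratify=y, random_state=0
)
print(np.bincount(y_train), np.bincount(y_test))  # [3 3] [2 2]
```

| 5:c0ed68e280a7 (old) | 6:81ab4951f2a3 (new) |
|---|---|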
| 406 val_split_options["labels"] = groups_train | 430 val_split_options["labels"] = groups_train |
| 407 if val_split_options["shuffle"] == "stratified": | 431 if val_split_options["shuffle"] == "stratified": |
| 408 if y_train is not None: | 432 if y_train is not None: |
| 409 val_split_options["labels"] = y_train | 433 val_split_options["labels"] = y_train |
| 410 else: | 434 else: |
| 411 raise ValueError("Stratified shuffle split is not " "applicable on empty target values!") | 435 raise ValueError( |
| 436 "Stratified shuffle split is not " | |
| 437 "applicable on empty target values!" | |
| 438 ) | |
| 412 | 439 |
| 413 ( | 440 ( |
| 414 X_train, | 441 X_train, |
| 415 X_val, | 442 X_val, |
| 416 y_train, | 443 y_train, |
| 429 estimator.fit(X_train, y_train) | 456 estimator.fit(X_train, y_train) |
| 430 | 457 |
| 431 if hasattr(estimator, "evaluate"): | 458 if hasattr(estimator, "evaluate"): |
| 432 steps = estimator.prediction_steps | 459 steps = estimator.prediction_steps |
| 433 batch_size = estimator.batch_size | 460 batch_size = estimator.batch_size |
| 434 generator = estimator.data_generator_.flow(X_test, y=y_test, batch_size=batch_size) | 461 generator = estimator.data_generator_.flow( |
| 435 predictions, y_true = _predict_generator(estimator.model_, generator, steps=steps) | 462 X_test, y=y_test, batch_size=batch_size |
| 463 ) | |
| 464 predictions, y_true = _predict_generator( | |
| 465 estimator.model_, generator, steps=steps | |
| 466 ) | |
| 436 scores = _evaluate(y_true, predictions, scorer, is_multimetric=True) | 467 scores = _evaluate(y_true, predictions, scorer, is_multimetric=True) |
| 437 | 468 |
| 438 else: | 469 else: |
| 439 if hasattr(estimator, "predict_proba"): | 470 if hasattr(estimator, "predict_proba"): |
| 440 predictions = estimator.predict_proba(X_test) | 471 predictions = estimator.predict_proba(X_test) |
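
After fitting, evaluation picks the richest interface the estimator offers: a Keras-wrapped model goes through its batch generator and `_predict_generator`, while a plain sklearn classifier falls back to `predict_proba`. A sketch of that capability check on the sklearn side (the model and data are illustrative):

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, 0, 1, 1])
estimator = LogisticRegression().fit(X, y)

# Prefer probabilistic output when available; otherwise hard labels.
if hasattr(estimator, "predict_proba"):
    predictions = estimator.predict_proba(X)
else:
    predictions = estimator.predict(X)
print(predictions.shape)  # (4, 2): per-class probabilities
```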
