# HG changeset patch # User rv43 # Date 1679337026 0 # Node ID fc38431f257f7b9951012b288d9e13dc94ca3f99 # Parent 3fd7398a3a7fdbba53fc4e97d10d40aabfc5ad89 planemo upload for repository https://github.com/rolfverberg/galaxytools commit f8c4bdb31c20c468045ad5e6eb255a293244bc6c diff -r 3fd7398a3a7f -r fc38431f257f fit.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/fit.py Mon Mar 20 18:30:26 2023 +0000 @@ -0,0 +1,2576 @@ +#!/usr/bin/env python3 + +# -*- coding: utf-8 -*- +""" +Created on Mon Dec 6 15:36:22 2021 + +@author: rv43 +""" + +import logging + +from asteval import Interpreter, get_ast_names +from copy import deepcopy +from lmfit import Model, Parameters +from lmfit.model import ModelResult +from lmfit.models import ConstantModel, LinearModel, QuadraticModel, PolynomialModel,\ + ExponentialModel, StepModel, RectangleModel, ExpressionModel, GaussianModel,\ + LorentzianModel +import numpy as np +from os import cpu_count, getpid, listdir, mkdir, path +from re import compile, sub +from shutil import rmtree +try: + from sympy import diff, simplify +except: + pass +try: + from joblib import Parallel, delayed + have_joblib = True +except: + have_joblib = False +try: + import xarray as xr + have_xarray = True +except: + have_xarray = False + +try: + from .general import illegal_value, is_int, is_dict_series, is_index, index_nearest, \ + almost_equal, quick_plot #, eval_expr +except: + try: + from sys import path as syspath + syspath.append(f'/nfs/chess/user/rv43/msnctools/msnctools') + from general import illegal_value, is_int, is_dict_series, is_index, index_nearest, \ + almost_equal, quick_plot #, eval_expr + except: + from general import illegal_value, is_int, is_dict_series, is_index, index_nearest, \ + almost_equal, quick_plot #, eval_expr + +from sys import float_info +float_min = float_info.min +float_max = float_info.max + +# sigma = fwhm_factor*fwhm +fwhm_factor = { + 'gaussian': f'fwhm/(2*sqrt(2*log(2)))', + 'lorentzian': f'0.5*fwhm', + 
'splitlorentzian': f'0.5*fwhm', # sigma = sigma_r + 'voight': f'0.2776*fwhm', # sigma = gamma + 'pseudovoight': f'0.5*fwhm'} # fraction = 0.5 + +# amplitude = height_factor*height*fwhm +height_factor = { + 'gaussian': f'height*fwhm*0.5*sqrt(pi/log(2))', + 'lorentzian': f'height*fwhm*0.5*pi', + 'splitlorentzian': f'height*fwhm*0.5*pi', # sigma = sigma_r + 'voight': f'3.334*height*fwhm', # sigma = gamma + 'pseudovoight': f'1.268*height*fwhm'} # fraction = 0.5 + +class Fit: + """Wrapper class for lmfit + """ + def __init__(self, y, x=None, models=None, normalize=True, **kwargs): + if not isinstance(normalize, bool): + raise ValueError(f'Invalid parameter normalize ({normalize})') + self._mask = None + self._model = None + self._norm = None + self._normalized = False + self._parameters = Parameters() + self._parameter_bounds = None + self._parameter_norms = {} + self._linear_parameters = [] + self._nonlinear_parameters = [] + self._result = None + self._try_linear_fit = True + self._y = None + self._y_norm = None + self._y_range = None + if 'try_linear_fit' in kwargs: + try_linear_fit = kwargs.pop('try_linear_fit') + if not isinstance(try_linear_fit, bool): + illegal_value(try_linear_fit, 'try_linear_fit', 'Fit.fit', raise_error=True) + self._try_linear_fit = try_linear_fit + if y is not None: + if isinstance(y, (tuple, list, np.ndarray)): + self._x = np.asarray(x) + elif have_xarray and isinstance(y, xr.DataArray): + if x is not None: + logging.warning('Ignoring superfluous input x ({x}) in Fit.__init__') + if y.ndim != 1: + illegal_value(y.ndim, 'DataArray dimensions', 'Fit:__init__', raise_error=True) + self._x = np.asarray(y[y.dims[0]]) + else: + illegal_value(y, 'y', 'Fit:__init__', raise_error=True) + self._y = y + if self._x.ndim != 1: + raise ValueError(f'Invalid dimension for input x ({self._x.ndim})') + if self._x.size != self._y.size: + raise ValueError(f'Inconsistent x and y dimensions ({self._x.size} vs '+ + f'{self._y.size})') + if 'mask' in kwargs: + 
self._mask = kwargs.pop('mask') + if self._mask is None: + y_min = float(self._y.min()) + self._y_range = float(self._y.max())-y_min + if normalize and self._y_range > 0.0: + self._norm = (y_min, self._y_range) + else: + self._mask = np.asarray(self._mask).astype(bool) + if self._x.size != self._mask.size: + raise ValueError(f'Inconsistent x and mask dimensions ({self._x.size} vs '+ + f'{self._mask.size})') + y_masked = np.asarray(self._y)[~self._mask] + y_min = float(y_masked.min()) + self._y_range = float(y_masked.max())-y_min + if normalize and self._y_range > 0.0: + if normalize and self._y_range > 0.0: + self._norm = (y_min, self._y_range) + if models is not None: + if callable(models) or isinstance(models, str): + kwargs = self.add_model(models, **kwargs) + elif isinstance(models, (tuple, list)): + for model in models: + kwargs = self.add_model(model, **kwargs) + self.fit(**kwargs) + + @classmethod + def fit_data(cls, y, models, x=None, normalize=True, **kwargs): + return(cls(y, x=x, models=models, normalize=normalize, **kwargs)) + + @property + def best_errors(self): + if self._result is None: + return(None) + return({name:self._result.params[name].stderr for name in sorted(self._result.params) + if name != 'tmp_normalization_offset_c'}) + + @property + def best_fit(self): + if self._result is None: + return(None) + return(self._result.best_fit) + + @property + def best_parameters(self): + if self._result is None: + return(None) + parameters = {} + for name in sorted(self._result.params): + if name != 'tmp_normalization_offset_c': + par = self._result.params[name] + parameters[name] = {'value': par.value, 'error': par.stderr, + 'init_value': par.init_value, 'min': par.min, 'max': par.max, + 'vary': par.vary, 'expr': par.expr} + return(parameters) + + @property + def best_results(self): + """Convert the input data array to a data set and add the fit results. 
+ """ + if self._result is None: + return(None) + if isinstance(self._y, xr.DataArray): + best_results = self._y.to_dataset() + dims = self._y.dims + fit_name = f'{self._y.name}_fit' + else: + coords = {'x': (['x'], self._x)} + dims = ('x') + best_results = xr.Dataset(coords=coords) + best_results['y'] = (dims, self._y) + fit_name = 'y_fit' + best_results[fit_name] = (dims, self.best_fit) + if self._mask is not None: + best_results['mask'] = self._mask + best_results.coords['par_names'] = ('peak', [name for name in self.best_values.keys()]) + best_results['best_values'] = (['par_names'], [v for v in self.best_values.values()]) + best_results['best_errors'] = (['par_names'], [v for v in self.best_errors.values()]) + best_results.attrs['components'] = self.components + return(best_results) + + @property + def best_values(self): + if self._result is None: + return(None) + return({name:self._result.params[name].value for name in sorted(self._result.params) + if name != 'tmp_normalization_offset_c'}) + + @property + def chisqr(self): + if self._result is None: + return(None) + return(self._result.chisqr) + + @property + def components(self): + components = {} + if self._result is None: + logging.warning('Unable to collect components in Fit.components') + return(components) + for component in self._result.components: + if 'tmp_normalization_offset_c' in component.param_names: + continue + parameters = {} + for name in component.param_names: + par = self._parameters[name] + parameters[name] = {'free': par.vary, 'value': self._result.params[name].value} + if par.expr is not None: + parameters[name]['expr'] = par.expr + expr = None + if isinstance(component, ExpressionModel): + name = component._name + if name[-1] == '_': + name = name[:-1] + expr = component.expr + else: + prefix = component.prefix + if len(prefix): + if prefix[-1] == '_': + prefix = prefix[:-1] + name = f'{prefix} ({component._name})' + else: + name = f'{component._name}' + if expr is None: + 
components[name] = {'parameters': parameters} + else: + components[name] = {'expr': expr, 'parameters': parameters} + return(components) + + @property + def covar(self): + if self._result is None: + return(None) + return(self._result.covar) + + @property + def init_parameters(self): + if self._result is None or self._result.init_params is None: + return(None) + parameters = {} + for name in sorted(self._result.init_params): + if name != 'tmp_normalization_offset_c': + par = self._result.init_params[name] + parameters[name] = {'value': par.value, 'min': par.min, 'max': par.max, + 'vary': par.vary, 'expr': par.expr} + return(parameters) + + @property + def init_values(self): + if self._result is None or self._result.init_params is None: + return(None) + return({name:self._result.init_params[name].value for name in + sorted(self._result.init_params) if name != 'tmp_normalization_offset_c'}) + + @property + def normalization_offset(self): + if self._result is None: + return(None) + if self._norm is None: + return(0.0) + else: + if self._result.init_params is not None: + normalization_offset = self._result.init_params['tmp_normalization_offset_c'] + else: + normalization_offset = self._result.params['tmp_normalization_offset_c'] + return(normalization_offset) + + @property + def num_func_eval(self): + if self._result is None: + return(None) + return(self._result.nfev) + + @property + def parameters(self): + return({name:{'min': par.min, 'max': par.max, 'vary': par.vary, 'expr': par.expr} + for name, par in self._parameters.items() if name != 'tmp_normalization_offset_c'}) + + @property + def redchi(self): + if self._result is None: + return(None) + return(self._result.redchi) + + @property + def residual(self): + if self._result is None: + return(None) + return(self._result.residual) + + @property + def success(self): + if self._result is None: + return(None) + if not self._result.success: +# print(f'ier = {self._result.ier}') +# print(f'lmdif_message = 
{self._result.lmdif_message}') +# print(f'message = {self._result.message}') +# print(f'nfev = {self._result.nfev}') +# print(f'redchi = {self._result.redchi}') +# print(f'success = {self._result.success}') + if self._result.ier == 0 or self._result.ier == 5: + logging.warning(f'ier = {self._result.ier}: {self._result.message}') + else: + logging.warning(f'ier = {self._result.ier}: {self._result.message}') + return(True) +# self.print_fit_report() +# self.plot() + return(self._result.success) + + @property + def var_names(self): + """Intended to be used with covar + """ + if self._result is None: + return(None) + return(getattr(self._result, 'var_names', None)) + + @property + def x(self): + return(self._x) + + @property + def y(self): + return(self._y) + + def print_fit_report(self, result=None, show_correl=False): + if result is None: + result = self._result + if result is not None: + print(result.fit_report(show_correl=show_correl)) + + def add_parameter(self, **parameter): + if not isinstance(parameter, dict): + raise ValueError(f'Invalid parameter ({parameter})') + if parameter.get('expr') is not None: + raise KeyError(f'Illegal "expr" key in parameter {parameter}') + name = parameter['name'] + if not isinstance(name, str): + raise ValueError(f'Illegal "name" value ({name}) in parameter {parameter}') + if parameter.get('norm') is None: + self._parameter_norms[name] = False + else: + norm = parameter.pop('norm') + if self._norm is None: + logging.warning(f'Ignoring norm in parameter {name} in '+ + f'Fit.add_parameter (normalization is turned off)') + self._parameter_norms[name] = False + else: + if not isinstance(norm, bool): + raise ValueError(f'Illegal "norm" value ({norm}) in parameter {parameter}') + self._parameter_norms[name] = norm + vary = parameter.get('vary') + if vary is not None: + if not isinstance(vary, bool): + raise ValueError(f'Illegal "vary" value ({vary}) in parameter {parameter}') + if not vary: + if 'min' in parameter: + 
logging.warning(f'Ignoring min in parameter {name} in '+ + f'Fit.add_parameter (vary = {vary})') + parameter.pop('min') + if 'max' in parameter: + logging.warning(f'Ignoring max in parameter {name} in '+ + f'Fit.add_parameter (vary = {vary})') + parameter.pop('max') + if self._norm is not None and name not in self._parameter_norms: + raise ValueError(f'Missing parameter normalization type for paremeter {name}') + self._parameters.add(**parameter) + + def add_model(self, model, prefix=None, parameters=None, parameter_norms=None, **kwargs): + # Create the new model +# print(f'at start add_model:\nself._parameters:\n{self._parameters}') +# print(f'at start add_model: kwargs = {kwargs}') +# print(f'parameters = {parameters}') +# print(f'parameter_norms = {parameter_norms}') +# if len(self._parameters.keys()): +# print('\nAt start adding model:') +# self._parameters.pretty_print() +# print(f'parameter_norms:\n{self._parameter_norms}') + if prefix is not None and not isinstance(prefix, str): + logging.warning('Ignoring illegal prefix: {model} {type(model)}') + prefix = None + if prefix is None: + pprefix = '' + else: + pprefix = prefix + if parameters is not None: + if isinstance(parameters, dict): + parameters = (parameters, ) + elif not is_dict_series(parameters): + illegal_value(parameters, 'parameters', 'Fit.add_model', raise_error=True) + parameters = deepcopy(parameters) + if parameter_norms is not None: + if isinstance(parameter_norms, dict): + parameter_norms = (parameter_norms, ) + if not is_dict_series(parameter_norms): + illegal_value(parameter_norms, 'parameter_norms', 'Fit.add_model', raise_error=True) + new_parameter_norms = {} + if callable(model): + # Linear fit not yet implemented for callable models + self._try_linear_fit = False + if parameter_norms is None: + if parameters is None: + raise ValueError('Either "parameters" or "parameter_norms" is required in '+ + f'{model}') + for par in parameters: + name = par['name'] + if not isinstance(name, str): + 
raise ValueError(f'Illegal "name" value ({name}) in input parameters') + if par.get('norm') is not None: + norm = par.pop('norm') + if not isinstance(norm, bool): + raise ValueError(f'Illegal "norm" value ({norm}) in input parameters') + new_parameter_norms[f'{pprefix}{name}'] = norm + else: + for par in parameter_norms: + name = par['name'] + if not isinstance(name, str): + raise ValueError(f'Illegal "name" value ({name}) in input parameters') + norm = par.get('norm') + if norm is None or not isinstance(norm, bool): + raise ValueError(f'Illegal "norm" value ({norm}) in input parameters') + new_parameter_norms[f'{pprefix}{name}'] = norm + if parameters is not None: + for par in parameters: + if par.get('expr') is not None: + raise KeyError(f'Illegal "expr" key ({par.get("expr")}) in parameter '+ + f'{name} for a callable model {model}') + name = par['name'] + if not isinstance(name, str): + raise ValueError(f'Illegal "name" value ({name}) in input parameters') +# RV FIX callable model will need partial deriv functions for any linear pars to get the linearized matrix, so for now skip linear solution option + newmodel = Model(model, prefix=prefix) + elif isinstance(model, str): + if model == 'constant': # Par: c + newmodel = ConstantModel(prefix=prefix) + new_parameter_norms[f'{pprefix}c'] = True + self._linear_parameters.append(f'{pprefix}c') + elif model == 'linear': # Par: slope, intercept + newmodel = LinearModel(prefix=prefix) + new_parameter_norms[f'{pprefix}slope'] = True + new_parameter_norms[f'{pprefix}intercept'] = True + self._linear_parameters.append(f'{pprefix}slope') + self._linear_parameters.append(f'{pprefix}intercept') + elif model == 'quadratic': # Par: a, b, c + newmodel = QuadraticModel(prefix=prefix) + new_parameter_norms[f'{pprefix}a'] = True + new_parameter_norms[f'{pprefix}b'] = True + new_parameter_norms[f'{pprefix}c'] = True + self._linear_parameters.append(f'{pprefix}a') + self._linear_parameters.append(f'{pprefix}b') + 
self._linear_parameters.append(f'{pprefix}c') + elif model == 'gaussian': # Par: amplitude, center, sigma (fwhm, height) + newmodel = GaussianModel(prefix=prefix) + new_parameter_norms[f'{pprefix}amplitude'] = True + new_parameter_norms[f'{pprefix}center'] = False + new_parameter_norms[f'{pprefix}sigma'] = False + self._linear_parameters.append(f'{pprefix}amplitude') + self._nonlinear_parameters.append(f'{pprefix}center') + self._nonlinear_parameters.append(f'{pprefix}sigma') + # parameter norms for height and fwhm are needed to get correct errors + new_parameter_norms[f'{pprefix}height'] = True + new_parameter_norms[f'{pprefix}fwhm'] = False + elif model == 'lorentzian': # Par: amplitude, center, sigma (fwhm, height) + newmodel = LorentzianModel(prefix=prefix) + new_parameter_norms[f'{pprefix}amplitude'] = True + new_parameter_norms[f'{pprefix}center'] = False + new_parameter_norms[f'{pprefix}sigma'] = False + self._linear_parameters.append(f'{pprefix}amplitude') + self._nonlinear_parameters.append(f'{pprefix}center') + self._nonlinear_parameters.append(f'{pprefix}sigma') + # parameter norms for height and fwhm are needed to get correct errors + new_parameter_norms[f'{pprefix}height'] = True + new_parameter_norms[f'{pprefix}fwhm'] = False + elif model == 'exponential': # Par: amplitude, decay + newmodel = ExponentialModel(prefix=prefix) + new_parameter_norms[f'{pprefix}amplitude'] = True + new_parameter_norms[f'{pprefix}decay'] = False + self._linear_parameters.append(f'{pprefix}amplitude') + self._nonlinear_parameters.append(f'{pprefix}decay') + elif model == 'step': # Par: amplitude, center, sigma + form = kwargs.get('form') + if form is not None: + kwargs.pop('form') + if form is None or form not in ('linear', 'atan', 'arctan', 'erf', 'logistic'): + raise ValueError(f'Invalid parameter form for build-in step model ({form})') + newmodel = StepModel(prefix=prefix, form=form) + new_parameter_norms[f'{pprefix}amplitude'] = True + 
new_parameter_norms[f'{pprefix}center'] = False + new_parameter_norms[f'{pprefix}sigma'] = False + self._linear_parameters.append(f'{pprefix}amplitude') + self._nonlinear_parameters.append(f'{pprefix}center') + self._nonlinear_parameters.append(f'{pprefix}sigma') + elif model == 'rectangle': # Par: amplitude, center1, center2, sigma1, sigma2 + form = kwargs.get('form') + if form is not None: + kwargs.pop('form') + if form is None or form not in ('linear', 'atan', 'arctan', 'erf', 'logistic'): + raise ValueError('Invalid parameter form for build-in rectangle model '+ + f'({form})') + newmodel = RectangleModel(prefix=prefix, form=form) + new_parameter_norms[f'{pprefix}amplitude'] = True + new_parameter_norms[f'{pprefix}center1'] = False + new_parameter_norms[f'{pprefix}center2'] = False + new_parameter_norms[f'{pprefix}sigma1'] = False + new_parameter_norms[f'{pprefix}sigma2'] = False + self._linear_parameters.append(f'{pprefix}amplitude') + self._nonlinear_parameters.append(f'{pprefix}center1') + self._nonlinear_parameters.append(f'{pprefix}center2') + self._nonlinear_parameters.append(f'{pprefix}sigma1') + self._nonlinear_parameters.append(f'{pprefix}sigma2') + elif model == 'expression': # Par: by expression + expr = kwargs['expr'] + if not isinstance(expr, str): + raise ValueError(f'Illegal "expr" value ({expr}) in {model}') + kwargs.pop('expr') + if parameter_norms is not None: + logging.warning('Ignoring parameter_norms (normalization determined from '+ + 'linearity)}') + if parameters is not None: + for par in parameters: + if par.get('expr') is not None: + raise KeyError(f'Illegal "expr" key ({par.get("expr")}) in parameter '+ + f'({par}) for an expression model') + if par.get('norm') is not None: + logging.warning(f'Ignoring "norm" key in parameter ({par}) '+ + '(normalization determined from linearity)}') + par.pop('norm') + name = par['name'] + if not isinstance(name, str): + raise ValueError(f'Illegal "name" value ({name}) in input parameters') + ast = 
Interpreter() + expr_parameters = [name for name in get_ast_names(ast.parse(expr)) + if name != 'x' and name not in self._parameters + and name not in ast.symtable] +# print(f'\nexpr_parameters: {expr_parameters}') +# print(f'expr = {expr}') + if prefix is None: + newmodel = ExpressionModel(expr=expr) + else: + for name in expr_parameters: + expr = sub(rf'\b{name}\b', f'{prefix}{name}', expr) + expr_parameters = [f'{prefix}{name}' for name in expr_parameters] +# print(f'\nexpr_parameters: {expr_parameters}') +# print(f'expr = {expr}') + newmodel = ExpressionModel(expr=expr, name=name) +# print(f'\nnewmodel = {newmodel.__dict__}') +# print(f'params_names = {newmodel._param_names}') +# print(f'params_names = {newmodel.param_names}') + # Remove already existing names + for name in newmodel.param_names.copy(): + if name not in expr_parameters: + newmodel._func_allargs.remove(name) + newmodel._param_names.remove(name) +# print(f'params_names = {newmodel._param_names}') +# print(f'params_names = {newmodel.param_names}') + else: + raise ValueError(f'Unknown build-in fit model ({model})') + else: + illegal_value(model, 'model', 'Fit.add_model', raise_error=True) + + # Add the new model to the current one +# print('\nBefore adding model:') +# print(f'\nnewmodel = {newmodel.__dict__}') +# if len(self._parameters): +# self._parameters.pretty_print() + if self._model is None: + self._model = newmodel + else: + self._model += newmodel + new_parameters = newmodel.make_params() + self._parameters += new_parameters +# print('\nAfter adding model:') +# print(f'\nnewmodel = {newmodel.__dict__}') +# print(f'\nnew_parameters = {new_parameters}') +# self._parameters.pretty_print() + + # Check linearity of expression model paremeters + if isinstance(newmodel, ExpressionModel): + for name in newmodel.param_names: + if not diff(newmodel.expr, name, name): + if name not in self._linear_parameters: + self._linear_parameters.append(name) + new_parameter_norms[name] = True +# print(f'\nADDING 
{name} TO LINEAR') + else: + if name not in self._nonlinear_parameters: + self._nonlinear_parameters.append(name) + new_parameter_norms[name] = False +# print(f'\nADDING {name} TO NONLINEAR') +# print(f'new_parameter_norms:\n{new_parameter_norms}') + + # Scale the default initial model parameters + if self._norm is not None: + for name, norm in new_parameter_norms.copy().items(): + par = self._parameters.get(name) + if par is None: + new_parameter_norms.pop(name) + continue + if par.expr is None and norm: + value = par.value*self._norm[1] + _min = par.min + _max = par.max + if not np.isinf(_min) and abs(_min) != float_min: + _min *= self._norm[1] + if not np.isinf(_max) and abs(_max) != float_min: + _max *= self._norm[1] + par.set(value=value, min=_min, max=_max) +# print('\nAfter norm defaults:') +# self._parameters.pretty_print() +# print(f'parameters:\n{parameters}') +# print(f'all_parameters:\n{list(self.parameters)}') +# print(f'new_parameter_norms:\n{new_parameter_norms}') +# print(f'parameter_norms:\n{self._parameter_norms}') + + # Initialize the model parameters from parameters + if prefix is None: + prefix = "" + if parameters is not None: + for parameter in parameters: + name = parameter['name'] + if not isinstance(name, str): + raise ValueError(f'Illegal "name" value ({name}) in input parameters') + if name not in new_parameters: + name = prefix+name + parameter['name'] = name + if name not in new_parameters: + logging.warning(f'Ignoring superfluous parameter info for {name}') + continue + if name in self._parameters: + parameter.pop('name') + if 'norm' in parameter: + if not isinstance(parameter['norm'], bool): + illegal_value(parameter['norm'], 'norm', 'Fit.add_model', + raise_error=True) + new_parameter_norms[name] = parameter['norm'] + parameter.pop('norm') + if parameter.get('expr') is not None: + if 'value' in parameter: + logging.warning(f'Ignoring value in parameter {name} '+ + f'(set by expression: {parameter["expr"]})') + parameter.pop('value') 
+ if 'vary' in parameter: + logging.warning(f'Ignoring vary in parameter {name} '+ + f'(set by expression: {parameter["expr"]})') + parameter.pop('vary') + if 'min' in parameter: + logging.warning(f'Ignoring min in parameter {name} '+ + f'(set by expression: {parameter["expr"]})') + parameter.pop('min') + if 'max' in parameter: + logging.warning(f'Ignoring max in parameter {name} '+ + f'(set by expression: {parameter["expr"]})') + parameter.pop('max') + if 'vary' in parameter: + if not isinstance(parameter['vary'], bool): + illegal_value(parameter['vary'], 'vary', 'Fit.add_model', + raise_error=True) + if not parameter['vary']: + if 'min' in parameter: + logging.warning(f'Ignoring min in parameter {name} in '+ + f'Fit.add_model (vary = {parameter["vary"]})') + parameter.pop('min') + if 'max' in parameter: + logging.warning(f'Ignoring max in parameter {name} in '+ + f'Fit.add_model (vary = {parameter["vary"]})') + parameter.pop('max') + self._parameters[name].set(**parameter) + parameter['name'] = name + else: + illegal_value(parameter, 'parameter name', 'Fit.model', raise_error=True) + self._parameter_norms = {**self._parameter_norms, **new_parameter_norms} +# print('\nAfter parameter init:') +# self._parameters.pretty_print() +# print(f'parameters:\n{parameters}') +# print(f'new_parameter_norms:\n{new_parameter_norms}') +# print(f'parameter_norms:\n{self._parameter_norms}') +# print(f'kwargs:\n{kwargs}') + + # Initialize the model parameters from kwargs + for name, value in {**kwargs}.items(): + full_name = f'{pprefix}{name}' + if full_name in new_parameter_norms and isinstance(value, (int, float)): + kwargs.pop(name) + if self._parameters[full_name].expr is None: + self._parameters[full_name].set(value=value) + else: + logging.warning(f'Ignoring parameter {name} in Fit.fit (set by expression: '+ + f'{self._parameters[full_name].expr})') +# print('\nAfter kwargs init:') +# self._parameters.pretty_print() +# print(f'parameter_norms:\n{self._parameter_norms}') +# 
print(f'kwargs:\n{kwargs}') + + # Check parameter norms (also need it for expressions to renormalize the errors) + if self._norm is not None and (callable(model) or model == 'expression'): + missing_norm = False + for name in new_parameters.valuesdict(): + if name not in self._parameter_norms: + print(f'new_parameters:\n{new_parameters.valuesdict()}') + print(f'self._parameter_norms:\n{self._parameter_norms}') + logging.error(f'Missing parameter normalization type for {name} in {model}') + missing_norm = True + if missing_norm: + raise ValueError + +# print(f'at end add_model:\nself._parameters:\n{list(self.parameters)}') +# print(f'at end add_model: kwargs = {kwargs}') +# print(f'\nat end add_model: newmodel:\n{newmodel.__dict__}\n') + return(kwargs) + + def fit(self, interactive=False, guess=False, **kwargs): + # Check inputs + if self._model is None: + logging.error('Undefined fit model') + return + if not isinstance(interactive, bool): + illegal_value(interactive, 'interactive', 'Fit.fit', raise_error=True) + if not isinstance(guess, bool): + illegal_value(guess, 'guess', 'Fit.fit', raise_error=True) + if 'try_linear_fit' in kwargs: + try_linear_fit = kwargs.pop('try_linear_fit') + if not isinstance(try_linear_fit, bool): + illegal_value(try_linear_fit, 'try_linear_fit', 'Fit.fit', raise_error=True) + if not self._try_linear_fit: + logging.warning('Ignore superfluous keyword argument "try_linear_fit" (not '+ + 'yet supported for callable models)') + else: + self._try_linear_fit = try_linear_fit +# if self._result is None: +# if 'parameters' in kwargs: +# raise ValueError('Invalid parameter parameters ({kwargs["parameters"]})') +# else: + if self._result is not None: + if guess: + logging.warning('Ignoring input parameter guess in Fit.fit during refitting') + guess = False + + # Check for circular expressions + # FIX TODO +# for name1, par1 in self._parameters.items(): +# if par1.expr is not None: + + # Apply mask if supplied: + if 'mask' in kwargs: + self._mask 
= kwargs.pop('mask') + if self._mask is not None: + self._mask = np.asarray(self._mask).astype(bool) + if self._x.size != self._mask.size: + raise ValueError(f'Inconsistent x and mask dimensions ({self._x.size} vs '+ + f'{self._mask.size})') + + # Estimate initial parameters with build-in lmfit guess method (only for a single model) +# print(f'\nat start fit: kwargs = {kwargs}') +#RV print('\nAt start of fit:') +#RV self._parameters.pretty_print() +# print(f'parameter_norms:\n{self._parameter_norms}') + if guess: + if self._mask is None: + self._parameters = self._model.guess(self._y, x=self._x) + else: + self._parameters = self._model.guess(np.asarray(self._y)[~self._mask], + x=self._x[~self._mask]) +# print('\nAfter guess:') +# self._parameters.pretty_print() + + # Add constant offset for a normalized model + if self._result is None and self._norm is not None and self._norm[0]: + self.add_model('constant', prefix='tmp_normalization_offset_', parameters={'name': 'c', + 'value': -self._norm[0], 'vary': False, 'norm': True}) + #'value': -self._norm[0]/self._norm[1], 'vary': False, 'norm': False}) + + # Adjust existing parameters for refit: + if 'parameters' in kwargs: + parameters = kwargs.pop('parameters') + if isinstance(parameters, dict): + parameters = (parameters, ) + elif not is_dict_series(parameters): + illegal_value(parameters, 'parameters', 'Fit.fit', raise_error=True) + for par in parameters: + name = par['name'] + if name not in self._parameters: + raise ValueError(f'Unable to match {name} parameter {par} to an existing one') + if self._parameters[name].expr is not None: + raise ValueError(f'Unable to modify {name} parameter {par} (currently an '+ + 'expression)') + if par.get('expr') is not None: + raise KeyError(f'Illegal "expr" key in {name} parameter {par}') + self._parameters[name].set(vary=par.get('vary')) + self._parameters[name].set(min=par.get('min')) + self._parameters[name].set(max=par.get('max')) + 
self._parameters[name].set(value=par.get('value')) +#RV print('\nAfter adjust:') +#RV self._parameters.pretty_print() + + # Apply parameter updates through keyword arguments +# print(f'kwargs = {kwargs}') +# print(f'parameter_norms = {self._parameter_norms}') + for name in set(self._parameters) & set(kwargs): + value = kwargs.pop(name) + if self._parameters[name].expr is None: + self._parameters[name].set(value=value) + else: + logging.warning(f'Ignoring parameter {name} in Fit.fit (set by expression: '+ + f'{self._parameters[name].expr})') + + # Check for uninitialized parameters + for name, par in self._parameters.items(): + if par.expr is None: + value = par.value + if value is None or np.isinf(value) or np.isnan(value): + if interactive: + value = input_num(f'Enter an initial value for {name}', default=1.0) + else: + value = 1.0 + if self._norm is None or name not in self._parameter_norms: + self._parameters[name].set(value=value) + elif self._parameter_norms[name]: + self._parameters[name].set(value=value*self._norm[1]) + + # Check if model is linear + try: + linear_model = self._check_linearity_model() + except: + linear_model = False +# print(f'\n\n--------> linear_model = {linear_model}\n') + if kwargs.get('check_only_linearity') is not None: + return(linear_model) + + # Normalize the data and initial parameters +#RV print('\nBefore normalization:') +#RV self._parameters.pretty_print() +# print(f'parameter_norms:\n{self._parameter_norms}') + self._normalize() +# print(f'norm = {self._norm}') +#RV print('\nAfter normalization:') +#RV self._parameters.pretty_print() +# self.print_fit_report() +# print(f'parameter_norms:\n{self._parameter_norms}') + + if linear_model: + # Perform a linear fit by direct matrix solution with numpy + try: + if self._mask is None: + self._fit_linear_model(self._x, self._y_norm) + else: + self._fit_linear_model(self._x[~self._mask], + np.asarray(self._y_norm)[~self._mask]) + except: + linear_model = False + if not linear_model: + # 
    def plot(self, y=None, y_title=None, result=None, skip_init=False, plot_comp_legends=False,
            plot_residual=False, plot_masked_data=True, **kwargs):
        """Plot the data, the best fit, and optionally the residual, the initial fit and the
        individual model components.

        :param y: optional extra data curve to overlay; must have the same length as self._x
        :param y_title: legend label for the extra curve (falls back to 'data')
        :param result: lmfit ModelResult to plot (defaults to the stored self._result)
        :param skip_init: when True, do not plot the initial-guess curve
        :param plot_comp_legends: when True, add one legend entry per model component
        :param plot_residual: when True, also plot the fit residual
        :param plot_masked_data: when True and a mask is set, mark the masked points
        :param kwargs: forwarded to quick_plot; a 'title' keyword is extracted and passed
            explicitly
        """
        if result is None:
            result = self._result
        if result is None:
            # No fit has been performed yet: nothing to plot
            return
        plots = []
        legend = []
        if self._mask is None:
            # No mask: use an all-False mask so ~mask selects every point
            mask = np.zeros(self._x.size).astype(bool)
            plot_masked_data = False
        else:
            mask = self._mask
        if y is not None:
            if not isinstance(y, (tuple, list, np.ndarray)):
                illegal_value(y, 'y', 'Fit.plot')
            if len(y) != len(self._x):
                logging.warning('Ignoring parameter y in Fit.plot (wrong dimension)')
                y = None
        if y is not None:
            if y_title is None or not isinstance(y_title, str):
                y_title = 'data'
            plots += [(self._x, y, '.')]
            legend += [y_title]
        if self._y is not None:
            plots += [(self._x, np.asarray(self._y), 'b.')]
            legend += ['data']
            if plot_masked_data:
                plots += [(self._x[mask], np.asarray(self._y)[mask], 'bx')]
                legend += ['masked data']
        if isinstance(plot_residual, bool) and plot_residual:
            # result.residual only covers the unmasked points
            plots += [(self._x[~mask], result.residual, 'k-')]
            legend += ['residual']
        plots += [(self._x[~mask], result.best_fit, 'k-')]
        legend += ['best fit']
        if not skip_init and hasattr(result, 'init_fit'):
            plots += [(self._x[~mask], result.init_fit, 'g-')]
            legend += ['init']
        components = result.eval_components(x=self._x[~mask])
        num_components = len(components)
        if 'tmp_normalization_offset_' in components:
            # The internal normalization offset is not a user component
            num_components -= 1
        if num_components > 1:
            eval_index = 0
            for modelname, y in components.items():
                if modelname == 'tmp_normalization_offset_':
                    continue
                if modelname == '_eval':
                    # Anonymous expression models get a generated legend name
                    modelname = f'eval{eval_index}'
                if len(modelname) > 20:
                    # Truncate long component names for the legend
                    modelname = f'{modelname[0:16]} ...'
                if isinstance(y, (int, float)):
                    # Constant components evaluate to a scalar: broadcast for plotting
                    y *= np.ones(self._x[~mask].size)
                plots += [(self._x[~mask], y, '--')]
                if plot_comp_legends:
                    if modelname[-1] == '_':
                        # Strip the trailing prefix underscore from lmfit component names
                        legend.append(modelname[:-1])
                    else:
                        legend.append(modelname)
        title = kwargs.get('title')
        if title is not None:
            kwargs.pop('title')
        quick_plot(tuple(plots), legend=legend, title=title, block=True, **kwargs)
+ if isinstance(y, (int, float)): + y *= np.ones(self._x[~mask].size) + plots += [(self._x[~mask], y, '--')] + if plot_comp_legends: + if modelname[-1] == '_': + legend.append(modelname[:-1]) + else: + legend.append(modelname) + title = kwargs.get('title') + if title is not None: + kwargs.pop('title') + quick_plot(tuple(plots), legend=legend, title=title, block=True, **kwargs) + + @staticmethod + def guess_init_peak(x, y, *args, center_guess=None, use_max_for_center=True): + """ Return a guess for the initial height, center and fwhm for a peak + """ +# print(f'\n\nargs = {args}') +# print(f'center_guess = {center_guess}') +# quick_plot(x, y, vlines=center_guess, block=True) + center_guesses = None + x = np.asarray(x) + y = np.asarray(y) + if len(x) != len(y): + logging.error(f'Invalid x and y lengths ({len(x)}, {len(y)}), skip initial guess') + return(None, None, None) + if isinstance(center_guess, (int, float)): + if len(args): + logging.warning('Ignoring additional arguments for single center_guess value') + center_guesses = [center_guess] + elif isinstance(center_guess, (tuple, list, np.ndarray)): + if len(center_guess) == 1: + logging.warning('Ignoring additional arguments for single center_guess value') + if not isinstance(center_guess[0], (int, float)): + raise ValueError(f'Invalid parameter center_guess ({type(center_guess[0])})') + center_guess = center_guess[0] + else: + if len(args) != 1: + raise ValueError(f'Invalid number of arguments ({len(args)})') + n = args[0] + if not is_index(n, 0, len(center_guess)): + raise ValueError('Invalid argument') + center_guesses = center_guess + center_guess = center_guesses[n] + elif center_guess is not None: + raise ValueError(f'Invalid center_guess type ({type(center_guess)})') +# print(f'x = {x}') +# print(f'y = {y}') +# print(f'center_guess = {center_guess}') + + # Sort the inputs + index = np.argsort(x) + x = x[index] + y = y[index] + miny = y.min() +# print(f'miny = {miny}') +# print(f'x_range = {x[0]} {x[-1]} 
{len(x)}') +# print(f'y_range = {y[0]} {y[-1]} {len(y)}') +# quick_plot(x, y, vlines=center_guess, block=True) + +# xx = x +# yy = y + # Set range for current peak +# print(f'n = {n}') +# print(f'center_guesses = {center_guesses}') + if center_guesses is not None: + if len(center_guesses) > 1: + index = np.argsort(center_guesses) + n = list(index).index(n) +# print(f'n = {n}') +# print(f'index = {index}') + center_guesses = np.asarray(center_guesses)[index] +# print(f'center_guesses = {center_guesses}') + if n == 0: + low = 0 + upp = index_nearest(x, (center_guesses[0]+center_guesses[1])/2) + elif n == len(center_guesses)-1: + low = index_nearest(x, (center_guesses[n-1]+center_guesses[n])/2) + upp = len(x) + else: + low = index_nearest(x, (center_guesses[n-1]+center_guesses[n])/2) + upp = index_nearest(x, (center_guesses[n]+center_guesses[n+1])/2) +# print(f'low = {low}') +# print(f'upp = {upp}') + x = x[low:upp] + y = y[low:upp] +# quick_plot(x, y, vlines=(x[0], center_guess, x[-1]), block=True) + + # Estimate FHHM + maxy = y.max() +# print(f'x_range = {x[0]} {x[-1]} {len(x)}') +# print(f'y_range = {y[0]} {y[-1]} {len(y)} {miny} {maxy}') +# print(f'center_guess = {center_guess}') + if center_guess is None: + center_index = np.argmax(y) + center = x[center_index] + height = maxy-miny + else: + if use_max_for_center: + center_index = np.argmax(y) + center = x[center_index] + if center_index < 0.1*len(x) or center_index > 0.9*len(x): + center_index = index_nearest(x, center_guess) + center = center_guess + else: + center_index = index_nearest(x, center_guess) + center = center_guess + height = y[center_index]-miny +# print(f'center_index = {center_index}') +# print(f'center = {center}') +# print(f'height = {height}') + half_height = miny+0.5*height +# print(f'half_height = {half_height}') + fwhm_index1 = 0 + for i in range(center_index, fwhm_index1, -1): + if y[i] < half_height: + fwhm_index1 = i + break +# print(f'fwhm_index1 = {fwhm_index1} {x[fwhm_index1]}') + 
fwhm_index2 = len(x)-1 + for i in range(center_index, fwhm_index2): + if y[i] < half_height: + fwhm_index2 = i + break +# print(f'fwhm_index2 = {fwhm_index2} {x[fwhm_index2]}') +# quick_plot((x,y,'o'), vlines=(x[fwhm_index1], center, x[fwhm_index2]), block=True) + if fwhm_index1 == 0 and fwhm_index2 < len(x)-1: + fwhm = 2*(x[fwhm_index2]-center) + elif fwhm_index1 > 0 and fwhm_index2 == len(x)-1: + fwhm = 2*(center-x[fwhm_index1]) + else: + fwhm = x[fwhm_index2]-x[fwhm_index1] +# print(f'fwhm_index1 = {fwhm_index1} {x[fwhm_index1]}') +# print(f'fwhm_index2 = {fwhm_index2} {x[fwhm_index2]}') +# print(f'fwhm = {fwhm}') + + # Return height, center and FWHM +# quick_plot((x,y,'o'), (xx,yy), vlines=(x[fwhm_index1], center, x[fwhm_index2]), block=True) + return(height, center, fwhm) + + def _check_linearity_model(self): + """Identify the linearity of all model parameters and check if the model is linear or not + """ + if not self._try_linear_fit: + logging.info('Skip linearity check (not yet supported for callable models)') + return(False) + free_parameters = [name for name, par in self._parameters.items() if par.vary] + for component in self._model.components: + if 'tmp_normalization_offset_c' in component.param_names: + continue + if isinstance(component, ExpressionModel): + for name in free_parameters: + if diff(component.expr, name, name): +# print(f'\t\t{component.expr} is non-linear in {name}') + self._nonlinear_parameters.append(name) + if name in self._linear_parameters: + self._linear_parameters.remove(name) + else: + model_parameters = component.param_names.copy() + for basename, hint in component.param_hints.items(): + name = f'{component.prefix}{basename}' + if hint.get('expr') is not None: + model_parameters.remove(name) + for name in model_parameters: + expr = self._parameters[name].expr + if expr is not None: + for nname in free_parameters: + if name in self._nonlinear_parameters: + if diff(expr, nname): +# print(f'\t\t{component} is non-linear in {nname} 
    def _fit_linear_model(self, x, y):
        """Perform a linear fit by direct matrix solution with numpy.

        Builds the design matrix A (one column per free parameter) and a constant vector
        y_const from the model components, solves the least-squares system A*p = y-y_const,
        and assembles an lmfit ModelResult from the solution. Assumes
        _check_linearity_model() returned True.

        :param x: independent data
        :param y: dependent data (normalized when self._normalized is set)
        """
        # Construct the matrix and the free parameter vector
        free_parameters = [name for name, par in self._parameters.items() if par.vary]
        expr_parameters = {name:par.expr for name, par in self._parameters.items()
                if par.expr is not None}
        model_parameters = []
        for component in self._model.components:
            if 'tmp_normalization_offset_c' in component.param_names:
                continue
            model_parameters += component.param_names
            for basename, hint in component.param_hints.items():
                name = f'{component.prefix}{basename}'
                if hint.get('expr') is not None:
                    # Hint-level expressions are handled by the component itself
                    expr_parameters.pop(name)
                    model_parameters.remove(name)
        norm = 1.0
        if self._normalized:
            norm = self._norm[1]
        # Add expression parameters to asteval
        ast = Interpreter()
        for name, expr in expr_parameters.items():
            ast.symtable[name] = expr
        # Add constant parameters to asteval
        # (renormalize to use correctly in evaluation of expression models)
        for name, par in self._parameters.items():
            if par.expr is None and not par.vary:
                if self._parameter_norms[name]:
                    ast.symtable[name] = par.value*norm
                else:
                    ast.symtable[name] = par.value
        A = np.zeros((len(x), len(free_parameters)), dtype='float64')
        y_const = np.zeros(len(x), dtype='float64')
        have_expression_model = False
        for component in self._model.components:
            if isinstance(component, ConstantModel):
                name = component.param_names[0]
                if name in free_parameters:
                    # Free constant: unit column in the design matrix
                    A[:,free_parameters.index(name)] = 1.0
                else:
                    if self._parameter_norms[name]:
                        delta_y_const = self._parameters[name]*np.ones(len(x))
                    else:
                        delta_y_const = (self._parameters[name]*norm)*np.ones(len(x))
                    y_const += delta_y_const
            elif isinstance(component, ExpressionModel):
                have_expression_model = True
                const_expr = component.expr
                for name in free_parameters:
                    # d(expr)/d(name) is the column for a linear parameter
                    dexpr_dname = diff(component.expr, name)
                    if dexpr_dname:
                        # Subtract the linear term to leave the constant remainder
                        const_expr = f'{const_expr}-({str(dexpr_dname)})*{name}'
                        if not self._parameter_norms[name]:
                            # Unnormalized parameter: rescale its column
                            dexpr_dname = f'({dexpr_dname})/{norm}'
                        fx = [(lambda _: ast.eval(str(dexpr_dname)))(ast(f'x={v}')) for v in x]
                        if len(ast.error):
                            raise ValueError(f'Unable to evaluate {dexpr_dname}')
                        A[:,free_parameters.index(name)] += fx
                # FIX: find another solution if expr not supported by simplify
                const_expr = str(simplify(f'({const_expr})/{norm}'))
                delta_y_const = [(lambda _: ast.eval(const_expr))(ast(f'x = {v}')) for v in x]
                y_const += delta_y_const
                if len(ast.error):
                    raise ValueError(f'Unable to evaluate {const_expr}')
            else:
                free_model_parameters = [name for name in component.param_names
                        if name in free_parameters or name in expr_parameters]
                if not len(free_model_parameters):
                    # Fully fixed component: contributes only to the constant vector
                    y_const += component.eval(params=self._parameters, x=x)
                elif isinstance(component, LinearModel):
                    if f'{component.prefix}slope' in free_model_parameters:
                        A[:,free_parameters.index(f'{component.prefix}slope')] = x
                    else:
                        y_const += self._parameters[f'{component.prefix}slope'].value*x
                    if f'{component.prefix}intercept' in free_model_parameters:
                        A[:,free_parameters.index(f'{component.prefix}intercept')] = 1.0
                    else:
                        y_const += self._parameters[f'{component.prefix}intercept'].value* \
                                np.ones(len(x))
                elif isinstance(component, QuadraticModel):
                    if f'{component.prefix}a' in free_model_parameters:
                        A[:,free_parameters.index(f'{component.prefix}a')] = x**2
                    else:
                        y_const += self._parameters[f'{component.prefix}a'].value*x**2
                    if f'{component.prefix}b' in free_model_parameters:
                        A[:,free_parameters.index(f'{component.prefix}b')] = x
                    else:
                        y_const += self._parameters[f'{component.prefix}b'].value*x
                    if f'{component.prefix}c' in free_model_parameters:
                        A[:,free_parameters.index(f'{component.prefix}c')] = 1.0
                    else:
                        y_const += self._parameters[f'{component.prefix}c'].value*np.ones(len(x))
                else:
                    # At this point each build-in model must be strictly proportional to each
                    # linear model parameter. Without this assumption, the model equation is
                    # needed. For the current build-in lmfit models, this can only ever be the
                    # amplitude
                    assert(len(free_model_parameters) == 1)
                    name = f'{component.prefix}amplitude'
                    assert(free_model_parameters[0] == name)
                    assert(self._parameter_norms[name])
                    expr = self._parameters[name].expr
                    if expr is None:
                        # Evaluate the component at unit amplitude to obtain its column
                        parameters = deepcopy(self._parameters)
                        parameters[name].set(value=1.0)
                        index = free_parameters.index(name)
                        A[:,free_parameters.index(name)] += component.eval(params=parameters, x=x)
                    else:
                        # Amplitude tied through an expression: chain rule through expr
                        const_expr = expr
                        parameters = deepcopy(self._parameters)
                        parameters[name].set(value=1.0)
                        dcomp_dname = component.eval(params=parameters, x=x)
                        for nname in free_parameters:
                            dexpr_dnname = diff(expr, nname)
                            if dexpr_dnname:
                                assert(self._parameter_norms[name])
                                fx = np.asarray(dexpr_dnname*dcomp_dname, dtype='float64')
                                if self._parameter_norms[nname]:
                                    A[:,free_parameters.index(nname)] += fx
                                else:
                                    A[:,free_parameters.index(nname)] += fx/norm
                                const_expr = f'{const_expr}-({dexpr_dnname})*{nname}'
                        const_expr = str(simplify(f'({const_expr})/{norm}'))
                        fx = [(lambda _: ast.eval(const_expr))(ast(f'x = {v}')) for v in x]
                        delta_y_const = np.multiply(fx, dcomp_dname)
                        y_const += delta_y_const
        solution, residual, rank, s = np.linalg.lstsq(A, y-y_const, rcond=None)

        # Assemble result (compensate for normalization in expression models)
        for name, value in zip(free_parameters, solution):
            self._parameters[name].set(value=value)
        if self._normalized and (have_expression_model or len(expr_parameters)):
            for name, norm in self._parameter_norms.items():
                par = self._parameters[name]
                if par.expr is None and norm:
                    self._parameters[name].set(value=par.value*self._norm[1])
        self._result = ModelResult(self._model, deepcopy(self._parameters))
        self._result.best_fit = self._model.eval(params=self._parameters, x=x)
        if self._normalized and (have_expression_model or len(expr_parameters)):
            if 'tmp_normalization_offset_c' in self._parameters:
                offset = self._parameters['tmp_normalization_offset_c']
            else:
                offset = 0.0
            self._result.best_fit = (self._result.best_fit-offset-self._norm[0])/self._norm[1]
        if self._normalized:
            # Return the internal parameters to normalized units
            for name, norm in self._parameter_norms.items():
                par = self._parameters[name]
                if par.expr is None and norm:
                    value = par.value/self._norm[1]
                    self._parameters[name].set(value=value)
                    self._result.params[name].set(value=value)
        self._result.residual = self._result.best_fit-y
        self._result.components = self._model.components
        self._result.init_params = None
    def _normalize(self):
        """Normalize the data and initial parameters.

        Maps the data to y_norm = (y-norm[0])/norm[1] and divides the values/bounds of all
        parameters flagged in self._parameter_norms by norm[1]. No-op when already
        normalized or when no normalization is configured.
        """
        if self._normalized:
            return
        if self._norm is None:
            if self._y is not None and self._y_norm is None:
                self._y_norm = np.asarray(self._y)
        else:
            if self._y is not None and self._y_norm is None:
                self._y_norm = (np.asarray(self._y)-self._norm[0])/self._norm[1]
            self._y_range = 1.0
            for name, norm in self._parameter_norms.items():
                par = self._parameters[name]
                if par.expr is None and norm:
                    value = par.value/self._norm[1]
                    _min = par.min
                    _max = par.max
                    # float_min marks an "effectively zero" bound that must not be rescaled
                    if not np.isinf(_min) and abs(_min) != float_min:
                        _min /= self._norm[1]
                    if not np.isinf(_max) and abs(_max) != float_min:
                        _max /= self._norm[1]
                    par.set(value=value, min=_min, max=_max)
            self._normalized = True

    def _renormalize(self):
        """Renormalize the data and results.

        Inverse of _normalize(): scales the parameter values/bounds, the best fit, the
        initial fit, errors and the covariance matrix back to physical units.
        """
        if self._norm is None or not self._normalized:
            return
        self._normalized = False
        for name, norm in self._parameter_norms.items():
            par = self._parameters[name]
            if par.expr is None and norm:
                value = par.value*self._norm[1]
                _min = par.min
                _max = par.max
                if not np.isinf(_min) and abs(_min) != float_min:
                    _min *= self._norm[1]
                if not np.isinf(_max) and abs(_max) != float_min:
                    _max *= self._norm[1]
                par.set(value=value, min=_min, max=_max)
        if self._result is None:
            return
        self._result.best_fit = self._result.best_fit*self._norm[1]+self._norm[0]
        for name, par in self._result.params.items():
            if self._parameter_norms.get(name, False):
                if par.stderr is not None:
                    par.stderr *= self._norm[1]
                if par.expr is None:
                    _min = par.min
                    _max = par.max
                    value = par.value*self._norm[1]
                    if par.init_value is not None:
                        par.init_value *= self._norm[1]
                    if not np.isinf(_min) and abs(_min) != float_min:
                        _min *= self._norm[1]
                    if not np.isinf(_max) and abs(_max) != float_min:
                        _max *= self._norm[1]
                    par.set(value=value, min=_min, max=_max)
        if hasattr(self._result, 'init_fit'):
            self._result.init_fit = self._result.init_fit*self._norm[1]+self._norm[0]
        if hasattr(self._result, 'init_values'):
            init_values = {}
            for name, value in self._result.init_values.items():
                if name not in self._parameter_norms or self._parameters[name].expr is not None:
                    init_values[name] = value
                elif self._parameter_norms[name]:
                    init_values[name] = value*self._norm[1]
            self._result.init_values = init_values
            for name, par in self._result.init_params.items():
                if par.expr is None and self._parameter_norms.get(name, False):
                    value = par.value
                    _min = par.min
                    _max = par.max
                    value *= self._norm[1]
                    if not np.isinf(_min) and abs(_min) != float_min:
                        _min *= self._norm[1]
                    if not np.isinf(_max) and abs(_max) != float_min:
                        _max *= self._norm[1]
                    par.set(value=value, min=_min, max=_max)
                par.init_value = par.value
        # Don't renormalize chisqr, it has no useful meaning in physical units
        if self._result.covar is not None:
            # Each normalized parameter contributes one factor of norm[1] per matrix axis,
            # so diagonal/covariance entries of two normalized parameters end up scaled by
            # norm[1]**2, mixed entries by norm[1]
            for i, name in enumerate(self._result.var_names):
                if self._parameter_norms.get(name, False):
                    for j in range(len(self._result.var_names)):
                        if self._result.covar[i,j] is not None:
                            self._result.covar[i,j] *= self._norm[1]
                        if self._result.covar[j,i] is not None:
                            self._result.covar[j,i] *= self._norm[1]
        # Don't renormalize redchi, it has no useful meaning in physical units
        if self._result.residual is not None:
            self._result.residual *= self._norm[1]
self._result.residual *= self._norm[1] + + def _reset_par_at_boundary(self, par, value): + assert(par.vary) + name = par.name + _min = self._parameter_bounds[name]['min'] + _max = self._parameter_bounds[name]['max'] + if np.isinf(_min): + if not np.isinf(_max): + if self._parameter_norms.get(name, False): + upp = _max-0.1*self._y_range + elif _max == 0.0: + upp = _max-0.1 + else: + upp = _max-0.1*abs(_max) + if value >= upp: + return(upp) + else: + if np.isinf(_max): + if self._parameter_norms.get(name, False): + low = _min+0.1*self._y_range + elif _min == 0.0: + low = _min+0.1 + else: + low = _min+0.1*abs(_min) + if value <= low: + return(low) + else: + low = 0.9*_min+0.1*_max + upp = 0.1*_min+0.9*_max + if value <= low: + return(low) + elif value >= upp: + return(upp) + return(value) + + +class FitMultipeak(Fit): + """Fit data with multiple peaks + """ + def __init__(self, y, x=None, normalize=True): + super().__init__(y, x=x, normalize=normalize) + self._fwhm_max = None + self._sigma_max = None + + @classmethod + def fit_multipeak(cls, y, centers, x=None, normalize=True, peak_models='gaussian', + center_exprs=None, fit_type=None, background_order=None, background_exp=False, + fwhm_max=None, plot_components=False): + """Make sure that centers and fwhm_max are in the correct units and consistent with expr + for a uniform fit (fit_type == 'uniform') + """ + fit = cls(y, x=x, normalize=normalize) + success = fit.fit(centers, fit_type=fit_type, peak_models=peak_models, fwhm_max=fwhm_max, + center_exprs=center_exprs, background_order=background_order, + background_exp=background_exp, plot_components=plot_components) + if success: + return(fit.best_fit, fit.residual, fit.best_values, fit.best_errors, fit.redchi, \ + fit.success) + else: + return(np.array([]), np.array([]), {}, {}, float_max, False) + + def fit(self, centers, fit_type=None, peak_models=None, center_exprs=None, fwhm_max=None, + background_order=None, background_exp=False, plot_components=False, + 
param_constraint=False): + self._fwhm_max = fwhm_max + # Create the multipeak model + self._create_model(centers, fit_type, peak_models, center_exprs, background_order, + background_exp, param_constraint) + + # RV: Obsolete Normalize the data and results +# print('\nBefore fit before normalization in FitMultipeak:') +# self._parameters.pretty_print() +# self._normalize() +# print('\nBefore fit after normalization in FitMultipeak:') +# self._parameters.pretty_print() + + # Perform the fit + try: + if param_constraint: + super().fit(fit_kws={'xtol': 1.e-5, 'ftol': 1.e-5, 'gtol': 1.e-5}) + else: + super().fit() + except: + return(False) + + # Check for valid fit parameter results + fit_failure = self._check_validity() + success = True + if fit_failure: + if param_constraint: + logging.warning(' -> Should not happen with param_constraint set, fail the fit') + success = False + else: + logging.info(' -> Retry fitting with constraints') + self.fit(centers, fit_type, peak_models, center_exprs, fwhm_max=fwhm_max, + background_order=background_order, background_exp=background_exp, + plot_components=plot_components, param_constraint=True) + else: + # RV: Obsolete Renormalize the data and results +# print('\nAfter fit before renormalization in FitMultipeak:') +# self._parameters.pretty_print() +# self.print_fit_report() +# self._renormalize() +# print('\nAfter fit after renormalization in FitMultipeak:') +# self._parameters.pretty_print() +# self.print_fit_report() + + # Print report and plot components if requested + if plot_components: + self.print_fit_report() + self.plot() + + return(success) + + def _create_model(self, centers, fit_type=None, peak_models=None, center_exprs=None, + background_order=None, background_exp=False, param_constraint=False): + """Create the multipeak model + """ + if isinstance(centers, (int, float)): + centers = [centers] + num_peaks = len(centers) + if peak_models is None: + peak_models = num_peaks*['gaussian'] + elif isinstance(peak_models, 
    def _create_model(self, centers, fit_type=None, peak_models=None, center_exprs=None,
            background_order=None, background_exp=False, param_constraint=False):
        """Create the multipeak model.

        Resets any previous model, optionally adds a background component, then adds one
        peak model per center with initial height/center/fwhm guessed from the data.
        With fit_type == 'uniform' the centers are tied to a shared scale_factor through
        center_exprs. With param_constraint, amplitudes/centers/sigmas get positive lower
        bounds and sigma an upper bound derived from self._fwhm_max.
        """
        if isinstance(centers, (int, float)):
            centers = [centers]
        num_peaks = len(centers)
        if peak_models is None:
            peak_models = num_peaks*['gaussian']
        elif isinstance(peak_models, str):
            peak_models = num_peaks*[peak_models]
        if len(peak_models) != num_peaks:
            raise ValueError(f'Inconsistent number of peaks in peak_models ({len(peak_models)} vs '+
                    f'{num_peaks})')
        if num_peaks == 1:
            # Single peak: uniform/constrained-center options are meaningless
            if fit_type is not None:
                logging.debug('Ignoring fit_type input for fitting one peak')
                fit_type = None
            if center_exprs is not None:
                logging.debug('Ignoring center_exprs input for fitting one peak')
                center_exprs = None
        else:
            if fit_type == 'uniform':
                if center_exprs is None:
                    # Default: every center scales uniformly with a shared scale_factor
                    center_exprs = [f'scale_factor*{cen}' for cen in centers]
                if len(center_exprs) != num_peaks:
                    raise ValueError(f'Inconsistent number of peaks in center_exprs '+
                            f'({len(center_exprs)} vs {num_peaks})')
            elif fit_type == 'unconstrained' or fit_type is None:
                if center_exprs is not None:
                    logging.warning('Ignoring center_exprs input for unconstrained fit')
                    center_exprs = None
            else:
                raise ValueError(f'Invalid fit_type in fit_multigaussian {fit_type}')
        self._sigma_max = None
        if param_constraint:
            # float_min acts as a strictly-positive lower bound
            min_value = float_min
            if self._fwhm_max is not None:
                self._sigma_max = np.zeros(num_peaks)
        else:
            min_value = None

        # Reset the fit
        self._model = None
        self._parameters = Parameters()
        self._result = None

        # Add background model
        if background_order is not None:
            if background_order == 0:
                self.add_model('constant', prefix='background', parameters=
                        {'name': 'c', 'value': float_min, 'min': min_value})
            elif background_order == 1:
                self.add_model('linear', prefix='background', slope=0.0, intercept=0.0)
            elif background_order == 2:
                self.add_model('quadratic', prefix='background', a=0.0, b=0.0, c=0.0)
            else:
                raise ValueError(f'Invalid parameter background_order ({background_order})')
        if background_exp:
            self.add_model('exponential', prefix='background', parameters=(
                    {'name': 'amplitude', 'value': float_min, 'min': min_value},
                    {'name': 'decay', 'value': float_min, 'min': min_value}))

        # Add peaks and guess initial fit parameters
        # (asteval evaluates the per-shape fwhm->sigma and height->amplitude conversions)
        ast = Interpreter()
        if num_peaks == 1:
            height_init, cen_init, fwhm_init = self.guess_init_peak(self._x, self._y)
            if self._fwhm_max is not None and fwhm_init > self._fwhm_max:
                fwhm_init = self._fwhm_max
            ast(f'fwhm = {fwhm_init}')
            ast(f'height = {height_init}')
            sig_init = ast(fwhm_factor[peak_models[0]])
            amp_init = ast(height_factor[peak_models[0]])
            sig_max = None
            if self._sigma_max is not None:
                ast(f'fwhm = {self._fwhm_max}')
                sig_max = ast(fwhm_factor[peak_models[0]])
                self._sigma_max[0] = sig_max
            self.add_model(peak_models[0], parameters=(
                    {'name': 'amplitude', 'value': amp_init, 'min': min_value},
                    {'name': 'center', 'value': cen_init, 'min': min_value},
                    {'name': 'sigma', 'value': sig_init, 'min': min_value, 'max': sig_max}))
        else:
            if fit_type == 'uniform':
                self.add_parameter(name='scale_factor', value=1.0)
            for i in range(num_peaks):
                height_init, cen_init, fwhm_init = self.guess_init_peak(self._x, self._y, i,
                        center_guess=centers)
                if self._fwhm_max is not None and fwhm_init > self._fwhm_max:
                    fwhm_init = self._fwhm_max
                ast(f'fwhm = {fwhm_init}')
                ast(f'height = {height_init}')
                sig_init = ast(fwhm_factor[peak_models[i]])
                amp_init = ast(height_factor[peak_models[i]])
                sig_max = None
                if self._sigma_max is not None:
                    ast(f'fwhm = {self._fwhm_max}')
                    sig_max = ast(fwhm_factor[peak_models[i]])
                    self._sigma_max[i] = sig_max
                if fit_type == 'uniform':
                    self.add_model(peak_models[i], prefix=f'peak{i+1}_', parameters=(
                            {'name': 'amplitude', 'value': amp_init, 'min': min_value},
                            {'name': 'center', 'expr': center_exprs[i]},
                            {'name': 'sigma', 'value': sig_init, 'min': min_value,
                            'max': sig_max}))
                else:
                    # NOTE(review): this branch hard-codes 'gaussian' instead of using
                    # peak_models[i] — looks inconsistent with the uniform branch; confirm
                    self.add_model('gaussian', prefix=f'peak{i+1}_', parameters=(
                            {'name': 'amplitude', 'value': amp_init, 'min': min_value},
                            {'name': 'center', 'value': cen_init, 'min': min_value},
                            {'name': 'sigma', 'value': sig_init, 'min': min_value,
                            'max': sig_max}))
"""Check for valid fit parameter results + """ + fit_failure = False + index = compile(r'\d+') + for name, par in self.best_parameters.items(): + if 'background' in name: +# if ((name == 'backgroundc' and par['value'] <= 0.0) or +# (name.endswith('amplitude') and par['value'] <= 0.0) or + if ((name.endswith('amplitude') and par['value'] <= 0.0) or + (name.endswith('decay') and par['value'] <= 0.0)): + logging.info(f'Invalid fit result for {name} ({par["value"]})') + fit_failure = True + elif (((name.endswith('amplitude') or name.endswith('height')) and + par['value'] <= 0.0) or + ((name.endswith('sigma') or name.endswith('fwhm')) and par['value'] <= 0.0) or + (name.endswith('center') and par['value'] <= 0.0) or + (name == 'scale_factor' and par['value'] <= 0.0)): + logging.info(f'Invalid fit result for {name} ({par["value"]})') + fit_failure = True + if name.endswith('sigma') and self._sigma_max is not None: + if name == 'sigma': + sigma_max = self._sigma_max[0] + else: + sigma_max = self._sigma_max[int(index.search(name).group())-1] + if par['value'] > sigma_max: + logging.info(f'Invalid fit result for {name} ({par["value"]})') + fit_failure = True + elif par['value'] == sigma_max: + logging.warning(f'Edge result on for {name} ({par["value"]})') + if name.endswith('fwhm') and self._fwhm_max is not None: + if par['value'] > self._fwhm_max: + logging.info(f'Invalid fit result for {name} ({par["value"]})') + fit_failure = True + elif par['value'] == self._fwhm_max: + logging.warning(f'Edge result on for {name} ({par["value"]})') + return(fit_failure) + + +class FitMap(Fit): + """Fit a map of data + """ + def __init__(self, ymap, x=None, models=None, normalize=True, transpose=None, **kwargs): + super().__init__(None) + self._best_errors = None + self._best_fit = None + self._best_parameters = None + self._best_values = None + self._inv_transpose = None + self._max_nfev = None + self._memfolder = None + self._new_parameters = None + self._out_of_bounds = None + 
    def __init__(self, ymap, x=None, models=None, normalize=True, transpose=None, **kwargs):
        """Initialize a map fit.

        :param ymap: N-dimensional data map (array-like or xarray DataArray); the last
            (fastest) axis is the signal dimension, the other axes are map dimensions
        :param x: independent coordinates for the signal axis (ignored for a DataArray,
            whose last dimension coordinate is used instead)
        :param models: model name/callable or sequence thereof to add and fit immediately
        :param normalize: when True, normalize the data to its range before fitting
        :param transpose: optional permutation of the map dimensions
        :param kwargs: forwarded to add_model() and fit() (may include 'mask')
        """
        super().__init__(None)
        self._best_errors = None
        self._best_fit = None
        self._best_parameters = None
        self._best_values = None
        self._inv_transpose = None
        self._max_nfev = None
        self._memfolder = None
        self._new_parameters = None
        self._out_of_bounds = None
        self._plot = False
        self._print_report = False
        self._redchi = None
        self._redchi_cutoff = 0.1
        self._skip_init = True
        self._success = None
        self._transpose = None
        self._try_no_bounds = True

        # At this point the fastest index should always be the signal dimension so that the
        # slowest ndim-1 dimensions are the map dimensions
        if isinstance(ymap, (tuple, list, np.ndarray)):
            self._x = np.asarray(x)
        elif have_xarray and isinstance(ymap, xr.DataArray):
            if x is not None:
                # NOTE(review): not an f-string, so the literal '{x}' is logged — confirm intent
                logging.warning('Ignoring superfluous input x ({x}) in Fit.__init__')
            self._x = np.asarray(ymap[ymap.dims[-1]])
        else:
            illegal_value(ymap, 'ymap', 'FitMap:__init__', raise_error=True)
        self._ymap = ymap

        # Verify the input parameters
        if self._x.ndim != 1:
            raise ValueError(f'Invalid dimension for input x {self._x.ndim}')
        if self._ymap.ndim < 2:
            raise ValueError('Invalid number of dimension of the input dataset '+
                    f'{self._ymap.ndim}')
        if self._x.size != self._ymap.shape[-1]:
            raise ValueError(f'Inconsistent x and y dimensions ({self._x.size} vs '+
                    f'{self._ymap.shape[-1]})')
        if not isinstance(normalize, bool):
            logging.warning(f'Invalid value for normalize ({normalize}) in Fit.__init__: '+
                    'setting normalize to True')
            normalize = True
        if isinstance(transpose, bool) and not transpose:
            transpose = None
        if transpose is not None and self._ymap.ndim < 3:
            logging.warning(f'Transpose meaningless for {self._ymap.ndim-1}D data maps: ignoring '+
                    'transpose')
        if transpose is not None:
            if self._ymap.ndim == 3 and isinstance(transpose, bool) and transpose:
                # For a 2D map, transpose=True simply swaps the two map axes
                self._transpose = (1, 0)
            elif not isinstance(transpose, (tuple, list)):
                logging.warning(f'Invalid data type for transpose ({transpose}, '+
                        f'{type(transpose)}) in Fit.__init__: setting transpose to False')
            elif len(transpose) != self._ymap.ndim-1:
                logging.warning(f'Invalid dimension for transpose ({transpose}, must be equal to '+
                        f'{self._ymap.ndim-1}) in Fit.__init__: setting transpose to False')
            elif any(i not in transpose for i in range(len(transpose))):
                logging.warning(f'Invalid index in transpose ({transpose}) '+
                        f'in Fit.__init__: setting transpose to False')
            elif not all(i==transpose[i] for i in range(self._ymap.ndim-1)):
                # Only store a transpose that actually permutes the axes
                self._transpose = transpose
            if self._transpose is not None:
                self._inv_transpose = tuple(self._transpose.index(i)
                        for i in range(len(self._transpose)))

        # Flatten the map (transpose if requested)
        # Store the flattened map in self._ymap_norm, whether normalized or not
        if self._transpose is not None:
            self._ymap_norm = np.transpose(np.asarray(self._ymap), list(self._transpose)+
                    [len(self._transpose)])
        else:
            self._ymap_norm = np.asarray(self._ymap)
        self._map_dim = int(self._ymap_norm.size/self._x.size)
        self._map_shape = self._ymap_norm.shape[:-1]
        self._ymap_norm = np.reshape(self._ymap_norm, (self._map_dim, self._x.size))

        # Check if a mask is provided
        if 'mask' in kwargs:
            self._mask = kwargs.pop('mask')
        if self._mask is None:
            ymap_min = float(self._ymap_norm.min())
            ymap_max = float(self._ymap_norm.max())
        else:
            self._mask = np.asarray(self._mask).astype(bool)
            if self._x.size != self._mask.size:
                raise ValueError(f'Inconsistent mask dimension ({self._x.size} vs '+
                        f'{self._mask.size})')
            # Data range is computed over the unmasked points only
            ymap_masked = np.asarray(self._ymap_norm)[:,~self._mask]
            ymap_min = float(ymap_masked.min())
            ymap_max = float(ymap_masked.max())

        # Normalize the data
        self._y_range = ymap_max-ymap_min
        if normalize and self._y_range > 0.0:
            self._norm = (ymap_min, self._y_range)
            self._ymap_norm = (self._ymap_norm-self._norm[0])/self._norm[1]
        else:
            # Unnormalized data: rescale the reduced-chi-square cutoff to the data scale
            self._redchi_cutoff *= self._y_range**2
        if models is not None:
            if callable(models) or isinstance(models, str):
                kwargs = self.add_model(models, **kwargs)
            elif isinstance(models, (tuple, list)):
                for model in models:
                    kwargs = self.add_model(model, **kwargs)
            self.fit(**kwargs)
            models, x=None, normalize=True, **kwargs):
        """Convenience alternate constructor: build a FitMap and immediately fit it."""
        return(cls(ymap, x=x, models=models, normalize=normalize, **kwargs))

    @property
    def best_errors(self):
        """Standard errors of the best-fit parameters, shaped like the map (set by fit)."""
        return(self._best_errors)

    @property
    def best_fit(self):
        """Best-fit curve for every map point (set by FitMap.fit)."""
        return(self._best_fit)

    @property
    def best_results(self):
        """Convert the input data array to a data set and add the fit results.
        """
        # Nothing to report until a fit has populated values, errors and the best fit
        if self.best_values is None or self.best_errors is None or self.best_fit is None:
            return(None)
        if not have_xarray:
            logging.warning('Unable to load xarray module')
            return(None)
        best_values = self.best_values
        best_errors = self.best_errors
        if isinstance(self._ymap, xr.DataArray):
            # Reuse the coordinates/dims of the input DataArray
            best_results = self._ymap.to_dataset()
            dims = self._ymap.dims
            fit_name = f'{self._ymap.name}_fit'
        else:
            # Plain array input: synthesize index coordinates for the map dimensions
            coords = {f'dim{n}_index':([f'dim{n}_index'], range(self._ymap.shape[n]))
                    for n in range(self._ymap.ndim-1)}
            coords['x'] = (['x'], self._x)
            dims = list(coords.keys())
            best_results = xr.Dataset(coords=coords)
            best_results['y'] = (dims, self._ymap)
            fit_name = 'y_fit'
        best_results[fit_name] = (dims, self.best_fit)
        if self._mask is not None:
            best_results['mask'] = self._mask
        # One pair of (values, errors) variables per fitted parameter
        for n in range(best_values.shape[0]):
            best_results[f'{self._best_parameters[n]}_values'] = (dims[:-1], best_values[n])
            best_results[f'{self._best_parameters[n]}_errors'] = (dims[:-1], best_errors[n])
        best_results.attrs['components'] = self.components
        return(best_results)

    @property
    def best_values(self):
        """Best-fit parameter values, shaped like the map (set by fit)."""
        return(self._best_values)

    @property
    def chisqr(self):
        # Not meaningful for a whole map of independent fits
        logging.warning('property chisqr not defined for fit.FitMap')
        return(None)

    @property
    def components(self):
        """Summary of the fit-model components and their parameters."""
        components = {}
        if self._result is None:
            logging.warning('Unable to collect components in FitMap.components')
            return(components)
        for component in self._result.components:
            # Skip the internal offset added for normalized single-component models
            if 'tmp_normalization_offset_c' in component.param_names:
                continue
            parameters = {}
            for name in component.param_names:
                if self._parameters[name].vary:
                    parameters[name] = {'free': True}
                elif self._parameters[name].expr is not None:
                    parameters[name] = {'free': False, 'expr': self._parameters[name].expr}
                else:
                    parameters[name] = {'free': False,
                            'value': self.init_parameters[name]['value']}
            expr = None
            if isinstance(component, ExpressionModel):
                # Expression models: use the (de-underscored) model name and keep the expr
                name = component._name
                if name[-1] == '_':
                    name = name[:-1]
                expr = component.expr
            else:
                # Built-in models: label by prefix (if any) plus the model name
                prefix = component.prefix
                if len(prefix):
                    if prefix[-1] == '_':
                        prefix = prefix[:-1]
                    name = f'{prefix} ({component._name})'
                else:
                    name = f'{component._name}'
            if expr is None:
                components[name] = {'parameters': parameters}
            else:
                components[name] = {'expr': expr, 'parameters': parameters}
        return(components)

    @property
    def covar(self):
        # Not meaningful for a whole map of independent fits
        logging.warning('property covar not defined for fit.FitMap')
        return(None)

    @property
    def max_nfev(self):
        """Boolean map flagging points that hit the function-evaluation limit."""
        return(self._max_nfev)

    @property
    def num_func_eval(self):
        # Per-point counts are not retained across the map
        logging.warning('property num_func_eval not defined for fit.FitMap')
        return(None)

    @property
    def out_of_bounds(self):
        """Boolean map flagging points whose fit ended outside the parameter bounds."""
        return(self._out_of_bounds)

    @property
    def redchi(self):
        """Reduced chi-square per map point."""
        return(self._redchi)

    @property
    def residual(self):
        """Data minus best fit (masked points excluded when a mask is set)."""
        if self.best_fit is None:
            return(None)
        if self._mask is None:
            return(np.asarray(self._ymap)-self.best_fit)
        else:
            # Apply the mask along the signal axis so shapes match best_fit
            ymap_flat = np.reshape(np.asarray(self._ymap), (self._map_dim, self._x.size))
            ymap_flat_masked = ymap_flat[:,~self._mask]
            ymap_masked = np.reshape(ymap_flat_masked,
                    list(self._map_shape)+[ymap_flat_masked.shape[-1]])
            return(ymap_masked-self.best_fit)

    @property
    def success(self):
        """Boolean map of per-point fit success."""
        return(self._success)

    @property
    def var_names(self):
        # Not meaningful for a whole map of independent fits
        logging.warning('property var_names not defined for fit.FitMap')
        return(None)

    @property
    def y(self):
        # Use ymap for map data; the scalar-fit y of the base class does not apply
        logging.warning('property y not defined for fit.FitMap')
        return(None)

    @property
    def ymap(self):
        """The (unmodified) input data map."""
        return(self._ymap)

    def best_parameters(self, dims=None):
        if dims 
is None: + return(self._best_parameters) + if not isinstance(dims, (list, tuple)) or len(dims) != len(self._map_shape): + illegal_value(dims, 'dims', 'FitMap.best_parameters', raise_error=True) + if self.best_values is None or self.best_errors is None: + logging.warning(f'Unable to obtain best parameter values for dims = {dims} in '+ + 'FitMap.best_parameters') + return({}) + # Create current parameters + parameters = deepcopy(self._parameters) + for n, name in enumerate(self._best_parameters): + if self._parameters[name].vary: + parameters[name].set(value=self.best_values[n][dims]) + parameters[name].stderr = self.best_errors[n][dims] + parameters_dict = {} + for name in sorted(parameters): + if name != 'tmp_normalization_offset_c': + par = parameters[name] + parameters_dict[name] = {'value': par.value, 'error': par.stderr, + 'init_value': self.init_parameters[name]['value'], 'min': par.min, + 'max': par.max, 'vary': par.vary, 'expr': par.expr} + return(parameters_dict) + + def freemem(self): + if self._memfolder is None: + return + try: + rmtree(self._memfolder) + self._memfolder = None + except: + logging.warning('Could not clean-up automatically.') + + def plot(self, dims, y_title=None, plot_residual=False, plot_comp_legends=False, + plot_masked_data=True): + if not isinstance(dims, (list, tuple)) or len(dims) != len(self._map_shape): + illegal_value(dims, 'dims', 'FitMap.plot', raise_error=True) + if self._result is None or self.best_fit is None or self.best_values is None: + logging.warning(f'Unable to plot fit for dims = {dims} in FitMap.plot') + return + if y_title is None or not isinstance(y_title, str): + y_title = 'data' + if self._mask is None: + mask = np.zeros(self._x.size).astype(bool) + plot_masked_data = False + else: + mask = self._mask + plots = [(self._x, np.asarray(self._ymap[dims]), 'b.')] + legend = [y_title] + if plot_masked_data: + plots += [(self._x[mask], np.asarray(self._ymap)[(*dims,mask)], 'bx')] + legend += ['masked data'] + plots += 
[(self._x[~mask], self.best_fit[dims], 'k-')] + legend += ['best fit'] + if plot_residual: + plots += [(self._x[~mask], self.residual[dims], 'k--')] + legend += ['residual'] + # Create current parameters + parameters = deepcopy(self._parameters) + for name in self._best_parameters: + if self._parameters[name].vary: + parameters[name].set(value= + self.best_values[self._best_parameters.index(name)][dims]) + for component in self._result.components: + if 'tmp_normalization_offset_c' in component.param_names: + continue + if isinstance(component, ExpressionModel): + prefix = component._name + if prefix[-1] == '_': + prefix = prefix[:-1] + modelname = f'{prefix}: {component.expr}' + else: + prefix = component.prefix + if len(prefix): + if prefix[-1] == '_': + prefix = prefix[:-1] + modelname = f'{prefix} ({component._name})' + else: + modelname = f'{component._name}' + if len(modelname) > 20: + modelname = f'{modelname[0:16]} ...' + y = component.eval(params=parameters, x=self._x[~mask]) + if isinstance(y, (int, float)): + y *= np.ones(self._x[~mask].size) + plots += [(self._x[~mask], y, '--')] + if plot_comp_legends: + legend.append(modelname) + quick_plot(tuple(plots), legend=legend, title=str(dims), block=True) + + def fit(self, **kwargs): +# t0 = time() + # Check input parameters + if self._model is None: + logging.error('Undefined fit model') + if 'num_proc' in kwargs: + num_proc = kwargs.pop('num_proc') + if not is_int(num_proc, ge=1): + illegal_value(num_proc, 'num_proc', 'FitMap.fit', raise_error=True) + else: + num_proc = cpu_count() + if num_proc > 1 and not have_joblib: + logging.warning(f'Missing joblib in the conda environment, running FitMap serially') + num_proc = 1 + if num_proc > cpu_count(): + logging.warning(f'The requested number of processors ({num_proc}) exceeds the maximum '+ + f'number of processors, num_proc reduced to ({cpu_count()})') + num_proc = cpu_count() + if 'try_no_bounds' in kwargs: + self._try_no_bounds = kwargs.pop('try_no_bounds') 
+ if not isinstance(self._try_no_bounds, bool): + illegal_value(self._try_no_bounds, 'try_no_bounds', 'FitMap.fit', raise_error=True) + if 'redchi_cutoff' in kwargs: + self._redchi_cutoff = kwargs.pop('redchi_cutoff') + if not is_num(self._redchi_cutoff, gt=0): + illegal_value(self._redchi_cutoff, 'redchi_cutoff', 'FitMap.fit', raise_error=True) + if 'print_report' in kwargs: + self._print_report = kwargs.pop('print_report') + if not isinstance(self._print_report, bool): + illegal_value(self._print_report, 'print_report', 'FitMap.fit', raise_error=True) + if 'plot' in kwargs: + self._plot = kwargs.pop('plot') + if not isinstance(self._plot, bool): + illegal_value(self._plot, 'plot', 'FitMap.fit', raise_error=True) + if 'skip_init' in kwargs: + self._skip_init = kwargs.pop('skip_init') + if not isinstance(self._skip_init, bool): + illegal_value(self._skip_init, 'skip_init', 'FitMap.fit', raise_error=True) + + # Apply mask if supplied: + if 'mask' in kwargs: + self._mask = kwargs.pop('mask') + if self._mask is not None: + self._mask = np.asarray(self._mask).astype(bool) + if self._x.size != self._mask.size: + raise ValueError(f'Inconsistent x and mask dimensions ({self._x.size} vs '+ + f'{self._mask.size})') + + # Add constant offset for a normalized single component model + if self._result is None and self._norm is not None and self._norm[0]: + self.add_model('constant', prefix='tmp_normalization_offset_', parameters={'name': 'c', + 'value': -self._norm[0], 'vary': False, 'norm': True}) + #'value': -self._norm[0]/self._norm[1], 'vary': False, 'norm': False}) + + # Adjust existing parameters for refit: + if 'parameters' in kwargs: +# print('\nIn FitMap before adjusting existing parameters for refit:') +# self._parameters.pretty_print() +# if self._result is None: +# raise ValueError('Invalid parameter parameters ({parameters})') +# if self._best_values is None: +# raise ValueError('Valid self._best_values required for refitting in FitMap.fit') + parameters = 
kwargs.pop('parameters') +# print(f'\nparameters:\n{parameters}') + if isinstance(parameters, dict): + parameters = (parameters, ) + elif not is_dict_series(parameters): + illegal_value(parameters, 'parameters', 'Fit.fit', raise_error=True) + for par in parameters: + name = par['name'] + if name not in self._parameters: + raise ValueError(f'Unable to match {name} parameter {par} to an existing one') + if self._parameters[name].expr is not None: + raise ValueError(f'Unable to modify {name} parameter {par} (currently an '+ + 'expression)') + value = par.get('value') + vary = par.get('vary') + if par.get('expr') is not None: + raise KeyError(f'Illegal "expr" key in {name} parameter {par}') + self._parameters[name].set(value=value, vary=vary, min=par.get('min'), + max=par.get('max')) + # Overwrite existing best values for fixed parameters when a value is specified +# print(f'best values befored resetting:\n{self._best_values}') + if isinstance(value, (int, float)) and vary is False: + for i, nname in enumerate(self._best_parameters): + if nname == name: + self._best_values[i] = value +# print(f'best values after resetting (value={value}, vary={vary}):\n{self._best_values}') +#RV print('\nIn FitMap after adjusting existing parameters for refit:') +#RV self._parameters.pretty_print() + + # Check for uninitialized parameters + for name, par in self._parameters.items(): + if par.expr is None: + value = par.value + if value is None or np.isinf(value) or np.isnan(value): + value = 1.0 + if self._norm is None or name not in self._parameter_norms: + self._parameters[name].set(value=value) + elif self._parameter_norms[name]: + self._parameters[name].set(value=value*self._norm[1]) + + # Create the best parameter list, consisting of all varying parameters plus the expression + # parameters in order to collect their errors + if self._result is None: + # Initial fit + assert(self._best_parameters is None) + self._best_parameters = [name for name, par in self._parameters.items() + 
if par.vary or par.expr is not None] + num_new_parameters = 0 + else: + # Refit + assert(len(self._best_parameters)) + self._new_parameters = [name for name, par in self._parameters.items() + if name != 'tmp_normalization_offset_c' and name not in self._best_parameters and + (par.vary or par.expr is not None)] + num_new_parameters = len(self._new_parameters) + num_best_parameters = len(self._best_parameters) + + # Flatten and normalize the best values of the previous fit, remove the remaining results + # of the previous fit + if self._result is not None: +# print('\nBefore flatten and normalize:') +# print(f'self._best_values:\n{self._best_values}') + self._out_of_bounds = None + self._max_nfev = None + self._redchi = None + self._success = None + self._best_fit = None + self._best_errors = None + assert(self._best_values is not None) + assert(self._best_values.shape[0] == num_best_parameters) + assert(self._best_values.shape[1:] == self._map_shape) + if self._transpose is not None: + self._best_values = np.transpose(self._best_values, + [0]+[i+1 for i in self._transpose]) + self._best_values = [np.reshape(self._best_values[i], self._map_dim) + for i in range(num_best_parameters)] + if self._norm is not None: + for i, name in enumerate(self._best_parameters): + if self._parameter_norms.get(name, False): + self._best_values[i] /= self._norm[1] +#RV print('\nAfter flatten and normalize:') +#RV print(f'self._best_values:\n{self._best_values}') + + # Normalize the initial parameters (and best values for a refit) +# print('\nIn FitMap before normalize:') +# self._parameters.pretty_print() +# print(f'\nparameter_norms:\n{self._parameter_norms}\n') + self._normalize() +# print('\nIn FitMap after normalize:') +# self._parameters.pretty_print() +# print(f'\nparameter_norms:\n{self._parameter_norms}\n') + + # Prevent initial values from sitting at boundaries + self._parameter_bounds = {name:{'min': par.min, 'max': par.max} + for name, par in self._parameters.items() if 
par.vary} + for name, par in self._parameters.items(): + if par.vary: + par.set(value=self._reset_par_at_boundary(par, par.value)) +# print('\nAfter checking boundaries:') +# self._parameters.pretty_print() + + # Set parameter bounds to unbound (only use bounds when fit fails) + if self._try_no_bounds: + for name in self._parameter_bounds.keys(): + self._parameters[name].set(min=-np.inf, max=np.inf) + + # Allocate memory to store fit results + if self._mask is None: + x_size = self._x.size + else: + x_size = self._x[~self._mask].size + if num_proc == 1: + self._out_of_bounds_flat = np.zeros(self._map_dim, dtype=bool) + self._max_nfev_flat = np.zeros(self._map_dim, dtype=bool) + self._redchi_flat = np.zeros(self._map_dim, dtype=np.float64) + self._success_flat = np.zeros(self._map_dim, dtype=bool) + self._best_fit_flat = np.zeros((self._map_dim, x_size), + dtype=self._ymap_norm.dtype) + self._best_errors_flat = [np.zeros(self._map_dim, dtype=np.float64) + for _ in range(num_best_parameters+num_new_parameters)] + if self._result is None: + self._best_values_flat = [np.zeros(self._map_dim, dtype=np.float64) + for _ in range(num_best_parameters)] + else: + self._best_values_flat = self._best_values + self._best_values_flat += [np.zeros(self._map_dim, dtype=np.float64) + for _ in range(num_new_parameters)] + else: + self._memfolder = './joblib_memmap' + try: + mkdir(self._memfolder) + except FileExistsError: + pass + filename_memmap = path.join(self._memfolder, 'out_of_bounds_memmap') + self._out_of_bounds_flat = np.memmap(filename_memmap, dtype=bool, + shape=(self._map_dim), mode='w+') + filename_memmap = path.join(self._memfolder, 'max_nfev_memmap') + self._max_nfev_flat = np.memmap(filename_memmap, dtype=bool, + shape=(self._map_dim), mode='w+') + filename_memmap = path.join(self._memfolder, 'redchi_memmap') + self._redchi_flat = np.memmap(filename_memmap, dtype=np.float64, + shape=(self._map_dim), mode='w+') + filename_memmap = path.join(self._memfolder, 
'success_memmap') + self._success_flat = np.memmap(filename_memmap, dtype=bool, + shape=(self._map_dim), mode='w+') + filename_memmap = path.join(self._memfolder, 'best_fit_memmap') + self._best_fit_flat = np.memmap(filename_memmap, dtype=self._ymap_norm.dtype, + shape=(self._map_dim, x_size), mode='w+') + self._best_errors_flat = [] + for i in range(num_best_parameters+num_new_parameters): + filename_memmap = path.join(self._memfolder, f'best_errors_memmap_{i}') + self._best_errors_flat.append(np.memmap(filename_memmap, dtype=np.float64, + shape=self._map_dim, mode='w+')) + self._best_values_flat = [] + for i in range(num_best_parameters): + filename_memmap = path.join(self._memfolder, f'best_values_memmap_{i}') + self._best_values_flat.append(np.memmap(filename_memmap, dtype=np.float64, + shape=self._map_dim, mode='w+')) + if self._result is not None: + self._best_values_flat[i][:] = self._best_values[i][:] + for i in range(num_new_parameters): + filename_memmap = path.join(self._memfolder, + f'best_values_memmap_{i+num_best_parameters}') + self._best_values_flat.append(np.memmap(filename_memmap, dtype=np.float64, + shape=self._map_dim, mode='w+')) + + # Update the best parameter list + if num_new_parameters: + self._best_parameters += self._new_parameters + + # Perform the first fit to get model component info and initial parameters + current_best_values = {} +# print(f'0 before:\n{current_best_values}') +# t1 = time() + self._result = self._fit(0, current_best_values, return_result=True, **kwargs) +# t2 = time() +# print(f'0 after:\n{current_best_values}') +# print('\nAfter the first fit:') +# self._parameters.pretty_print() +# print(self._result.fit_report(show_correl=False)) + + # Remove all irrelevant content from self._result + for attr in ('_abort', 'aborted', 'aic', 'best_fit', 'best_values', 'bic', 'calc_covar', + 'call_kws', 'chisqr', 'ci_out', 'col_deriv', 'covar', 'data', 'errorbars', + 'flatchain', 'ier', 'init_vals', 'init_fit', 'iter_cb', 'jacfcn', 
'kws', + 'last_internal_values', 'lmdif_message', 'message', 'method', 'nan_policy', + 'ndata', 'nfev', 'nfree', 'params', 'redchi', 'reduce_fcn', 'residual', 'result', + 'scale_covar', 'show_candidates', 'calc_covar', 'success', 'userargs', 'userfcn', + 'userkws', 'values', 'var_names', 'weights', 'user_options'): + try: + delattr(self._result, attr) + except AttributeError: +# logging.warning(f'Unknown attribute {attr} in fit.FtMap._cleanup_result') + pass + +# t3 = time() + if num_proc == 1: + # Perform the remaining fits serially + for n in range(1, self._map_dim): +# print(f'{n} before:\n{current_best_values}') + self._fit(n, current_best_values, **kwargs) +# print(f'{n} after:\n{current_best_values}') + else: + # Perform the remaining fits in parallel + num_fit = self._map_dim-1 +# print(f'num_fit = {num_fit}') + if num_proc > num_fit: + logging.warning(f'The requested number of processors ({num_proc}) exceeds the '+ + f'number of fits, num_proc reduced to ({num_fit})') + num_proc = num_fit + num_fit_per_proc = 1 + else: + num_fit_per_proc = round((num_fit)/num_proc) + if num_proc*num_fit_per_proc < num_fit: + num_fit_per_proc +=1 +# print(f'num_fit_per_proc = {num_fit_per_proc}') + num_fit_batch = min(num_fit_per_proc, 40) +# print(f'num_fit_batch = {num_fit_batch}') + with Parallel(n_jobs=num_proc) as parallel: + parallel(delayed(self._fit_parallel)(current_best_values, num_fit_batch, + n_start, **kwargs) for n_start in range(1, self._map_dim, num_fit_batch)) +# t4 = time() + + # Renormalize the initial parameters for external use + if self._norm is not None and self._normalized: + init_values = {} + for name, value in self._result.init_values.items(): + if name not in self._parameter_norms or self._parameters[name].expr is not None: + init_values[name] = value + elif self._parameter_norms[name]: + init_values[name] = value*self._norm[1] + self._result.init_values = init_values + for name, par in self._result.init_params.items(): + if par.expr is None and 
self._parameter_norms.get(name, False): + _min = par.min + _max = par.max + value = par.value*self._norm[1] + if not np.isinf(_min) and abs(_min) != float_min: + _min *= self._norm[1] + if not np.isinf(_max) and abs(_max) != float_min: + _max *= self._norm[1] + par.set(value=value, min=_min, max=_max) + par.init_value = par.value + + # Remap the best results +# t5 = time() + self._out_of_bounds = np.copy(np.reshape(self._out_of_bounds_flat, self._map_shape)) + self._max_nfev = np.copy(np.reshape(self._max_nfev_flat, self._map_shape)) + self._redchi = np.copy(np.reshape(self._redchi_flat, self._map_shape)) + self._success = np.copy(np.reshape(self._success_flat, self._map_shape)) + self._best_fit = np.copy(np.reshape(self._best_fit_flat, + list(self._map_shape)+[x_size])) + self._best_values = np.asarray([np.reshape(par, list(self._map_shape)) + for par in self._best_values_flat]) + self._best_errors = np.asarray([np.reshape(par, list(self._map_shape)) + for par in self._best_errors_flat]) + if self._inv_transpose is not None: + self._out_of_bounds = np.transpose(self._out_of_bounds, self._inv_transpose) + self._max_nfev = np.transpose(self._max_nfev, self._inv_transpose) + self._redchi = np.transpose(self._redchi, self._inv_transpose) + self._success = np.transpose(self._success, self._inv_transpose) + self._best_fit = np.transpose(self._best_fit, + list(self._inv_transpose)+[len(self._inv_transpose)]) + self._best_values = np.transpose(self._best_values, + [0]+[i+1 for i in self._inv_transpose]) + self._best_errors = np.transpose(self._best_errors, + [0]+[i+1 for i in self._inv_transpose]) + del self._out_of_bounds_flat + del self._max_nfev_flat + del self._redchi_flat + del self._success_flat + del self._best_fit_flat + del self._best_values_flat + del self._best_errors_flat +# t6 = time() + + # Restore parameter bounds and renormalize the parameters + for name, par in self._parameter_bounds.items(): + self._parameters[name].set(min=par['min'], max=par['max']) + 
self._normalized = False + if self._norm is not None: + for name, norm in self._parameter_norms.items(): + par = self._parameters[name] + if par.expr is None and norm: + value = par.value*self._norm[1] + _min = par.min + _max = par.max + if not np.isinf(_min) and abs(_min) != float_min: + _min *= self._norm[1] + if not np.isinf(_max) and abs(_max) != float_min: + _max *= self._norm[1] + par.set(value=value, min=_min, max=_max) +# t7 = time() +# print(f'total run time in fit: {t7-t0:.2f} seconds') +# print(f'run time first fit: {t2-t1:.2f} seconds') +# print(f'run time remaining fits: {t4-t3:.2f} seconds') +# print(f'run time remapping results: {t6-t5:.2f} seconds') + +# print('\n\nAt end fit:') +# self._parameters.pretty_print() +# print(f'self._best_values:\n{self._best_values}\n\n') + + # Free the shared memory + self.freemem() + + def _fit_parallel(self, current_best_values, num, n_start, **kwargs): + num = min(num, self._map_dim-n_start) + for n in range(num): +# print(f'{n_start+n} before:\n{current_best_values}') + self._fit(n_start+n, current_best_values, **kwargs) +# print(f'{n_start+n} after:\n{current_best_values}') + + def _fit(self, n, current_best_values, return_result=False, **kwargs): +#RV print(f'\n\nstart FitMap._fit {n}\n') +#RV print(f'current_best_values = {current_best_values}') +#RV print(f'self._best_parameters = {self._best_parameters}') +#RV print(f'self._new_parameters = {self._new_parameters}\n\n') +# self._parameters.pretty_print() + # Set parameters to current best values, but prevent them from sitting at boundaries + if self._new_parameters is None: + # Initial fit + for name, value in current_best_values.items(): + par = self._parameters[name] + par.set(value=self._reset_par_at_boundary(par, value)) + else: + # Refit + for i, name in enumerate(self._best_parameters): + par = self._parameters[name] + if name in self._new_parameters: + if name in current_best_values: + par.set(value=self._reset_par_at_boundary(par, 
current_best_values[name])) + elif par.expr is None: + par.set(value=self._best_values[i][n]) +#RV print(f'\nbefore fit {n}') +#RV self._parameters.pretty_print() + if self._mask is None: + result = self._model.fit(self._ymap_norm[n], self._parameters, x=self._x, **kwargs) + else: + result = self._model.fit(self._ymap_norm[n][~self._mask], self._parameters, + x=self._x[~self._mask], **kwargs) +# print(f'\nafter fit {n}') +# self._parameters.pretty_print() +# print(result.fit_report(show_correl=False)) + out_of_bounds = False + for name, par in self._parameter_bounds.items(): + value = result.params[name].value + if not np.isinf(par['min']) and value < par['min']: + out_of_bounds = True + break + if not np.isinf(par['max']) and value > par['max']: + out_of_bounds = True + break + self._out_of_bounds_flat[n] = out_of_bounds + if self._try_no_bounds and out_of_bounds: + # Rerun fit with parameter bounds in place + for name, par in self._parameter_bounds.items(): + self._parameters[name].set(min=par['min'], max=par['max']) + # Set parameters to current best values, but prevent them from sitting at boundaries + if self._new_parameters is None: + # Initial fit + for name, value in current_best_values.items(): + par = self._parameters[name] + par.set(value=self._reset_par_at_boundary(par, value)) + else: + # Refit + for i, name in enumerate(self._best_parameters): + par = self._parameters[name] + if name in self._new_parameters: + if name in current_best_values: + par.set(value=self._reset_par_at_boundary(par, + current_best_values[name])) + elif par.expr is None: + par.set(value=self._best_values[i][n]) +# print('\nbefore fit') +# self._parameters.pretty_print() +# print(result.fit_report(show_correl=False)) + if self._mask is None: + result = self._model.fit(self._ymap_norm[n], self._parameters, x=self._x, **kwargs) + else: + result = self._model.fit(self._ymap_norm[n][~self._mask], self._parameters, + x=self._x[~self._mask], **kwargs) +# print(f'\nafter fit {n}') +# 
self._parameters.pretty_print() +# print(result.fit_report(show_correl=False)) + out_of_bounds = False + for name, par in self._parameter_bounds.items(): + value = result.params[name].value + if not np.isinf(par['min']) and value < par['min']: + out_of_bounds = True + break + if not np.isinf(par['max']) and value > par['max']: + out_of_bounds = True + break +# print(f'{n} redchi < redchi_cutoff = {result.redchi < self._redchi_cutoff} success = {result.success} out_of_bounds = {out_of_bounds}') + # Reset parameters back to unbound + for name in self._parameter_bounds.keys(): + self._parameters[name].set(min=-np.inf, max=np.inf) + assert(not out_of_bounds) + if result.redchi >= self._redchi_cutoff: + result.success = False + if result.nfev == result.max_nfev: +# print(f'Maximum number of function evaluations reached for n = {n}') +# logging.warning(f'Maximum number of function evaluations reached for n = {n}') + if result.redchi < self._redchi_cutoff: + result.success = True + self._max_nfev_flat[n] = True + if result.success: + assert(all(True for par in current_best_values if par in result.params.values())) + for par in result.params.values(): + if par.vary: + current_best_values[par.name] = par.value + else: + logging.warning(f'Fit for n = {n} failed: {result.lmdif_message}') + # Renormalize the data and results + self._renormalize(n, result) + if self._print_report: + print(result.fit_report(show_correl=False)) + if self._plot: + dims = np.unravel_index(n, self._map_shape) + if self._inv_transpose is not None: + dims= tuple(dims[self._inv_transpose[i]] for i in range(len(dims))) + super().plot(result=result, y=np.asarray(self._ymap[dims]), plot_comp_legends=True, + skip_init=self._skip_init, title=str(dims)) +#RV print(f'\n\nend FitMap._fit {n}\n') +#RV print(f'current_best_values = {current_best_values}') +# self._parameters.pretty_print() +# print(result.fit_report(show_correl=False)) +#RV print(f'\nself._best_values_flat:\n{self._best_values_flat}\n\n') + if 
return_result: + return(result) + + def _renormalize(self, n, result): + self._redchi_flat[n] = np.float64(result.redchi) + self._success_flat[n] = result.success + if self._norm is None or not self._normalized: + self._best_fit_flat[n] = result.best_fit + for i, name in enumerate(self._best_parameters): + self._best_values_flat[i][n] = np.float64(result.params[name].value) + self._best_errors_flat[i][n] = np.float64(result.params[name].stderr) + else: + pars = set(self._parameter_norms) & set(self._best_parameters) + for name, par in result.params.items(): + if name in pars and self._parameter_norms[name]: + if par.stderr is not None: + par.stderr *= self._norm[1] + if par.expr is None: + par.value *= self._norm[1] + if self._print_report: + if par.init_value is not None: + par.init_value *= self._norm[1] + if not np.isinf(par.min) and abs(par.min) != float_min: + par.min *= self._norm[1] + if not np.isinf(par.max) and abs(par.max) != float_min: + par.max *= self._norm[1] + self._best_fit_flat[n] = result.best_fit*self._norm[1]+self._norm[0] + for i, name in enumerate(self._best_parameters): + self._best_values_flat[i][n] = np.float64(result.params[name].value) + self._best_errors_flat[i][n] = np.float64(result.params[name].stderr) + if self._plot: + if not self._skip_init: + result.init_fit = result.init_fit*self._norm[1]+self._norm[0] + result.best_fit = np.copy(self._best_fit_flat[n]) diff -r 3fd7398a3a7f -r fc38431f257f general.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/general.py Mon Mar 20 18:30:26 2023 +0000 @@ -0,0 +1,1965 @@ +#!/usr/bin/env python3 + +#FIX write a function that returns a list of peak indices for a given plot +#FIX use raise_error concept on more functions to optionally raise an error + +# -*- coding: utf-8 -*- +""" +Created on Mon Dec 6 15:36:22 2021 + +@author: rv43 +""" + +import logging +logger=logging.getLogger(__name__) + +import os +import sys +import re +try: + from yaml import safe_load, safe_dump +except: + pass +try: 
+ import h5py +except: + pass +import numpy as np +try: + import matplotlib.pyplot as plt + import matplotlib.lines as mlines + from matplotlib import transforms + from matplotlib.widgets import Button +except: + pass + +from ast import literal_eval +try: + from asteval import Interpreter, get_ast_names +except: + pass +from copy import deepcopy +try: + from sympy import diff, simplify +except: + pass +from time import time + + +def depth_list(L): return(isinstance(L, list) and max(map(depth_list, L))+1) +def depth_tuple(T): return(isinstance(T, tuple) and max(map(depth_tuple, T))+1) +def unwrap_tuple(T): + if depth_tuple(T) > 1 and len(T) == 1: + T = unwrap_tuple(*T) + return(T) + +def illegal_value(value, name, location=None, raise_error=False, log=True): + if not isinstance(location, str): + location = '' + else: + location = f'in {location} ' + if isinstance(name, str): + error_msg = f'Illegal value for {name} {location}({value}, {type(value)})' + else: + error_msg = f'Illegal value {location}({value}, {type(value)})' + if log: + logger.error(error_msg) + if raise_error: + raise ValueError(error_msg) + +def illegal_combination(value1, name1, value2, name2, location=None, raise_error=False, + log=True): + if not isinstance(location, str): + location = '' + else: + location = f'in {location} ' + if isinstance(name1, str): + error_msg = f'Illegal combination for {name1} and {name2} {location}'+ \ + f'({value1}, {type(value1)} and {value2}, {type(value2)})' + else: + error_msg = f'Illegal combination {location}'+ \ + f'({value1}, {type(value1)} and {value2}, {type(value2)})' + if log: + logger.error(error_msg) + if raise_error: + raise ValueError(error_msg) + +def test_ge_gt_le_lt(ge, gt, le, lt, func, location=None, raise_error=False, log=True): + """Check individual and mutual validity of ge, gt, le, lt qualifiers + func: is_int or is_num to test for int or numbers + Return: True upon success or False when mutually exlusive + """ + if ge is None and gt is None 
def range_string_ge_gt_le_lt(ge=None, gt=None, le=None, lt=None):
    """Return a range string representation matching the ge, gt, le, lt qualifiers
    Does not validate the inputs, do that as needed before calling
    """
    no_upper = le is None and lt is None
    no_lower = ge is None and gt is None
    text = ''
    if ge is not None:
        text += f'>= {ge}' if no_upper else f'[{ge}, '
    elif gt is not None:
        text += f'> {gt}' if no_upper else f'({gt}, '
    if le is not None:
        text += f'<= {le}' if no_lower else f'{le}]'
    elif lt is not None:
        text += f'< {lt}' if no_lower else f'{lt})'
    return(text)

def is_int(v, ge=None, gt=None, le=None, lt=None, raise_error=False, log=True):
    """Value is an integer in range ge <= v <= le or gt < v < lt or some combination.
    Return: True if yes, False if no
    """
    return(_is_int_or_num(v, 'int', ge, gt, le, lt, raise_error, log))

def is_num(v, ge=None, gt=None, le=None, lt=None, raise_error=False, log=True):
    """Value is a number in range ge <= v <= le or gt < v < lt or some combination.
    Return: True if yes, False if no
    """
    return(_is_int_or_num(v, 'num', ge, gt, le, lt, raise_error, log))

def _is_int_or_num(v, type_str, ge=None, gt=None, le=None, lt=None, raise_error=False,
        log=True):
    """Shared implementation behind is_int/is_num: check the value's type, check
    the bound qualifiers, then check the value against them. The first violated
    bound produces the error report.
    """
    if type_str == 'int':
        if not isinstance(v, int):
            illegal_value(v, 'v', '_is_int_or_num', raise_error, log)
            return(False)
        if not test_ge_gt_le_lt(ge, gt, le, lt, is_int, '_is_int_or_num', raise_error, log):
            return(False)
    elif type_str == 'num':
        if not isinstance(v, (int, float)):
            illegal_value(v, 'v', '_is_int_or_num', raise_error, log)
            return(False)
        if not test_ge_gt_le_lt(ge, gt, le, lt, is_num, '_is_int_or_num', raise_error, log):
            return(False)
    else:
        illegal_value(type_str, 'type_str', '_is_int_or_num', raise_error, log)
        return(False)
    if ge is None and gt is None and le is None and lt is None:
        return(True)
    error_msg = None
    if ge is not None and v < ge:
        error_msg = f'Value {v} out of range: {v} !>= {ge}'
    elif gt is not None and v <= gt:
        error_msg = f'Value {v} out of range: {v} !> {gt}'
    elif le is not None and v > le:
        error_msg = f'Value {v} out of range: {v} !<= {le}'
    elif lt is not None and v >= lt:
        error_msg = f'Value {v} out of range: {v} !< {lt}'
    if error_msg is None:
        return(True)
    if log:
        logger.error(error_msg)
    if raise_error:
        raise ValueError(error_msg)
    return(False)

def is_int_pair(v, ge=None, gt=None, le=None, lt=None, raise_error=False, log=True):
    """Value is an integer pair, each in range ge <= v[i] <= le or gt < v[i] < lt or
    ge[i] <= v[i] <= le[i] or gt[i] < v[i] < lt[i] or some combination.
    Return: True if yes, False if no
    """
    return(_is_int_or_num_pair(v, 'int', ge, gt, le, lt, raise_error, log))
def is_num_pair(v, ge=None, gt=None, le=None, lt=None, raise_error=False, log=True):
    """Value is a number pair, each in range ge <= v[i] <= le or gt < v[i] < lt or
    ge[i] <= v[i] <= le[i] or gt[i] < v[i] < lt[i] or some combination.
    Return: True if yes, False if no
    """
    return(_is_int_or_num_pair(v, 'num', ge, gt, le, lt, raise_error, log))

def _is_int_or_num_pair(v, type_str, ge=None, gt=None, le=None, lt=None, raise_error=False,
        log=True):
    """Shared implementation behind is_int_pair/is_num_pair.
    Bounds may be scalars (applied to both elements) or pairs (applied elementwise).
    """
    if type_str == 'int':
        if not (isinstance(v, (tuple, list)) and len(v) == 2 and isinstance(v[0], int) and
                isinstance(v[1], int)):
            illegal_value(v, 'v', '_is_int_or_num_pair', raise_error, log)
            return(False)
        func = is_int
    elif type_str == 'num':
        if not (isinstance(v, (tuple, list)) and len(v) == 2 and
                isinstance(v[0], (int, float)) and isinstance(v[1], (int, float))):
            illegal_value(v, 'v', '_is_int_or_num_pair', raise_error, log)
            return(False)
        func = is_num
    else:
        illegal_value(type_str, 'type_str', '_is_int_or_num_pair', raise_error, log)
        return(False)
    if ge is None and gt is None and le is None and lt is None:
        return(True)
    # Expand scalar bounds to pairs. Bug fix: probe with log=False (was log=True)
    # so a legitimate pair-valued bound does not emit a spurious error before the
    # elif branch validates it as a pair.
    if ge is None or func(ge, log=False):
        ge = 2*[ge]
    elif not _is_int_or_num_pair(ge, type_str, raise_error=raise_error, log=log):
        return(False)
    if gt is None or func(gt, log=False):
        gt = 2*[gt]
    elif not _is_int_or_num_pair(gt, type_str, raise_error=raise_error, log=log):
        return(False)
    if le is None or func(le, log=False):
        le = 2*[le]
    elif not _is_int_or_num_pair(le, type_str, raise_error=raise_error, log=log):
        return(False)
    if lt is None or func(lt, log=False):
        lt = 2*[lt]
    elif not _is_int_or_num_pair(lt, type_str, raise_error=raise_error, log=log):
        return(False)
    if (not func(v[0], ge[0], gt[0], le[0], lt[0], raise_error, log) or
            not func(v[1], ge[1], gt[1], le[1], lt[1], raise_error, log)):
        return(False)
    return(True)

def is_int_series(l, ge=None, gt=None, le=None, lt=None, raise_error=False, log=True):
    """Value is a tuple or list of integers, each in range ge <= l[i] <= le or
    gt < l[i] < lt or some combination.
    """
    if not test_ge_gt_le_lt(ge, gt, le, lt, is_int, 'is_int_series', raise_error, log):
        return(False)
    if not isinstance(l, (tuple, list)):
        illegal_value(l, 'l', 'is_int_series', raise_error, log)
        return(False)
    if any(not is_int(v, ge, gt, le, lt, raise_error, log) for v in l):
        return(False)
    return(True)

def is_num_series(l, ge=None, gt=None, le=None, lt=None, raise_error=False, log=True):
    """Value is a tuple or list of numbers, each in range ge <= l[i] <= le or
    gt < l[i] < lt or some combination.
    """
    # Bug fix: validate the bounds with is_num and report under 'is_num_series'
    # (was is_int/'is_int_series'), so non-integer bounds are accepted here.
    if not test_ge_gt_le_lt(ge, gt, le, lt, is_num, 'is_num_series', raise_error, log):
        return(False)
    if not isinstance(l, (tuple, list)):
        illegal_value(l, 'l', 'is_num_series', raise_error, log)
        return(False)
    if any(not is_num(v, ge, gt, le, lt, raise_error, log) for v in l):
        return(False)
    return(True)

def is_str_series(l, raise_error=False, log=True):
    """Value is a tuple or list of strings.
    """
    if (not isinstance(l, (tuple, list)) or
            any(not isinstance(s, str) for s in l)):
        illegal_value(l, 'l', 'is_str_series', raise_error, log)
        return(False)
    return(True)

def is_dict_series(l, raise_error=False, log=True):
    """Value is a tuple or list of dictionaries.
    """
    if (not isinstance(l, (tuple, list)) or
            any(not isinstance(d, dict) for d in l)):
        illegal_value(l, 'l', 'is_dict_series', raise_error, log)
        return(False)
    return(True)
+ """ + if (not isinstance(l, (tuple, list)) or + any(True if not isinstance(d, dict) else False for d in l)): + illegal_value(l, 'l', 'is_dict_series', raise_error, log) + return(False) + return(True) + +def is_dict_nums(l, raise_error=False, log=True): + """Value is a dictionary with single number values + """ + if (not isinstance(l, dict) or + any(True if not is_num(v, log=False) else False for v in l.values())): + illegal_value(l, 'l', 'is_dict_nums', raise_error, log) + return(False) + return(True) + +def is_dict_strings(l, raise_error=False, log=True): + """Value is a dictionary with single string values + """ + if (not isinstance(l, dict) or + any(True if not isinstance(v, str) else False for v in l.values())): + illegal_value(l, 'l', 'is_dict_strings', raise_error, log) + return(False) + return(True) + +def is_index(v, ge=0, lt=None, raise_error=False, log=True): + """Value is an array index in range ge <= v < lt. + NOTE lt IS NOT included! + """ + if isinstance(lt, int): + if lt <= ge: + illegal_combination(ge, 'ge', lt, 'lt', 'is_index', raise_error, log) + return(False) + return(is_int(v, ge=ge, lt=lt, raise_error=raise_error, log=log)) + +def is_index_range(v, ge=0, le=None, lt=None, raise_error=False, log=True): + """Value is an array index range in range ge <= v[0] <= v[1] <= le or ge <= v[0] <= v[1] < lt. + NOTE le IS included! 
+ """ + if not is_int_pair(v, raise_error=raise_error, log=log): + return(False) + if not test_ge_gt_le_lt(ge, None, le, lt, is_int, 'is_index_range', raise_error, log): + return(False) + if not ge <= v[0] <= v[1] or (le is not None and v[1] > le) or (lt is not None and v[1] >= lt): + if le is not None: + error_msg = f'Value {v} out of range: !({ge} <= {v[0]} <= {v[1]} <= {le})' + else: + error_msg = f'Value {v} out of range: !({ge} <= {v[0]} <= {v[1]} < {lt})' + if log: + logger.error(error_msg) + if raise_error: + raise ValueError(error_msg) + return(False) + return(True) + +def index_nearest(a, value): + a = np.asarray(a) + if a.ndim > 1: + raise ValueError(f'Invalid array dimension for parameter a ({a.ndim}, {a})') + # Round up for .5 + value *= 1.0+sys.float_info.epsilon + return((int)(np.argmin(np.abs(a-value)))) + +def index_nearest_low(a, value): + a = np.asarray(a) + if a.ndim > 1: + raise ValueError(f'Invalid array dimension for parameter a ({a.ndim}, {a})') + index = int(np.argmin(np.abs(a-value))) + if value < a[index] and index > 0: + index -= 1 + return(index) + +def index_nearest_upp(a, value): + a = np.asarray(a) + if a.ndim > 1: + raise ValueError(f'Invalid array dimension for parameter a ({a.ndim}, {a})') + index = int(np.argmin(np.abs(a-value))) + if value > a[index] and index < a.size-1: + index += 1 + return(index) + +def round_to_n(x, n=1): + if x == 0.0: + return(0) + else: + return(type(x)(round(x, n-1-int(np.floor(np.log10(abs(x))))))) + +def round_up_to_n(x, n=1): + xr = round_to_n(x, n) + if abs(x/xr) > 1.0: + xr += np.sign(x)*10**(np.floor(np.log10(abs(x)))+1-n) + return(type(x)(xr)) + +def trunc_to_n(x, n=1): + xr = round_to_n(x, n) + if abs(xr/x) > 1.0: + xr -= np.sign(x)*10**(np.floor(np.log10(abs(x)))+1-n) + return(type(x)(xr)) + +def almost_equal(a, b, sig_figs): + if is_num(a) and is_num(b): + return(abs(round_to_n(a-b, sig_figs)) < pow(10, -sig_figs+1)) + else: + raise ValueError(f'Invalid value for a or b in almost_equal (a: {a}, 
def string_to_list(s, split_on_dash=True, remove_duplicates=True, sort=True):
    """Return a list of numbers by splitting/expanding a string on any combination of
    commas, whitespaces, or dashes (when split_on_dash=True)
    e.g: '1, 3, 5-8, 12 ' -> [1, 3, 5, 6, 7, 8, 12]
    Returns None upon an illegal input.
    """
    if not isinstance(s, str):
        # Bug fix: illegal_value requires the name argument; the original call
        # omitted it and raised TypeError on every non-string input
        illegal_value(s, 's', location='string_to_list')
        return(None)
    if not len(s):
        return([])
    try:
        # Bug fix: raw strings -- '\s' is an invalid escape sequence in a plain literal
        ll = [x for x in re.split(r'\s+,\s+|\s+,|,\s+|\s+|,', s.strip())]
    except (ValueError, TypeError, SyntaxError, MemoryError, RecursionError):
        return(None)
    if split_on_dash:
        try:
            l = []
            for l1 in ll:
                l2 = [literal_eval(x) for x in re.split(r'\s+-\s+|\s+-|-\s+|\s+|-', l1)]
                if len(l2) == 1:
                    l += l2
                elif len(l2) == 2 and l2[1] > l2[0]:
                    l += [i for i in range(l2[0], l2[1]+1)]
                else:
                    raise ValueError
        except (ValueError, TypeError, SyntaxError, MemoryError, RecursionError):
            return(None)
    else:
        l = [literal_eval(x) for x in ll]
    if remove_duplicates:
        l = list(dict.fromkeys(l))
    if sort:
        l = sorted(l)
    return(l)

def get_trailing_int(string):
    """Return the integer at the end of string, or None if there is none."""
    indexRegex = re.compile(r'\d+$')
    mo = indexRegex.search(string)
    if mo is None:
        return(None)
    return(int(mo.group()))

def input_int(s=None, ge=None, gt=None, le=None, lt=None, default=None, inset=None,
        raise_error=False, log=True):
    """Prompt for an integer within optional bounds / set; see _input_int_or_num."""
    return(_input_int_or_num('int', s, ge, gt, le, lt, default, inset, raise_error, log))

def input_num(s=None, ge=None, gt=None, le=None, lt=None, default=None, raise_error=False,
        log=True):
    """Prompt for a number within optional bounds; see _input_int_or_num."""
    return(_input_int_or_num('num', s, ge, gt, le, lt, default, None, raise_error, log))

def _input_int_or_num(type_str, s=None, ge=None, gt=None, le=None, lt=None, default=None,
        inset=None, raise_error=False, log=True):
    """Prompt for and validate an int ('int') or number ('num'), re-prompting
    until the entered value satisfies the bounds (and inset, for ints).
    """
    if type_str == 'int':
        if not test_ge_gt_le_lt(ge, gt, le, lt, is_int, '_input_int_or_num', raise_error, log):
            return(None)
    elif type_str == 'num':
        if not test_ge_gt_le_lt(ge, gt, le, lt, is_num, '_input_int_or_num', raise_error, log):
            return(None)
    else:
        illegal_value(type_str, 'type_str', '_input_int_or_num', raise_error, log)
        return(None)
    if default is not None:
        # The default must itself satisfy the type and every bound
        if not _is_int_or_num(default, type_str, raise_error=raise_error, log=log):
            return(None)
        if ge is not None and default < ge:
            illegal_combination(ge, 'ge', default, 'default', '_input_int_or_num', raise_error,
                    log)
            return(None)
        if gt is not None and default <= gt:
            illegal_combination(gt, 'gt', default, 'default', '_input_int_or_num', raise_error,
                    log)
            return(None)
        if le is not None and default > le:
            illegal_combination(le, 'le', default, 'default', '_input_int_or_num', raise_error,
                    log)
            return(None)
        if lt is not None and default >= lt:
            illegal_combination(lt, 'lt', default, 'default', '_input_int_or_num', raise_error,
                    log)
            return(None)
        default_string = f' [{default}]'
    else:
        default_string = ''
    if inset is not None:
        if (not isinstance(inset, (tuple, list)) or
                any(not isinstance(i, int) for i in inset)):
            illegal_value(inset, 'inset', '_input_int_or_num', raise_error, log)
            return(None)
    v_range = f'{range_string_ge_gt_le_lt(ge, gt, le, lt)}'
    if len(v_range):
        v_range = f' {v_range}'
    if s is None:
        if type_str == 'int':
            print(f'Enter an integer{v_range}{default_string}: ')
        else:
            print(f'Enter a number{v_range}{default_string}: ')
    else:
        print(f'{s}{v_range}{default_string}: ')
    try:
        i = input()
        if isinstance(i, str) and not len(i):
            v = default
            print(f'{v}')
        else:
            v = literal_eval(i)
        if inset and v not in inset:
            raise ValueError(f'{v} not part of the set {inset}')
    except (ValueError, TypeError, SyntaxError, MemoryError, RecursionError):
        v = None
    except:
        if log:
            logger.error('Unexpected error')
        if raise_error:
            raise ValueError('Unexpected error')
        # Bug fix: v could be left unbound here, raising NameError below
        v = None
    if not _is_int_or_num(v, type_str, ge, gt, le, lt):
        v = _input_int_or_num(type_str, s, ge, gt, le, lt, default, inset, raise_error, log)
    return(v)

def input_int_list(s=None, ge=None, le=None, split_on_dash=True, remove_duplicates=True,
        sort=True, raise_error=False, log=True):
    """Prompt the user to input a list of integers and split the entered string on any
    combination of commas, whitespaces, or dashes (when split_on_dash is True)
    e.g: '1 3,5-8 , 12 ' -> [1, 3, 5, 6, 7, 8, 12]
    remove_duplicates: removes duplicates if True (may also change the order)
    sort: sort in ascending order if True
    return None upon an illegal input
    """
    return(_input_int_or_num_list('int', s, ge, le, split_on_dash, remove_duplicates, sort,
            raise_error, log))

def input_num_list(s=None, ge=None, le=None, remove_duplicates=True, sort=True, raise_error=False,
        log=True):
    """Prompt the user to input a list of numbers and split the entered string on any
    combination of commas or whitespaces
    e.g: '1.0, 3, 5.8, 12 ' -> [1.0, 3.0, 5.8, 12.0]
    remove_duplicates: removes duplicates if True (may also change the order)
    sort: sort in ascending order if True
    return None upon an illegal input
    """
    return(_input_int_or_num_list('num', s, ge, le, False, remove_duplicates, sort, raise_error,
            log))

def _input_int_or_num_list(type_str, s=None, ge=None, le=None, split_on_dash=True,
        remove_duplicates=True, sort=True, raise_error=False, log=True):
    """Prompt for and validate a list of ints ('int') or numbers ('num'),
    re-prompting until every entered value satisfies the bounds.
    """
    #FIX do we want a limit on max dimension?
    if type_str == 'int':
        if not test_ge_gt_le_lt(ge, None, le, None, is_int, 'input_int_or_num_list', raise_error,
                log):
            return(None)
    elif type_str == 'num':
        if not test_ge_gt_le_lt(ge, None, le, None, is_num, 'input_int_or_num_list', raise_error,
                log):
            return(None)
    else:
        illegal_value(type_str, 'type_str', '_input_int_or_num_list')
        return(None)
    v_range = f'{range_string_ge_gt_le_lt(ge=ge, le=le)}'
    if len(v_range):
        v_range = f' (each value in {v_range})'
    if s is None:
        print(f'Enter a series of integers{v_range}: ')
    else:
        print(f'{s}{v_range}: ')
    try:
        l = string_to_list(input(), split_on_dash, remove_duplicates, sort)
    except (ValueError, TypeError, SyntaxError, MemoryError, RecursionError):
        l = None
    except:
        print('Unexpected error')
        raise
    if (not isinstance(l, list) or
            any(not _is_int_or_num(v, type_str, ge=ge, le=le) for v in l)):
        if split_on_dash:
            print('Invalid input: enter a valid set of dash/comma/whitespace separated integers '+
                    'e.g. 1 3,5-8 , 12')
        else:
            print('Invalid input: enter a valid set of comma/whitespace separated integers '+
                    'e.g. 1 3,5 8 , 12')
        l = _input_int_or_num_list(type_str, s, ge, le, split_on_dash, remove_duplicates, sort,
                raise_error, log)
    return(l)
def input_yesno(s=None, default=None):
    """Prompt for a yes/no answer; return True for yes, False for no.
    default (str) is used when the user just presses enter.
    NOTE(review): answers are matched by substring containment (i.lower() in 'yes'),
    so e.g. 'e' or 's' also counts as yes -- confirm this is intended.
    """
    default_string = ''
    if default is not None:
        if not isinstance(default, str):
            illegal_value(default, 'default', 'input_yesno')
            return(None)
        if default.lower() in 'yes':
            default = 'y'
        elif default.lower() in 'no':
            default = 'n'
        else:
            illegal_value(default, 'default', 'input_yesno')
            return(None)
        default_string = f' [{default}]'
    prompt = 'Enter yes or no' if s is None else s
    print(f'{prompt}{default_string}: ')
    answer = input()
    if isinstance(answer, str) and not len(answer):
        answer = default
        print(f'{answer}')
    if answer is not None and answer.lower() in 'yes':
        return(True)
    if answer is not None and answer.lower() in 'no':
        return(False)
    print('Invalid input, enter yes or no')
    return(input_yesno(s, default))

def input_menu(items, default=None, header=None):
    """Prompt the user to pick one of items by number; return the 0-based index."""
    if not isinstance(items, (tuple, list)) or any(not isinstance(i, str) for i in items):
        illegal_value(items, 'items', 'input_menu')
        return(None)
    default_string = ''
    if default is not None:
        if not (isinstance(default, str) and default in items):
            logger.error(f'Invalid value for default ({default}), must be in {items}')
            return(None)
        default_string = f' [{items.index(default)+1}]'
    title = 'Choose one of the following items' if header is None else header
    print(f'{title} (1, {len(items)}){default_string}:')
    for i, choice in enumerate(items):
        print(f'    {i+1}: {choice}')
    try:
        choice = input()
        if isinstance(choice, str) and not len(choice):
            choice = items.index(default)
            print(f'{choice+1}')
        else:
            choice = literal_eval(choice)
            if isinstance(choice, int) and 1 <= choice <= len(items):
                choice -= 1
            else:
                raise ValueError
    except (ValueError, TypeError, SyntaxError, MemoryError, RecursionError):
        choice = None
    except:
        print('Unexpected error')
        raise
    if choice is None:
        print(f'Invalid choice, enter a number between 1 and {len(items)}')
        # NOTE(review): header is intentionally not re-passed on retry (as original)
        choice = input_menu(items, default)
    return(choice)
def assert_no_duplicates_in_list_of_dicts(l: list, raise_error=False) -> list:
    """Return l if it is a list of unique dicts; report and return None otherwise."""
    if not isinstance(l, list):
        illegal_value(l, 'l', 'assert_no_duplicates_in_list_of_dicts', raise_error)
        return(None)
    if any(not isinstance(d, dict) for d in l):
        illegal_value(l, 'l', 'assert_no_duplicates_in_list_of_dicts', raise_error)
        return(None)
    # Dicts are unhashable: dedupe via a set of sorted item tuples
    if len(l) != len([dict(t) for t in {tuple(sorted(d.items())) for d in l}]):
        if raise_error:
            raise ValueError(f'Duplicate items found in {l}')
        logger.error(f'Duplicate items found in {l}')
        return(None)
    return(l)

def assert_no_duplicate_key_in_list_of_dicts(l: list, key: str, raise_error=False) -> list:
    """Return l if every dict in the list has a distinct value for key;
    report and return None otherwise."""
    if not isinstance(key, str):
        illegal_value(key, 'key', 'assert_no_duplicate_key_in_list_of_dicts', raise_error)
        return(None)
    if not isinstance(l, list):
        illegal_value(l, 'l', 'assert_no_duplicate_key_in_list_of_dicts', raise_error)
        return(None)
    if any(not isinstance(d, dict) for d in l):
        # Fix: diagnostic location previously named the wrong function
        illegal_value(l, 'l', 'assert_no_duplicate_key_in_list_of_dicts', raise_error)
        return(None)
    keys = [d.get(key, None) for d in l]
    if None in keys or len(set(keys)) != len(l):
        if raise_error:
            raise ValueError(f'Duplicate or missing key ({key}) found in {l}')
        logger.error(f'Duplicate or missing key ({key}) found in {l}')
        return(None)
    return(l)

def assert_no_duplicate_attr_in_list_of_objs(l: list, attr: str, raise_error=False) -> list:
    """Return l if every object in the list has a distinct value for attr;
    report and return None otherwise."""
    if not isinstance(attr, str):
        illegal_value(attr, 'attr', 'assert_no_duplicate_attr_in_list_of_objs', raise_error)
        return(None)
    if not isinstance(l, list):
        # Fix: diagnostic location previously named a nonexistent function
        illegal_value(l, 'l', 'assert_no_duplicate_attr_in_list_of_objs', raise_error)
        return(None)
    attrs = [getattr(obj, attr, None) for obj in l]
    if None in attrs or len(set(attrs)) != len(l):
        if raise_error:
            raise ValueError(f'Duplicate or missing attr ({attr}) found in {l}')
        logger.error(f'Duplicate or missing attr ({attr}) found in {l}')
        return(None)
    return(l)
def file_exists_and_readable(path):
    """Return path if it names an existing readable file, else raise ValueError."""
    if not os.path.isfile(path):
        raise ValueError(f'{path} is not a valid file')
    if not os.access(path, os.R_OK):
        raise ValueError(f'{path} is not accessible for reading')
    return(path)

def create_mask(x, bounds=None, exclude_bounds=False, current_mask=None):
    """Return a boolean mask over x selecting (or, with exclude_bounds, excluding)
    the interval between min(bounds) and max(bounds).

    bounds is a pair of numbers in the same units as x.
    NOTE(review): inclusion uses strict < and > (the bound values themselves are
    excluded), and current_mask is merged with logical_or (which can only grow
    the selection) -- confirm both are intended.
    """
    if not isinstance(x, (tuple, list, np.ndarray)) or not len(x):
        logger.warning(f'Invalid input array ({x}, {type(x)})')
        return(None)
    # Bug fix: elementwise comparisons below require array semantics; a plain
    # list/tuple x raised TypeError whenever bounds were given
    x = np.asarray(x)
    if bounds is not None and not is_num_pair(bounds):
        logger.warning(f'Invalid bounds parameter ({bounds} {type(bounds)}, input ignored')
        bounds = None
    if bounds is not None:
        if exclude_bounds:
            mask = np.logical_or(x < min(bounds), x > max(bounds))
        else:
            mask = np.logical_and(x > min(bounds), x < max(bounds))
    else:
        mask = np.ones(len(x), dtype=bool)
    if current_mask is not None:
        if not isinstance(current_mask, (tuple, list, np.ndarray)) or len(current_mask) != len(x):
            logger.warning(f'Invalid current_mask ({current_mask}, {type(current_mask)}), '+
                    'input ignored')
        else:
            mask = np.logical_or(mask, current_mask)
    if True not in mask:
        logger.warning('Entire data array is masked')
    return(mask)

def eval_expr(name, expr, expr_variables, user_variables=None, max_depth=10, raise_error=False,
        log=True, **kwargs):
    """Evaluate an expression of expressions.

    name: name the evaluated expression is known by (used for cycle detection)
    expr: the expression to evaluate
    expr_variables: dict mapping variable names to their defining expressions
    user_variables: dict of numeric values seeded into the interpreter symtable
    kwargs may carry 'chain' (evaluation chain so far) and 'ast' (shared
    asteval Interpreter) on recursive calls.
    """
    if not isinstance(name, str):
        illegal_value(name, 'name', 'eval_expr', raise_error, log)
        return(None)
    if not isinstance(expr, str):
        illegal_value(expr, 'expr', 'eval_expr', raise_error, log)
        return(None)
    if not is_dict_strings(expr_variables, log=False):
        illegal_value(expr_variables, 'expr_variables', 'eval_expr', raise_error, log)
        return(None)
    if user_variables is not None and not is_dict_nums(user_variables, log=False):
        illegal_value(user_variables, 'user_variables', 'eval_expr', raise_error, log)
        return(None)
    if not is_int(max_depth, gt=1, log=False):
        illegal_value(max_depth, 'max_depth', 'eval_expr', raise_error, log)
        return(None)
    if not isinstance(raise_error, bool):
        illegal_value(raise_error, 'raise_error', 'eval_expr', raise_error, log)
        return(None)
    if not isinstance(log, bool):
        illegal_value(log, 'log', 'eval_expr', raise_error, log)
        return(None)
    if 'chain' in kwargs:
        chain = kwargs.pop('chain')
        if not is_str_series(chain):
            illegal_value(chain, 'chain', 'eval_expr', raise_error, log)
            return(None)
    else:
        chain = []
    if len(chain) > max_depth:
        # Bug fix: message was missing the f prefix (logged literal '{max_depth}')
        error_msg = f'Exceeded maximum depth ({max_depth}) in eval_expr'
        if log:
            logger.error(error_msg)
        if raise_error:
            raise ValueError(error_msg)
        return(None)
    if name not in chain:
        chain.append(name)
    if 'ast' in kwargs:
        ast = kwargs.pop('ast')
    else:
        ast = Interpreter()
    if user_variables is not None:
        ast.symtable.update(user_variables)
    # Variables referenced by expr that are themselves defined by expressions
    # and not yet resolved in the symtable
    chain_vars = [var for var in get_ast_names(ast.parse(expr))
            if var in expr_variables and var not in ast.symtable]
    save_chain = chain.copy()
    for var in chain_vars:
        if var in chain:
            error_msg = f'Circular variable {var} in eval_expr'
            if log:
                logger.error(error_msg)
            if raise_error:
                raise ValueError(error_msg)
            return(None)
        if var in ast.user_defined_symbols():
            val = ast.symtable[var]
        else:
            val = eval_expr(var, expr_variables[var], expr_variables, max_depth=max_depth,
                    raise_error=raise_error, log=log, chain=chain, ast=ast)
            if val is None:
                return(None)
        ast.symtable[var] = val
        chain = save_chain.copy()
    return(ast.eval(expr))

def full_gradient(expr, x, expr_name=None, expr_variables=None, valid_variables=None, max_depth=10,
        raise_error=False, log=True, **kwargs):
    """Compute the full gradient dexpr/dx, chaining through expression-valued
    variables in expr_variables; returns a sympy expression (or 1.0 when
    expr_name == x, or None upon an illegal input).
    """
    if not isinstance(x, str):
        illegal_value(x, 'x', 'full_gradient', raise_error, log)
        return(None)
    if expr_name is not None and not isinstance(expr_name, str):
        # Fix: diagnostic location previously said 'eval_expr' (copy-paste)
        illegal_value(expr_name, 'expr_name', 'full_gradient', raise_error, log)
        return(None)
    if expr_variables is not None and not is_dict_strings(expr_variables, log=False):
        illegal_value(expr_variables, 'expr_variables', 'full_gradient', raise_error, log)
        return(None)
    if valid_variables is not None and not is_str_series(valid_variables, log=False):
        illegal_value(valid_variables, 'valid_variables', 'full_gradient', raise_error, log)
        # Bug fix: was missing, execution fell through with an invalid input
        return(None)
    if not is_int(max_depth, gt=1, log=False):
        illegal_value(max_depth, 'max_depth', 'full_gradient', raise_error, log)
        return(None)
    if not isinstance(raise_error, bool):
        illegal_value(raise_error, 'raise_error', 'full_gradient', raise_error, log)
        return(None)
    if not isinstance(log, bool):
        illegal_value(log, 'log', 'full_gradient', raise_error, log)
        return(None)
    if expr_name is not None and expr_name == x:
        return(1.0)
    if 'chain' in kwargs:
        chain = kwargs.pop('chain')
        if not is_str_series(chain):
            illegal_value(chain, 'chain', 'full_gradient', raise_error, log)
            return(None)
    else:
        chain = []
    if len(chain) > max_depth:
        # Bug fix: message was missing the f prefix (logged literal '{max_depth}')
        error_msg = f'Exceeded maximum depth ({max_depth}) in full_gradient'
        if log:
            logger.error(error_msg)
        if raise_error:
            raise ValueError(error_msg)
        return(None)
    if expr_name is not None and expr_name not in chain:
        chain.append(expr_name)
    ast = Interpreter()
    if expr_variables is None:
        chain_vars = []
    else:
        chain_vars = [var for var in get_ast_names(ast.parse(f'{expr}'))
                if var in expr_variables and var != x and var not in ast.symtable]
    if valid_variables is not None:
        unknown_vars = [var for var in chain_vars if var not in valid_variables]
        if len(unknown_vars):
            error_msg = f'Unknown variable {unknown_vars} in {expr}'
            if log:
                logger.error(error_msg)
            if raise_error:
                raise ValueError(error_msg)
            return(None)
    # Direct gradient, then chain-rule contributions through each variable
    dexpr_dx = diff(expr, x)
    save_chain = chain.copy()
    for var in chain_vars:
        if var in chain:
            error_msg = f'Circular variable {var} in full_gradient'
            if log:
                logger.error(error_msg)
            if raise_error:
                raise ValueError(error_msg)
            return(None)
        dexpr_dvar = diff(expr, var)
        if dexpr_dvar:
            dvar_dx = full_gradient(expr_variables[var], x, expr_name=var,
                    expr_variables=expr_variables, valid_variables=valid_variables,
                    max_depth=max_depth, raise_error=raise_error, log=log, chain=chain)
            if dvar_dx:
                dexpr_dx = f'{dexpr_dx}+({dexpr_dvar})*({dvar_dx})'
        chain = save_chain.copy()
    return(simplify(dexpr_dx))
def bounds_from_mask(mask, return_include_bounds:bool=True):
    """Return inclusive (start, stop) index pairs for each contiguous run of
    mask entries equal to return_include_bounds.
    NOTE: uses mask.size, so mask is expected to be a numpy array.
    """
    ranges = []
    run_start = None
    for idx, flag in enumerate(mask):
        if flag == return_include_bounds:
            if run_start is None:
                run_start = idx
        elif run_start is not None:
            ranges.append((run_start, idx-1))
            run_start = None
    # Close a run that extends to the end of the mask
    if run_start is not None:
        ranges.append((run_start, mask.size-1))
    return(ranges)
+ plt.close() + + def clear_last_selection(event): + if len(selected_index_ranges): + selected_index_ranges.pop(-1) + else: + while len(current_include): + current_include.pop() + while len(current_exclude): + current_exclude.pop() + selected_mask.fill(False) + draw_selections(ax, current_include, current_exclude, selected_index_ranges) + + def update_mask(mask, selected_index_ranges, unselected_index_ranges): + for (low, upp) in selected_index_ranges: + selected_mask = np.logical_and(xdata >= xdata[low], xdata <= xdata[upp]) + mask = np.logical_or(mask, selected_mask) + for (low, upp) in unselected_index_ranges: + unselected_mask = np.logical_and(xdata >= xdata[low], xdata <= xdata[upp]) + mask[unselected_mask] = False + return(mask) + + def update_index_ranges(mask): + # Update the currently included index ranges (where mask is True) + current_include = [] + for i, m in enumerate(mask): + if m == True: + if len(current_include) == 0 or type(current_include[-1]) == tuple: + current_include.append(i) + else: + if len(current_include) > 0 and isinstance(current_include[-1], int): + current_include[-1] = (current_include[-1], i-1) + if len(current_include) > 0 and isinstance(current_include[-1], int): + current_include[-1] = (current_include[-1], num_data-1) + return(current_include) + + # Check inputs + ydata = np.asarray(ydata) + if ydata.ndim > 1: + logger.warning(f'Invalid ydata dimension ({ydata.ndim})') + return(None, None) + num_data = ydata.size + if xdata is None: + xdata = np.arange(num_data) + else: + xdata = np.asarray(xdata, dtype=np.float64) + if xdata.ndim > 1 or xdata.size != num_data: + logger.warning(f'Invalid xdata shape ({xdata.shape})') + return(None, None) + if not np.all(xdata[:-1] < xdata[1:]): + logger.warning('Invalid xdata: must be monotonically increasing') + return(None, None) + if current_index_ranges is not None: + if not isinstance(current_index_ranges, (tuple, list)): + logger.warning('Invalid current_index_ranges parameter 
({current_index_ranges}, '+ + f'{type(current_index_ranges)})') + return(None, None) + if not isinstance(select_mask, bool): + logger.warning('Invalid select_mask parameter ({select_mask}, {type(select_mask)})') + return(None, None) + if num_index_ranges_max is not None: + logger.warning('num_index_ranges_max input not yet implemented in draw_mask_1d') + if title is None: + title = 'select ranges of data' + elif not isinstance(title, str): + illegal(title, 'title') + title = '' + if legend is None and not isinstance(title, str): + illegal(legend, 'legend') + legend = None + + if select_mask: + title = f'Click and drag to {title} you wish to include' + selection_color = 'green' + else: + title = f'Click and drag to {title} you wish to exclude' + selection_color = 'red' + + # Set initial selected mask and the selected/unselected index ranges as needed + selected_index_ranges = [] + unselected_index_ranges = [] + selected_mask = np.full(xdata.shape, False, dtype=bool) + if current_index_ranges is None: + if current_mask is None: + if not select_mask: + selected_index_ranges = [(0, num_data-1)] + selected_mask = np.full(xdata.shape, True, dtype=bool) + else: + selected_mask = np.copy(np.asarray(current_mask, dtype=bool)) + if current_index_ranges is not None and len(current_index_ranges): + current_index_ranges = sorted([(low, upp) for (low, upp) in current_index_ranges]) + for (low, upp) in current_index_ranges: + if low > upp or low >= num_data or upp < 0: + continue + if low < 0: + low = 0 + if upp >= num_data: + upp = num_data-1 + selected_index_ranges.append((low, upp)) + selected_mask = update_mask(selected_mask, selected_index_ranges, unselected_index_ranges) + if current_index_ranges is not None and current_mask is not None: + selected_mask = np.logical_and(current_mask, selected_mask) + if current_mask is not None: + selected_index_ranges = update_index_ranges(selected_mask) + + # Set up range selections for display + current_include = selected_index_ranges + 
def select_peaks(ydata:np.ndarray, x_values:np.ndarray=None, x_mask:np.ndarray=None,
        peak_x_values:np.ndarray=np.array([]), peak_x_indices:np.ndarray=np.array([]),
        return_peak_x_values:bool=False, return_peak_x_indices:bool=False,
        return_peak_input_indices:bool=False, return_sorted:bool=False,
        title:str=None, xlabel:str=None, ylabel:str=None) -> list :
    """Interactively pick peaks from a reference curve.

    Candidate peak locations (given either as x-values or as indices into
    x_values — exactly one of the two) are drawn as clickable vertical lines;
    peaks inside masked-out regions of x_mask are shown grayed and cannot be
    picked. Exactly one of the return_* flags selects the output format.

    Returns: numpy array of the selected peaks in the requested format,
    sorted when return_sorted is True.
    """
    # Check arguments
    # NOTE(review): when x_values is None this calls len(None) and raises a
    # TypeError instead of the intended RuntimeError — TODO confirm callers
    # always pass x_values together with peak_x_values
    if (len(peak_x_values) > 0 or return_peak_x_values) and not len(x_values) > 0:
        raise RuntimeError('Cannot use peak_x_values or return_peak_x_values without x_values')
    if not ((len(peak_x_values) > 0) ^ (len(peak_x_indices) > 0)):
        raise RuntimeError('Use exactly one of peak_x_values or peak_x_indices')
    # "Exactly one True" idiom: the first any() consumes up to the first True,
    # the second any() must then find no further True in the remainder
    return_format_iter = iter((return_peak_x_values, return_peak_x_indices, return_peak_input_indices))
    if not (any(return_format_iter) and not any(return_format_iter)):
        raise RuntimeError('Exactly one of return_peak_x_values, return_peak_x_indices, or '+
                'return_peak_input_indices must be True')

    # Line styles for the three peak states
    EXCLUDE_PEAK_PROPERTIES = {'color': 'black', 'linestyle': '--','linewidth': 1,
            'marker': 10, 'markersize': 5, 'fillstyle': 'none'}
    INCLUDE_PEAK_PROPERTIES = {'color': 'green', 'linestyle': '-', 'linewidth': 2,
            'marker': 10, 'markersize': 10, 'fillstyle': 'full'}
    MASKED_PEAK_PROPERTIES = {'color': 'gray', 'linestyle': ':', 'linewidth': 1}

    # Setup reference data & plot
    x_indices = np.arange(len(ydata))
    if x_values is None:
        x_values = x_indices
    if x_mask is None:
        x_mask = np.full(x_values.shape, True, dtype=bool)
    fig, ax = plt.subplots()
    handles = ax.plot(x_values, ydata, label='Reference data')
    handles.append(mlines.Line2D([], [], label='Excluded / unselected HKL', **EXCLUDE_PEAK_PROPERTIES))
    handles.append(mlines.Line2D([], [], label='Included / selected HKL', **INCLUDE_PEAK_PROPERTIES))
    handles.append(mlines.Line2D([], [], label='HKL in masked region (unselectable)', **MASKED_PEAK_PROPERTIES))
    ax.legend(handles=handles, loc='upper right')
    ax.set(title=title, xlabel=xlabel, ylabel=ylabel)

    # Plot vertical line at each peak
    value_to_index = lambda x_value: int(np.argmin(abs(x_values - x_value)))
    if len(peak_x_indices) > 0:
        peak_x_values = x_values[peak_x_indices]
    else:
        peak_x_indices = np.array(list(map(value_to_index, peak_x_values)))
    peak_vlines = []
    for loc in peak_x_values:
        nearest_index = value_to_index(loc)
        # Only peaks in unmasked regions get a pick radius and are selectable
        if nearest_index in x_indices[x_mask]:
            peak_vline = ax.axvline(loc, **EXCLUDE_PEAK_PROPERTIES)
            peak_vline.set_picker(5)
        else:
            peak_vline = ax.axvline(loc, **MASKED_PEAK_PROPERTIES)
        peak_vlines.append(peak_vline)

    # Indicate masked regions by gray-ing out the axes facecolor
    mask_exclude_bounds = bounds_from_mask(x_mask, return_include_bounds=False)
    for (low, upp) in mask_exclude_bounds:
        xlow = x_values[low]
        xupp = x_values[upp]
        ax.axvspan(xlow, xupp, facecolor='gray', alpha=0.5)

    # Setup peak picking
    selected_peak_input_indices = []
    def onpick(event):
        # Toggle the picked peak between included and excluded state;
        # ignore pick events on artists that are not peak lines
        try:
            peak_index = peak_vlines.index(event.artist)
        except:
            pass
        else:
            peak_vline = event.artist
            if peak_index in selected_peak_input_indices:
                peak_vline.set(**EXCLUDE_PEAK_PROPERTIES)
                selected_peak_input_indices.remove(peak_index)
            else:
                peak_vline.set(**INCLUDE_PEAK_PROPERTIES)
                selected_peak_input_indices.append(peak_index)
            plt.draw()
    cid_pick_peak = fig.canvas.mpl_connect('pick_event', onpick)

    # Setup "Confirm" button
    def confirm_selection(event):
        plt.close()
    plt.subplots_adjust(bottom=0.2)
    confirm_b = Button(plt.axes([0.75, 0.05, 0.15, 0.075]), 'Confirm')
    cid_confirm = confirm_b.on_clicked(confirm_selection)

    # Show figure for user interaction
    plt.show()

    # Disconnect callbacks when figure is closed
    fig.canvas.mpl_disconnect(cid_pick_peak)
    confirm_b.disconnect(cid_confirm)

    # Convert the picked input indices to the requested output format
    # (exactly one of the three flags is True, see the check above)
    if return_peak_input_indices:
        selected_peaks = np.array(selected_peak_input_indices)
    if return_peak_x_values:
        selected_peaks = peak_x_values[selected_peak_input_indices]
    if return_peak_x_indices:
        selected_peaks = peak_x_indices[selected_peak_input_indices]

    if return_sorted:
        selected_peaks.sort()

    return(selected_peaks)
def find_image_files(path, filetype, name=None):
    """Find the set of available, consecutively indexed images.

    Parameters:
        path: directory containing indexed .tif files ('tif'), or a single
              h5 file in alamo2 detector style ('h5')
        filetype: 'tif' or 'h5'
        name: optional label used in log messages

    Returns:
        (first_index, num_img, paths), or (-1, 0, []) on any error.
    """
    if isinstance(name, str):
        name = f'{name.strip()} '
    else:
        name = ''
    # Find available index range
    if filetype == 'tif':
        if not isinstance(path, str) or not os.path.isdir(path):
            illegal_value(path, 'path', 'find_image_files')
            return(-1, 0, [])
        indexRegex = re.compile(r'\d+')
        # At this point only tiffs
        # Bug fix: sort on the numerical index instead of lexically, so that
        # e.g. img_10.tif sorts after img_2.tif even without zero padding
        files = sorted([f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f)) and
                f.endswith('.tif') and indexRegex.search(f)],
                key=lambda f: int(indexRegex.search(f).group()))
        num_img = len(files)
        if num_img < 1:
            logger.warning(f'No available {name}files')
            return(-1, 0, [])
        first_index = indexRegex.search(files[0]).group()
        last_index = indexRegex.search(files[-1]).group()
        if first_index is None or last_index is None:
            logger.error(f'Unable to find correctly indexed {name}images')
            return(-1, 0, [])
        first_index = int(first_index)
        last_index = int(last_index)
        # Indices must form one gapless run
        if num_img != last_index-first_index+1:
            logger.error(f'Non-consecutive set of indices for {name}images')
            return(-1, 0, [])
        paths = [os.path.join(path, f) for f in files]
    elif filetype == 'h5':
        if not isinstance(path, str) or not os.path.isfile(path):
            illegal_value(path, 'path', 'find_image_files')
            return(-1, 0, [])
        # At this point only h5 in alamo2 detector style
        first_index = 0
        with h5py.File(path, 'r') as f:
            num_img = f['entry/instrument/detector/data'].shape[0]
            last_index = num_img-1
        paths = [path]
    else:
        illegal_value(filetype, 'filetype', 'find_image_files')
        return(-1, 0, [])
    logger.info(f'Number of available {name}images: {num_img}')
    logger.info(f'Index range of available {name}images: [{first_index}, '+
            f'{last_index}]')

    return(first_index, num_img, paths)
def select_image_range(first_index, offset, num_available, num_img=None, name=None,
        num_required=None):
    """Interactively select the image index range to load.

    Asks (on the console) whether to keep the current first_index/offset and
    image count, otherwise lets the user pick an offset/count or explicit
    index bounds. When num_required is given, exactly that many images must
    be selected.

    Returns: (first_index, offset, num_img); (0, 0, 0) on invalid input.
    """
    if isinstance(name, str):
        name = f'{name.strip()} '
    else:
        name = ''
    # Check existing values
    if not is_int(num_available, gt=0):
        logger.warning(f'No available {name}images')
        return(0, 0, 0)
    if num_img is not None and not is_int(num_img, ge=0):
        illegal_value(num_img, 'num_img', 'select_image_range')
        return(0, 0, 0)
    # Offer to reuse the currently configured values before asking anything else
    if is_int(first_index, ge=0) and is_int(offset, ge=0):
        if num_required is None:
            if input_yesno(f'\nCurrent {name}first image index/offset = {first_index}/{offset},'+
                    'use these values (y/n)?', 'y'):
                if num_img is not None:
                    if input_yesno(f'Current number of {name}images = {num_img}, '+
                            'use this value (y/n)? ', 'y'):
                        return(first_index, offset, num_img)
                else:
                    if input_yesno(f'Number of available {name}images = {num_available}, '+
                            'use all (y/n)? ', 'y'):
                        return(first_index, offset, num_available)
        else:
            if input_yesno(f'\nCurrent {name}first image offset = {offset}, '+
                    f'use this values (y/n)?', 'y'):
                return(first_index, offset, num_required)

    # Check range against requirements
    if num_required is None:
        if num_available == 1:
            # Only one image: nothing to choose
            return(first_index, 0, 1)
    else:
        if not is_int(num_required, ge=1):
            illegal_value(num_required, 'num_required', 'select_image_range')
            return(0, 0, 0)
        if num_available < num_required:
            logger.error(f'Unable to find the required {name}images ({num_available} out of '+
                    f'{num_required})')
            return(0, 0, 0)

    # Select index range
    print(f'\nThe number of available {name}images is {num_available}')
    if num_required is None:
        # NOTE(review): last_index = first_index+num_available looks like it is
        # one past the last valid index of a consecutive run — TODO confirm
        last_index = first_index+num_available
        use_all = f'Use all ([{first_index}, {last_index}])'
        pick_offset = 'Pick the first image index offset and the number of images'
        pick_bounds = 'Pick the first and last image index'
        choice = input_menu([use_all, pick_offset, pick_bounds], default=pick_offset)
        if not choice:
            offset = 0
            num_img = num_available
        elif choice == 1:
            offset = input_int('Enter the first index offset', ge=0, le=last_index-first_index)
            if first_index+offset == last_index:
                num_img = 1
            else:
                num_img = input_int('Enter the number of images', ge=1, le=num_available-offset)
        else:
            # Bounds are entered as absolute indices, then converted to offset/count
            offset = input_int('Enter the first index', ge=first_index, le=last_index)
            num_img = 1-offset+input_int('Enter the last index', ge=offset, le=last_index)
            offset -= first_index
    else:
        use_all = f'Use ([{first_index}, {first_index+num_required-1}])'
        pick_offset = 'Pick the first index offset'
        choice = input_menu([use_all, pick_offset], pick_offset)
        offset = 0
        if choice == 1:
            offset = input_int('Enter the first index offset', ge=0, le=num_available-num_required)
        num_img = num_required

    return(first_index, offset, num_img)
def load_image(f, img_x_bounds=None, img_y_bounds=None):
    """Load a single image from file.

    Parameters:
        f: path of the image file (read with plt.imread)
        img_x_bounds: optional (low, upp) row crop bounds, low inclusive,
            upp exclusive; defaults to the full row range
        img_y_bounds: optional (low, upp) column crop bounds, same convention

    Returns: the (cropped) image array, or None on error.
    """
    if not os.path.isfile(f):
        logger.error(f'Unable to load {f}')
        return(None)
    img_read = plt.imread(f)
    if not img_x_bounds:
        img_x_bounds = (0, img_read.shape[0])
    else:
        if (not isinstance(img_x_bounds, (tuple, list)) or len(img_x_bounds) != 2 or
                not (0 <= img_x_bounds[0] < img_x_bounds[1] <= img_read.shape[0])):
            logger.error(f'inconsistent row dimension in {f}')
            return(None)
    if not img_y_bounds:
        img_y_bounds = (0, img_read.shape[1])
    else:
        # Bug fix: accept tuples as well as lists (was list-only, inconsistent
        # with the img_x_bounds check above)
        if (not isinstance(img_y_bounds, (tuple, list)) or len(img_y_bounds) != 2 or
                not (0 <= img_y_bounds[0] < img_y_bounds[1] <= img_read.shape[1])):
            logger.error(f'inconsistent column dimension in {f}')
            return(None)
    return(img_read[img_x_bounds[0]:img_x_bounds[1],img_y_bounds[0]:img_y_bounds[1]])
+ """ + logger.debug(f'img_offset = {img_offset}') + logger.debug(f'num_img = {num_img}') + logger.debug(f'num_img_skip = {num_img_skip}') + logger.debug(f'\nfiles:\n{files}\n') + img_stack = np.array([]) + if filetype == 'tif': + img_read_stack = [] + i = 1 + t0 = time() + for f in files[img_offset:img_offset+num_img:num_img_skip+1]: + if not i%20: + logger.info(f' loading {i}/{num_img}: {f}') + else: + logger.debug(f' loading {i}/{num_img}: {f}') + img_read = load_image(f, img_x_bounds, img_y_bounds) + img_read_stack.append(img_read) + i += num_img_skip+1 + img_stack = np.stack([img_read for img_read in img_read_stack]) + logger.info(f'... done in {time()-t0:.2f} seconds!') + logger.debug(f'img_stack shape = {np.shape(img_stack)}') + del img_read_stack, img_read + elif filetype == 'h5': + if not isinstance(files[0], str) and not os.path.isfile(files[0]): + illegal_value(files[0], 'files[0]', 'load_image_stack') + return(img_stack) + t0 = time() + logger.info(f'Loading {files[0]}') + with h5py.File(files[0], 'r') as f: + shape = f['entry/instrument/detector/data'].shape + if len(shape) != 3: + logger.error(f'inconsistent dimensions in {files[0]}') + if not img_x_bounds: + img_x_bounds = (0, shape[1]) + else: + if (not isinstance(img_x_bounds, (tuple, list)) or len(img_x_bounds) != 2 or + not (0 <= img_x_bounds[0] < img_x_bounds[1] <= shape[1])): + logger.error(f'inconsistent row dimension in {files[0]} {img_x_bounds} '+ + f'{shape[1]}') + if not img_y_bounds: + img_y_bounds = (0, shape[2]) + else: + if (not isinstance(img_y_bounds, list) or len(img_y_bounds) != 2 or + not (0 <= img_y_bounds[0] < img_y_bounds[1] <= shape[2])): + logger.error(f'inconsistent column dimension in {files[0]}') + img_stack = f.get('entry/instrument/detector/data')[ + img_offset:img_offset+num_img:num_img_skip+1, + img_x_bounds[0]:img_x_bounds[1],img_y_bounds[0]:img_y_bounds[1]] + logger.info(f'... 
def combine_tiffs_in_h5(files, num_img, h5_filename):
    """Load num_img tiff files and store them as one h5 dataset.

    Returns a one-element list with the h5 file path, matching the 'h5'
    files-list convention used by load_image_stack.
    """
    img_stack = load_image_stack(files, 'tif', 0, num_img)
    with h5py.File(h5_filename, 'w') as f:
        f.create_dataset('entry/instrument/detector/data', data=img_stack)
    del img_stack
    return([h5_filename])

def clear_imshow(title=None):
    """Close the quick_imshow figure with the given title (if any)."""
    plt.ioff()
    if title is None:
        title = 'quick imshow'
    elif not isinstance(title, str):
        illegal_value(title, 'title', 'clear_imshow')
        return
    plt.close(fig=title)

def clear_plot(title=None):
    """Close the quick_plot figure with the given title (if any)."""
    plt.ioff()
    if title is None:
        title = 'quick plot'
    elif not isinstance(title, str):
        illegal_value(title, 'title', 'clear_plot')
        return
    plt.close(fig=title)

def quick_imshow(a, title=None, path=None, name=None, save_fig=False, save_only=False,
        clear=True, extent=None, show_grid=False, grid_color='w', grid_linewidth=1,
        block=False, **kwargs):
    """Display and/or save a 2D (or grayscale RGB(A)) image.

    The figure is labeled by title; when save_fig/save_only is set the image
    is written to path/name (derived from the title when name is None).
    Remaining kwargs are passed to plt.imshow.
    """
    if title is not None and not isinstance(title, str):
        illegal_value(title, 'title', 'quick_imshow')
        return
    if path is not None and not isinstance(path, str):
        illegal_value(path, 'path', 'quick_imshow')
        return
    if not isinstance(save_fig, bool):
        illegal_value(save_fig, 'save_fig', 'quick_imshow')
        return
    if not isinstance(save_only, bool):
        illegal_value(save_only, 'save_only', 'quick_imshow')
        return
    if not isinstance(clear, bool):
        illegal_value(clear, 'clear', 'quick_imshow')
        return
    if not isinstance(block, bool):
        illegal_value(block, 'block', 'quick_imshow')
        return
    if not title:
        title='quick imshow'
#    else:
#        title = re.sub(r"\s+", '_', title)
    # Derive the save path from the title unless an explicit name is given
    if name is None:
        ttitle = re.sub(r"\s+", '_', title)
        if path is None:
            path = f'{ttitle}.png'
        else:
            path = f'{path}/{ttitle}.png'
    else:
        if path is None:
            path = name
        else:
            path = f'{path}/{name}'
    if 'cmap' in kwargs and a.ndim == 3 and (a.shape[2] == 3 or a.shape[2] == 4):
        # A colormap only makes sense for a grayscale image: require a constant
        # alpha channel (if any) and identical RGB channels, then collapse to 2D
        use_cmap = True
        if a.shape[2] == 4 and a[:,:,-1].min() != a[:,:,-1].max():
            use_cmap = False
        # Perf fix: vectorized channel comparison (was a per-pixel Python
        # double loop over all rows and columns)
        if np.any((a[:,:,0] != a[:,:,1]) & (a[:,:,0] != a[:,:,2])):
            use_cmap = False
        if use_cmap:
            a = a[:,:,0]
        else:
            logger.warning('Image incompatible with cmap option, ignore cmap')
            kwargs.pop('cmap')
    if extent is None:
        extent = (0, a.shape[1], a.shape[0], 0)
    if clear:
        try:
            plt.close(fig=title)
        except:
            pass
    if not save_only:
        if block:
            plt.ioff()
        else:
            plt.ion()
    plt.figure(title)
    plt.imshow(a, extent=extent, **kwargs)
    if show_grid:
        ax = plt.gca()
        ax.grid(color=grid_color, linewidth=grid_linewidth)
#    if title != 'quick imshow':
#        plt.title = title
    if save_only:
        plt.savefig(path)
        plt.close(fig=title)
    else:
        if save_fig:
            plt.savefig(path)
        if block:
            plt.show(block=block)
def quick_plot(*args, xerr=None, yerr=None, vlines=None, title=None, xlim=None, ylim=None,
        xlabel=None, ylabel=None, legend=None, path=None, name=None, show_grid=False,
        save_fig=False, save_only=False, clear=True, block=False, **kwargs):
    """Display and/or save one or more curves.

    args are passed through to plt.plot (one curve, or a tuple of curves);
    optional error bars, vertical marker lines, axis limits/labels and a
    legend are supported. When save_fig/save_only is set the figure is
    written to path/name (derived from the title when name is None).
    """
    if title is not None and not isinstance(title, str):
        illegal_value(title, 'title', 'quick_plot')
        title = None
    # Bug fix: the original `not isinstance(...) and len(...) != 2` raised a
    # TypeError for scalar inputs and let wrong-length tuples through
    if xlim is not None and not (isinstance(xlim, (tuple, list)) and len(xlim) == 2):
        illegal_value(xlim, 'xlim', 'quick_plot')
        xlim = None
    if ylim is not None and not (isinstance(ylim, (tuple, list)) and len(ylim) == 2):
        illegal_value(ylim, 'ylim', 'quick_plot')
        ylim = None
    if xlabel is not None and not isinstance(xlabel, str):
        illegal_value(xlabel, 'xlabel', 'quick_plot')
        xlabel = None
    if ylabel is not None and not isinstance(ylabel, str):
        illegal_value(ylabel, 'ylabel', 'quick_plot')
        ylabel = None
    if legend is not None and not isinstance(legend, (tuple, list)):
        illegal_value(legend, 'legend', 'quick_plot')
        legend = None
    if path is not None and not isinstance(path, str):
        illegal_value(path, 'path', 'quick_plot')
        return
    if not isinstance(show_grid, bool):
        illegal_value(show_grid, 'show_grid', 'quick_plot')
        return
    if not isinstance(save_fig, bool):
        illegal_value(save_fig, 'save_fig', 'quick_plot')
        return
    if not isinstance(save_only, bool):
        illegal_value(save_only, 'save_only', 'quick_plot')
        return
    if not isinstance(clear, bool):
        illegal_value(clear, 'clear', 'quick_plot')
        return
    if not isinstance(block, bool):
        illegal_value(block, 'block', 'quick_plot')
        return
    if title is None:
        title = 'quick plot'
#    else:
#        title = re.sub(r"\s+", '_', title)
    # Derive the save path from the title unless an explicit name is given
    if name is None:
        ttitle = re.sub(r"\s+", '_', title)
        if path is None:
            path = f'{ttitle}.png'
        else:
            path = f'{path}/{ttitle}.png'
    else:
        if path is None:
            path = name
        else:
            path = f'{path}/{name}'
    if clear:
        try:
            plt.close(fig=title)
        except:
            pass
    args = unwrap_tuple(args)
    if depth_tuple(args) > 1 and (xerr is not None or yerr is not None):
        # Typo fix: "form" -> "for"
        logger.warning('Error bars ignored for multiple curves')
    if not save_only:
        if block:
            plt.ioff()
        else:
            plt.ion()
    plt.figure(title)
    if depth_tuple(args) > 1:
        for y in args:
            plt.plot(*y, **kwargs)
    else:
        if xerr is None and yerr is None:
            plt.plot(*args, **kwargs)
        else:
            plt.errorbar(*args, xerr=xerr, yerr=yerr, **kwargs)
    if vlines is not None:
        if isinstance(vlines, (int, float)):
            vlines = [vlines]
        for v in vlines:
            plt.axvline(v, color='r', linestyle='--', **kwargs)
#    if vlines is not None:
#        for s in tuple(([x, x], list(plt.gca().get_ylim())) for x in vlines):
#            plt.plot(*s, color='red', **kwargs)
    if xlim is not None:
        plt.xlim(xlim)
    if ylim is not None:
        plt.ylim(ylim)
    if xlabel is not None:
        plt.xlabel(xlabel)
    if ylabel is not None:
        plt.ylabel(ylabel)
    if show_grid:
        ax = plt.gca()
        ax.grid(color='k')#, linewidth=1)
    if legend is not None:
        plt.legend(legend)
    if save_only:
        plt.savefig(path)
        plt.close(fig=title)
    else:
        if save_fig:
            plt.savefig(path)
        if block:
            plt.show(block=block)
def select_array_bounds(a, x_low=None, x_upp=None, num_x_min=None, ask_bounds=False,
        title='select array bounds'):
    """Interactively select the lower and upper data bounds for a numpy array.

    Parameters:
        a: 1D array (tuple/list accepted and converted)
        x_low / x_upp: preset bounds (lower inclusive, upper exclusive);
            prompted for interactively when None
        num_x_min: minimum number of elements between the bounds
        ask_bounds: first offer the preset bounds for confirmation

    Returns: (x_low, x_upp), or None on invalid input.
    """
    if isinstance(a, (tuple, list)):
        a = np.array(a)
    if not isinstance(a, np.ndarray) or a.ndim != 1:
        illegal_value(a.ndim, 'array type or dimension', 'select_array_bounds')
        return(None)
    len_a = len(a)
    if num_x_min is None:
        num_x_min = 1
    else:
        if num_x_min < 2 or num_x_min > len_a:
            logger.warning('Invalid value for num_x_min in select_array_bounds, input ignored')
            num_x_min = 1

    # Ask to use current bounds
    if ask_bounds and (x_low is not None or x_upp is not None):
        if x_low is None:
            x_low = 0
        if not is_int(x_low, ge=0, le=len_a-num_x_min):
            illegal_value(x_low, 'x_low', 'select_array_bounds')
            return(None)
        if x_upp is None:
            x_upp = len_a
        if not is_int(x_upp, ge=x_low+num_x_min, le=len_a):
            illegal_value(x_upp, 'x_upp', 'select_array_bounds')
            return(None)
        quick_plot((range(len_a), a), vlines=(x_low,x_upp), title=title)
        if not input_yesno(f'\nCurrent array bounds: [{x_low}, {x_upp}] '+
                'use these values (y/n)?', 'y'):
            x_low = None
            x_upp = None
        else:
            clear_plot(title)
            return(x_low, x_upp)

    # Select the lower bound, zooming in as needed
    if x_low is None:
        x_min = 0
        x_max = len_a
        x_low_max = len_a-num_x_min
        while True:
            quick_plot(range(x_min, x_max), a[x_min:x_max], title=title)
            zoom_flag = input_yesno('Set lower data bound (y) or zoom in (n)?', 'y')
            if zoom_flag:
                x_low = input_int(' Set lower data bound', ge=0, le=x_low_max)
                break
            else:
                x_min = input_int(' Set lower zoom index', ge=0, le=x_low_max)
                x_max = input_int(' Set upper zoom index', ge=x_min+1, le=x_low_max+1)
    else:
        if not is_int(x_low, ge=0, le=len_a-num_x_min):
            illegal_value(x_low, 'x_low', 'select_array_bounds')
            return(None)
    # Select the upper bound, zooming in as needed
    if x_upp is None:
        x_min = x_low+num_x_min
        x_max = len_a
        x_upp_min = x_min
        while True:
            quick_plot(range(x_min, x_max), a[x_min:x_max], title=title)
            zoom_flag = input_yesno('Set upper data bound (y) or zoom in (n)?', 'y')
            if zoom_flag:
                x_upp = input_int(' Set upper data bound', ge=x_upp_min, le=len_a)
                break
            else:
                # Bug fix: this prompt previously read "Set upper zoom index"
                # (copy-paste), mislabeling the lower end of the zoom window
                x_min = input_int(' Set lower zoom index', ge=x_upp_min, le=len_a-1)
                x_max = input_int(' Set upper zoom index', ge=x_min+1, le=len_a)
    else:
        if not is_int(x_upp, ge=x_low+num_x_min, le=len_a):
            illegal_value(x_upp, 'x_upp', 'select_array_bounds')
            return(None)
    # Typo fix: removed the stray ']' at the end of the message
    print(f'lower bound = {x_low} (inclusive)\nupper bound = {x_upp} (exclusive)')
    quick_plot((range(len_a), a), vlines=(x_low,x_upp), title=title)
    if not input_yesno('Accept these bounds (y/n)?', 'y'):
        x_low, x_upp = select_array_bounds(a, None, None, num_x_min, title=title)
    clear_plot(title)
    return(x_low, x_upp)
def select_image_bounds(a, axis, low=None, upp=None, num_min=None, title='select array bounds',
        raise_error=False):
    """Interactively select the lower and upper data bounds for a 2D numpy array.

    Parameters:
        a: 2D array
        axis: 0 to bound rows, 1 to bound columns
        low / upp: preset bounds (lower inclusive, upper exclusive);
            prompted for interactively when None
        num_min: minimum number of rows/columns between the bounds
        raise_error: forwarded to illegal_value on invalid input

    Returns: (low, upp), or None on invalid input.
    """
    a = np.asarray(a)
    if a.ndim != 2:
        illegal_value(a.ndim, 'array dimension', location='select_image_bounds',
                raise_error=raise_error)
        return(None)
    if axis < 0 or axis >= a.ndim:
        illegal_value(axis, 'axis', location='select_image_bounds', raise_error=raise_error)
        return(None)
    # Keep the original inputs for the retry at the bottom
    low_save = low
    upp_save = upp
    num_min_save = num_min
    if num_min is None:
        num_min = 1
    else:
        if num_min < 2 or num_min > a.shape[axis]:
            logger.warning('Invalid input for num_min in select_image_bounds, input ignored')
            num_min = 1
    # Select the lower bound, zooming in as needed
    if low is None:
        min_ = 0
        max_ = a.shape[axis]
        low_max = a.shape[axis]-num_min
        while True:
            if axis:
                quick_imshow(a[:,min_:max_], title=title, aspect='auto',
                        extent=[min_,max_,a.shape[0],0])
            else:
                quick_imshow(a[min_:max_,:], title=title, aspect='auto',
                        extent=[0,a.shape[1], max_,min_])
            zoom_flag = input_yesno('Set lower data bound (y) or zoom in (n)?', 'y')
            if zoom_flag:
                low = input_int(' Set lower data bound', ge=0, le=low_max)
                break
            else:
                min_ = input_int(' Set lower zoom index', ge=0, le=low_max)
                max_ = input_int(' Set upper zoom index', ge=min_+1, le=low_max+1)
    else:
        if not is_int(low, ge=0, le=a.shape[axis]-num_min):
            illegal_value(low, 'low', location='select_image_bounds', raise_error=raise_error)
            return(None)
    # Select the upper bound, zooming in as needed
    if upp is None:
        min_ = low+num_min
        max_ = a.shape[axis]
        upp_min = min_
        while True:
            if axis:
                quick_imshow(a[:,min_:max_], title=title, aspect='auto',
                        extent=[min_,max_,a.shape[0],0])
            else:
                quick_imshow(a[min_:max_,:], title=title, aspect='auto',
                        extent=[0,a.shape[1], max_,min_])
            zoom_flag = input_yesno('Set upper data bound (y) or zoom in (n)?', 'y')
            if zoom_flag:
                upp = input_int(' Set upper data bound', ge=upp_min, le=a.shape[axis])
                break
            else:
                # Bug fix: this prompt previously read "Set upper zoom index"
                # (copy-paste), mislabeling the lower end of the zoom window
                min_ = input_int(' Set lower zoom index', ge=upp_min, le=a.shape[axis]-1)
                max_ = input_int(' Set upper zoom index', ge=min_+1, le=a.shape[axis])
    else:
        if not is_int(upp, ge=low+num_min, le=a.shape[axis]):
            illegal_value(upp, 'upp', location='select_image_bounds', raise_error=raise_error)
            return(None)
    bounds = (low, upp)
    # Highlight the chosen bounds by drawing them at the image maximum
    a_tmp = np.copy(a)
    a_tmp_max = a.max()
    if axis:
        a_tmp[:,bounds[0]] = a_tmp_max
        a_tmp[:,bounds[1]-1] = a_tmp_max
    else:
        a_tmp[bounds[0],:] = a_tmp_max
        a_tmp[bounds[1]-1,:] = a_tmp_max
    print(f'lower bound = {low} (inclusive)\nupper bound = {upp} (exclusive)')
    quick_imshow(a_tmp, title=title, aspect='auto')
    del a_tmp
    if not input_yesno('Accept these bounds (y/n)?', 'y'):
        # Bug fix: the retry previously dropped raise_error
        bounds = select_image_bounds(a, axis, low=low_save, upp=upp_save, num_min=num_min_save,
                title=title, raise_error=raise_error)
    return(bounds)
def select_one_image_bound(a, axis, bound=None, bound_name=None, title='select array bounds',
        default='y', raise_error=False):
    """Interactively select a data boundary for a 2D numpy array.
    """
    a = np.asarray(a)
    if a.ndim != 2:
        illegal_value(a.ndim, 'array dimension', location='select_one_image_bound',
                raise_error=raise_error)
        return(None)
    if axis < 0 or axis >= a.ndim:
        illegal_value(axis, 'axis', location='select_one_image_bound', raise_error=raise_error)
        return(None)
    if bound_name is None:
        bound_name = 'data bound'
    # No preset bound: let the user pick one, zooming in as needed
    if bound is None:
        min_ = 0
        max_ = a.shape[axis]
        bound_max = a.shape[axis]-1
        while True:
            # axis selects whether columns (1) or rows (0) are being bounded
            if axis:
                quick_imshow(a[:,min_:max_], title=title, aspect='auto',
                        extent=[min_,max_,a.shape[0],0])
            else:
                quick_imshow(a[min_:max_,:], title=title, aspect='auto',
                        extent=[0,a.shape[1], max_,min_])
            zoom_flag = input_yesno(f'Set {bound_name} (y) or zoom in (n)?', 'y')
            if zoom_flag:
                bound = input_int(f' Set {bound_name}', ge=0, le=bound_max)
                clear_imshow(title)
                break
            else:
                min_ = input_int(' Set lower zoom index', ge=0, le=bound_max)
                max_ = input_int(' Set upper zoom index', ge=min_+1, le=bound_max+1)

    elif not is_int(bound, ge=0, le=a.shape[axis]-1):
        illegal_value(bound, 'bound', location='select_one_image_bound', raise_error=raise_error)
        return(None)
    else:
        # Preset bound: show it highlighted at the image maximum
        print(f'Current {bound_name} = {bound}')
        a_tmp = np.copy(a)
        a_tmp_max = a.max()
        if axis:
            a_tmp[:,bound] = a_tmp_max
        else:
            a_tmp[bound,:] = a_tmp_max
        quick_imshow(a_tmp, title=title, aspect='auto')
        del a_tmp
    if not input_yesno(f'Accept this {bound_name} (y/n)?', default):
        # NOTE(review): the retry drops the current bound, default and
        # raise_error arguments — presumably intentional for a fresh pick,
        # but confirm the dropped default/raise_error
        bound = select_one_image_bound(a, axis, bound_name=bound_name, title=title)
    clear_imshow(title)
    return(bound)
class Config:
    """Base class for processing a config file or dictionary.

    Attributes:
        config: the loaded configuration dictionary
        load_flag: True once a config was successfully loaded
        suffix: extension of the loaded config file (None for dict input)
    """
    def __init__(self, config_file=None, config_dict=None):
        self.config = {}
        self.load_flag = False
        self.suffix = None

        # Load config file (config_file wins when both are given)
        if config_file is not None and config_dict is not None:
            logger.warning('Ignoring config_dict (both config_file and config_dict are specified)')
        if config_file is not None:
            self.load_file(config_file)
        elif config_dict is not None:
            self.load_dict(config_dict)

    def load_file(self, config_file):
        """Load a config file.

        Supports yaml (.yml/.yaml/.dat) and simple key=value text (.txt,
        '#' starts a comment, values parsed with literal_eval).
        """
        if self.load_flag:
            logger.warning('Overwriting any previously loaded config file')
        self.config = {}

        # Ensure config file exists
        if not os.path.isfile(config_file):
            logger.error(f'Unable to load {config_file}')
            return

        # Load config file (for now for Galaxy, allow .dat extension)
        self.suffix = os.path.splitext(config_file)[1]
        if self.suffix == '.yml' or self.suffix == '.yaml' or self.suffix == '.dat':
            with open(config_file, 'r') as f:
                self.config = safe_load(f)
        elif self.suffix == '.txt':
            with open(config_file, 'r') as f:
                lines = f.read().splitlines()
            self.config = {item[0].strip():literal_eval(item[1].strip()) for item in
                    [line.split('#')[0].split('=') for line in lines if '=' in line.split('#')[0]]}
        else:
            illegal_value(self.suffix, 'config file extension', 'Config.load_file')

        # Make sure config file was correctly loaded
        if isinstance(self.config, dict):
            self.load_flag = True
        else:
            logger.error(f'Unable to load dictionary from config file: {config_file}')
            self.config = {}

    def load_dict(self, config_dict):
        """Takes a dictionary and places it into self.config.
        """
        if self.load_flag:
            logger.warning('Overwriting the previously loaded config file')

        if isinstance(config_dict, dict):
            self.config = config_dict
            self.load_flag = True
        else:
            illegal_value(config_dict, 'dictionary config object', 'Config.load_dict')
            self.config = {}

    def save_file(self, config_file):
        """Save the config file (as a yaml file only right now).
        """
        suffix = os.path.splitext(config_file)[1]
        if suffix != '.yml' and suffix != '.yaml':
            illegal_value(suffix, 'config file extension', 'Config.save_file')

        # Check if config file exists
        if os.path.isfile(config_file):
            logger.info(f'Updating {config_file}')
        else:
            logger.info(f'Saving {config_file}')

        # Save config file
        with open(config_file, 'w') as f:
            safe_dump(self.config, f)

    def validate(self, pars_required, pars_missing=None):
        """Returns False if any required keys are missing.

        pars_required: keys to check; nested levels are colon-separated and
            integer-like levels index into sequences (e.g. 'scan:0:start').
        pars_missing: optional list that is filled in place with the missing
            keys.
        """
        if not self.load_flag:
            logger.error('Load a config file prior to calling Config.validate')

        def validate_nested_pars(config, par):
            # Walk one colon-separated level at a time; levels that parse as
            # int index into sequences, anything else is a mapping key
            par_levels = par.split(':')
            first_level_par = par_levels[0]
            try:
                first_level_par = int(first_level_par)
            except:
                pass
            try:
                next_level_config = config[first_level_par]
                if len(par_levels) > 1:
                    next_level_par = ':'.join(par_levels[1:])
                    return(validate_nested_pars(next_level_config, next_level_par))
                else:
                    return(True)
            except:
                return(False)

        missing = [p for p in pars_required if not validate_nested_pars(self.config, p)]
        # Bug fix: the original rebound the pars_missing parameter to a new
        # list, so a caller-supplied list was never filled in; extend it
        # in place instead
        if pars_missing is not None:
            pars_missing.extend(missing)
        if len(missing) > 0:
            logger.error(f'Missing item(s) in configuration: {", ".join(missing)}')
            return(False)
        else:
            return(True)