Mercurial > repos > rv43 > test_tomo_reconstruct
comparison fit.py @ 0:98e23dff1de2 draft default tip
planemo upload for repository https://github.com/rolfverberg/galaxytools commit f8c4bdb31c20c468045ad5e6eb255a293244bc6c-dirty
| author | rv43 |
|---|---|
| date | Tue, 21 Mar 2023 16:22:42 +0000 |
| parents | |
| children |
comparison
equal
deleted
inserted
replaced
| -1:000000000000 | 0:98e23dff1de2 |
|---|---|
| 1 #!/usr/bin/env python3 | |
| 2 | |
| 3 # -*- coding: utf-8 -*- | |
| 4 """ | |
| 5 Created on Mon Dec 6 15:36:22 2021 | |
| 6 | |
| 7 @author: rv43 | |
| 8 """ | |
| 9 | |
| 10 import logging | |
| 11 | |
| 12 from asteval import Interpreter, get_ast_names | |
| 13 from copy import deepcopy | |
| 14 from lmfit import Model, Parameters | |
| 15 from lmfit.model import ModelResult | |
| 16 from lmfit.models import ConstantModel, LinearModel, QuadraticModel, PolynomialModel,\ | |
| 17 ExponentialModel, StepModel, RectangleModel, ExpressionModel, GaussianModel,\ | |
| 18 LorentzianModel | |
| 19 import numpy as np | |
| 20 from os import cpu_count, getpid, listdir, mkdir, path | |
| 21 from re import compile, sub | |
| 22 from shutil import rmtree | |
| 23 try: | |
| 24 from sympy import diff, simplify | |
| 25 except: | |
| 26 pass | |
| 27 try: | |
| 28 from joblib import Parallel, delayed | |
| 29 have_joblib = True | |
| 30 except: | |
| 31 have_joblib = False | |
| 32 try: | |
| 33 import xarray as xr | |
| 34 have_xarray = True | |
| 35 except: | |
| 36 have_xarray = False | |
| 37 | |
| 38 try: | |
| 39 from .general import illegal_value, is_int, is_dict_series, is_index, index_nearest, \ | |
| 40 almost_equal, quick_plot #, eval_expr | |
| 41 except: | |
| 42 try: | |
| 43 from sys import path as syspath | |
| 44 syspath.append(f'/nfs/chess/user/rv43/msnctools/msnctools') | |
| 45 from general import illegal_value, is_int, is_dict_series, is_index, index_nearest, \ | |
| 46 almost_equal, quick_plot #, eval_expr | |
| 47 except: | |
| 48 from general import illegal_value, is_int, is_dict_series, is_index, index_nearest, \ | |
| 49 almost_equal, quick_plot #, eval_expr | |
| 50 | |
from sys import float_info

# Smallest/largest representable positive floats, used elsewhere as
# sentinel bounds when rescaling parameter limits.
float_min = float_info.min
float_max = float_info.max

# sigma = fwhm_factor*fwhm
# Expression (as a string, evaluated by lmfit) converting a fwhm
# parameter into the model's sigma parameter, per model name.
# NOTE: the original strings carried a pointless f-prefix (no
# placeholders); removed — the literals are byte-identical.
# NOTE(review): 'voight'/'pseudovoight' are misspellings of
# 'voigt'/'pseudovoigt' but are kept as-is since callers may look up
# these exact keys — confirm before renaming.
fwhm_factor = {
    'gaussian': 'fwhm/(2*sqrt(2*log(2)))',
    'lorentzian': '0.5*fwhm',
    'splitlorentzian': '0.5*fwhm',  # sigma = sigma_r
    'voight': '0.2776*fwhm',        # sigma = gamma
    'pseudovoight': '0.5*fwhm'}     # fraction = 0.5

# amplitude = height_factor*height*fwhm
# Expression converting height and fwhm into the model's amplitude
# parameter, per model name.
height_factor = {
    'gaussian': 'height*fwhm*0.5*sqrt(pi/log(2))',
    'lorentzian': 'height*fwhm*0.5*pi',
    'splitlorentzian': 'height*fwhm*0.5*pi',  # sigma = sigma_r
    'voight': '3.334*height*fwhm',            # sigma = gamma
    'pseudovoight': '1.268*height*fwhm'}      # fraction = 0.5
| 70 | |
class Fit:
    """Wrapper class for lmfit

    Wraps lmfit models/parameters to fit 1D data y(x), with optional
    normalization of y to its observed range before fitting.
    """
| 74 def __init__(self, y, x=None, models=None, normalize=True, **kwargs): | |
| 75 if not isinstance(normalize, bool): | |
| 76 raise ValueError(f'Invalid parameter normalize ({normalize})') | |
| 77 self._mask = None | |
| 78 self._model = None | |
| 79 self._norm = None | |
| 80 self._normalized = False | |
| 81 self._parameters = Parameters() | |
| 82 self._parameter_bounds = None | |
| 83 self._parameter_norms = {} | |
| 84 self._linear_parameters = [] | |
| 85 self._nonlinear_parameters = [] | |
| 86 self._result = None | |
| 87 self._try_linear_fit = True | |
| 88 self._y = None | |
| 89 self._y_norm = None | |
| 90 self._y_range = None | |
| 91 if 'try_linear_fit' in kwargs: | |
| 92 try_linear_fit = kwargs.pop('try_linear_fit') | |
| 93 if not isinstance(try_linear_fit, bool): | |
| 94 illegal_value(try_linear_fit, 'try_linear_fit', 'Fit.fit', raise_error=True) | |
| 95 self._try_linear_fit = try_linear_fit | |
| 96 if y is not None: | |
| 97 if isinstance(y, (tuple, list, np.ndarray)): | |
| 98 self._x = np.asarray(x) | |
| 99 elif have_xarray and isinstance(y, xr.DataArray): | |
| 100 if x is not None: | |
| 101 logging.warning('Ignoring superfluous input x ({x}) in Fit.__init__') | |
| 102 if y.ndim != 1: | |
| 103 illegal_value(y.ndim, 'DataArray dimensions', 'Fit:__init__', raise_error=True) | |
| 104 self._x = np.asarray(y[y.dims[0]]) | |
| 105 else: | |
| 106 illegal_value(y, 'y', 'Fit:__init__', raise_error=True) | |
| 107 self._y = y | |
| 108 if self._x.ndim != 1: | |
| 109 raise ValueError(f'Invalid dimension for input x ({self._x.ndim})') | |
| 110 if self._x.size != self._y.size: | |
| 111 raise ValueError(f'Inconsistent x and y dimensions ({self._x.size} vs '+ | |
| 112 f'{self._y.size})') | |
| 113 if 'mask' in kwargs: | |
| 114 self._mask = kwargs.pop('mask') | |
| 115 if self._mask is None: | |
| 116 y_min = float(self._y.min()) | |
| 117 self._y_range = float(self._y.max())-y_min | |
| 118 if normalize and self._y_range > 0.0: | |
| 119 self._norm = (y_min, self._y_range) | |
| 120 else: | |
| 121 self._mask = np.asarray(self._mask).astype(bool) | |
| 122 if self._x.size != self._mask.size: | |
| 123 raise ValueError(f'Inconsistent x and mask dimensions ({self._x.size} vs '+ | |
| 124 f'{self._mask.size})') | |
| 125 y_masked = np.asarray(self._y)[~self._mask] | |
| 126 y_min = float(y_masked.min()) | |
| 127 self._y_range = float(y_masked.max())-y_min | |
| 128 if normalize and self._y_range > 0.0: | |
| 129 if normalize and self._y_range > 0.0: | |
| 130 self._norm = (y_min, self._y_range) | |
| 131 if models is not None: | |
| 132 if callable(models) or isinstance(models, str): | |
| 133 kwargs = self.add_model(models, **kwargs) | |
| 134 elif isinstance(models, (tuple, list)): | |
| 135 for model in models: | |
| 136 kwargs = self.add_model(model, **kwargs) | |
| 137 self.fit(**kwargs) | |
| 138 | |
| 139 @classmethod | |
| 140 def fit_data(cls, y, models, x=None, normalize=True, **kwargs): | |
| 141 return(cls(y, x=x, models=models, normalize=normalize, **kwargs)) | |
| 142 | |
| 143 @property | |
| 144 def best_errors(self): | |
| 145 if self._result is None: | |
| 146 return(None) | |
| 147 return({name:self._result.params[name].stderr for name in sorted(self._result.params) | |
| 148 if name != 'tmp_normalization_offset_c'}) | |
| 149 | |
| 150 @property | |
| 151 def best_fit(self): | |
| 152 if self._result is None: | |
| 153 return(None) | |
| 154 return(self._result.best_fit) | |
| 155 | |
| 156 @property | |
| 157 def best_parameters(self): | |
| 158 if self._result is None: | |
| 159 return(None) | |
| 160 parameters = {} | |
| 161 for name in sorted(self._result.params): | |
| 162 if name != 'tmp_normalization_offset_c': | |
| 163 par = self._result.params[name] | |
| 164 parameters[name] = {'value': par.value, 'error': par.stderr, | |
| 165 'init_value': par.init_value, 'min': par.min, 'max': par.max, | |
| 166 'vary': par.vary, 'expr': par.expr} | |
| 167 return(parameters) | |
| 168 | |
    @property
    def best_results(self):
        """Convert the input data array to a data set and add the fit results.

        Returns an xarray.Dataset holding the input y (and mask, if any),
        the best-fit curve, and the best values/errors indexed by
        parameter name; None if no fit was run. Requires xarray.
        """
        if self._result is None:
            return(None)
        if isinstance(self._y, xr.DataArray):
            # Reuse the DataArray's own coordinates and name
            best_results = self._y.to_dataset()
            dims = self._y.dims
            fit_name = f'{self._y.name}_fit'
        else:
            coords = {'x': (['x'], self._x)}
            # NOTE(review): ('x') is a plain string, not a 1-tuple;
            # xarray accepts a string dim name so behavior is unchanged.
            dims = ('x')
            best_results = xr.Dataset(coords=coords)
            best_results['y'] = (dims, self._y)
            fit_name = 'y_fit'
        best_results[fit_name] = (dims, self.best_fit)
        if self._mask is not None:
            best_results['mask'] = self._mask
        # Parameter names/values/errors are stored as 1D variables over
        # the 'par_names' coordinate
        best_results.coords['par_names'] = ('peak', [name for name in self.best_values.keys()])
        best_results['best_values'] = (['par_names'], [v for v in self.best_values.values()])
        best_results['best_errors'] = (['par_names'], [v for v in self.best_errors.values()])
        best_results.attrs['components'] = self.components
        return(best_results)
| 193 | |
| 194 @property | |
| 195 def best_values(self): | |
| 196 if self._result is None: | |
| 197 return(None) | |
| 198 return({name:self._result.params[name].value for name in sorted(self._result.params) | |
| 199 if name != 'tmp_normalization_offset_c'}) | |
| 200 | |
| 201 @property | |
| 202 def chisqr(self): | |
| 203 if self._result is None: | |
| 204 return(None) | |
| 205 return(self._result.chisqr) | |
| 206 | |
    @property
    def components(self):
        """Summarize each model component of the last fit.

        Returns a dict keyed by component name (prefix and/or model name)
        with, per component, its parameter info ('free', 'value', optional
        'expr') and, for expression models, the expression string.
        Returns an empty dict (with a warning) if no fit was run.
        """
        components = {}
        if self._result is None:
            logging.warning('Unable to collect components in Fit.components')
            return(components)
        for component in self._result.components:
            # Skip the internal normalization-offset component
            if 'tmp_normalization_offset_c' in component.param_names:
                continue
            parameters = {}
            for name in component.param_names:
                par = self._parameters[name]
                parameters[name] = {'free': par.vary, 'value': self._result.params[name].value}
                if par.expr is not None:
                    parameters[name]['expr'] = par.expr
            expr = None
            if isinstance(component, ExpressionModel):
                # NOTE(review): relies on lmfit's private _name attribute
                name = component._name
                if name[-1] == '_':
                    name = name[:-1]
                expr = component.expr
            else:
                prefix = component.prefix
                if len(prefix):
                    # Strip a trailing underscore from the prefix for display
                    if prefix[-1] == '_':
                        prefix = prefix[:-1]
                    name = f'{prefix} ({component._name})'
                else:
                    name = f'{component._name}'
            if expr is None:
                components[name] = {'parameters': parameters}
            else:
                components[name] = {'expr': expr, 'parameters': parameters}
        return(components)
| 241 | |
| 242 @property | |
| 243 def covar(self): | |
| 244 if self._result is None: | |
| 245 return(None) | |
| 246 return(self._result.covar) | |
| 247 | |
| 248 @property | |
| 249 def init_parameters(self): | |
| 250 if self._result is None or self._result.init_params is None: | |
| 251 return(None) | |
| 252 parameters = {} | |
| 253 for name in sorted(self._result.init_params): | |
| 254 if name != 'tmp_normalization_offset_c': | |
| 255 par = self._result.init_params[name] | |
| 256 parameters[name] = {'value': par.value, 'min': par.min, 'max': par.max, | |
| 257 'vary': par.vary, 'expr': par.expr} | |
| 258 return(parameters) | |
| 259 | |
| 260 @property | |
| 261 def init_values(self): | |
| 262 if self._result is None or self._result.init_params is None: | |
| 263 return(None) | |
| 264 return({name:self._result.init_params[name].value for name in | |
| 265 sorted(self._result.init_params) if name != 'tmp_normalization_offset_c'}) | |
| 266 | |
    @property
    def normalization_offset(self):
        """Offset applied to y during normalization.

        Returns 0.0 when normalization is off, None if no fit was run.
        NOTE(review): when normalization is on, this returns the lmfit
        Parameter object 'tmp_normalization_offset_c' rather than its
        .value — confirm whether callers expect a float here.
        """
        if self._result is None:
            return(None)
        if self._norm is None:
            return(0.0)
        else:
            # Prefer the initial parameters when available
            if self._result.init_params is not None:
                normalization_offset = self._result.init_params['tmp_normalization_offset_c']
            else:
                normalization_offset = self._result.params['tmp_normalization_offset_c']
            return(normalization_offset)
| 279 | |
| 280 @property | |
| 281 def num_func_eval(self): | |
| 282 if self._result is None: | |
| 283 return(None) | |
| 284 return(self._result.nfev) | |
| 285 | |
| 286 @property | |
| 287 def parameters(self): | |
| 288 return({name:{'min': par.min, 'max': par.max, 'vary': par.vary, 'expr': par.expr} | |
| 289 for name, par in self._parameters.items() if name != 'tmp_normalization_offset_c'}) | |
| 290 | |
| 291 @property | |
| 292 def redchi(self): | |
| 293 if self._result is None: | |
| 294 return(None) | |
| 295 return(self._result.redchi) | |
| 296 | |
| 297 @property | |
| 298 def residual(self): | |
| 299 if self._result is None: | |
| 300 return(None) | |
| 301 return(self._result.residual) | |
| 302 | |
    @property
    def success(self):
        """Whether the last fit succeeded; None if no fit was run.

        NOTE(review): when the fit failed with an ier other than 0 or 5,
        this logs a warning and reports True anyway — apparently treating
        those as soft failures; and both branches log the identical
        message. Confirm this is intentional before changing.
        """
        if self._result is None:
            return(None)
        if not self._result.success:
            # print(f'ier = {self._result.ier}')
            # print(f'lmdif_message = {self._result.lmdif_message}')
            # print(f'message = {self._result.message}')
            # print(f'nfev = {self._result.nfev}')
            # print(f'redchi = {self._result.redchi}')
            # print(f'success = {self._result.success}')
            if self._result.ier == 0 or self._result.ier == 5:
                logging.warning(f'ier = {self._result.ier}: {self._result.message}')
            else:
                logging.warning(f'ier = {self._result.ier}: {self._result.message}')
                return(True)
            # self.print_fit_report()
            # self.plot()
        return(self._result.success)
| 322 | |
| 323 @property | |
| 324 def var_names(self): | |
| 325 """Intended to be used with covar | |
| 326 """ | |
| 327 if self._result is None: | |
| 328 return(None) | |
| 329 return(getattr(self._result, 'var_names', None)) | |
| 330 | |
| 331 @property | |
| 332 def x(self): | |
| 333 return(self._x) | |
| 334 | |
| 335 @property | |
| 336 def y(self): | |
| 337 return(self._y) | |
| 338 | |
| 339 def print_fit_report(self, result=None, show_correl=False): | |
| 340 if result is None: | |
| 341 result = self._result | |
| 342 if result is not None: | |
| 343 print(result.fit_report(show_correl=show_correl)) | |
| 344 | |
| 345 def add_parameter(self, **parameter): | |
| 346 if not isinstance(parameter, dict): | |
| 347 raise ValueError(f'Invalid parameter ({parameter})') | |
| 348 if parameter.get('expr') is not None: | |
| 349 raise KeyError(f'Illegal "expr" key in parameter {parameter}') | |
| 350 name = parameter['name'] | |
| 351 if not isinstance(name, str): | |
| 352 raise ValueError(f'Illegal "name" value ({name}) in parameter {parameter}') | |
| 353 if parameter.get('norm') is None: | |
| 354 self._parameter_norms[name] = False | |
| 355 else: | |
| 356 norm = parameter.pop('norm') | |
| 357 if self._norm is None: | |
| 358 logging.warning(f'Ignoring norm in parameter {name} in '+ | |
| 359 f'Fit.add_parameter (normalization is turned off)') | |
| 360 self._parameter_norms[name] = False | |
| 361 else: | |
| 362 if not isinstance(norm, bool): | |
| 363 raise ValueError(f'Illegal "norm" value ({norm}) in parameter {parameter}') | |
| 364 self._parameter_norms[name] = norm | |
| 365 vary = parameter.get('vary') | |
| 366 if vary is not None: | |
| 367 if not isinstance(vary, bool): | |
| 368 raise ValueError(f'Illegal "vary" value ({vary}) in parameter {parameter}') | |
| 369 if not vary: | |
| 370 if 'min' in parameter: | |
| 371 logging.warning(f'Ignoring min in parameter {name} in '+ | |
| 372 f'Fit.add_parameter (vary = {vary})') | |
| 373 parameter.pop('min') | |
| 374 if 'max' in parameter: | |
| 375 logging.warning(f'Ignoring max in parameter {name} in '+ | |
| 376 f'Fit.add_parameter (vary = {vary})') | |
| 377 parameter.pop('max') | |
| 378 if self._norm is not None and name not in self._parameter_norms: | |
| 379 raise ValueError(f'Missing parameter normalization type for paremeter {name}') | |
| 380 self._parameters.add(**parameter) | |
| 381 | |
    def add_model(self, model, prefix=None, parameters=None, parameter_norms=None, **kwargs):
        """Create a model component and add it to the composite fit model.

        :param model: a callable model function, or the name of a built-in
            model: 'constant', 'linear', 'quadratic', 'gaussian',
            'lorentzian', 'exponential', 'step', 'rectangle', 'expression'
        :param prefix: optional parameter-name prefix for this component
        :param parameters: dict or sequence of dicts with initial parameter
            settings ('name' plus 'value'/'vary'/'min'/'max'/'expr'/'norm')
        :param parameter_norms: dict or sequence of dicts declaring which
            parameters scale with the data normalization (required for a
            callable model when normalization is on and no per-parameter
            'norm' keys are given in parameters)
        :return: the remaining keyword arguments, with entries consumed
            here (e.g. 'form', 'expr', initial parameter values) removed
        :raises ValueError, KeyError: for invalid specifications
        """
        # Create the new model
        if prefix is not None and not isinstance(prefix, str):
            # NOTE(review): this warning string is missing its f-prefix,
            # so it prints the literal '{model} {type(model)}' — confirm.
            logging.warning('Ignoring illegal prefix: {model} {type(model)}')
            prefix = None
        # pprefix is the prefix used when composing full parameter names
        if prefix is None:
            pprefix = ''
        else:
            pprefix = prefix
        if parameters is not None:
            # Normalize to a tuple of dicts and work on a private copy
            if isinstance(parameters, dict):
                parameters = (parameters, )
            elif not is_dict_series(parameters):
                illegal_value(parameters, 'parameters', 'Fit.add_model', raise_error=True)
            parameters = deepcopy(parameters)
        if parameter_norms is not None:
            if isinstance(parameter_norms, dict):
                parameter_norms = (parameter_norms, )
            if not is_dict_series(parameter_norms):
                illegal_value(parameter_norms, 'parameter_norms', 'Fit.add_model', raise_error=True)
        # Collects the norm flag of every parameter this component adds
        new_parameter_norms = {}
        if callable(model):
            # Linear fit not yet implemented for callable models
            self._try_linear_fit = False
            if parameter_norms is None:
                if parameters is None:
                    raise ValueError('Either "parameters" or "parameter_norms" is required in '+
                            f'{model}')
                # Pull norm flags out of the parameter dicts themselves
                for par in parameters:
                    name = par['name']
                    if not isinstance(name, str):
                        raise ValueError(f'Illegal "name" value ({name}) in input parameters')
                    if par.get('norm') is not None:
                        norm = par.pop('norm')
                        if not isinstance(norm, bool):
                            raise ValueError(f'Illegal "norm" value ({norm}) in input parameters')
                        new_parameter_norms[f'{pprefix}{name}'] = norm
            else:
                for par in parameter_norms:
                    name = par['name']
                    if not isinstance(name, str):
                        raise ValueError(f'Illegal "name" value ({name}) in input parameters')
                    norm = par.get('norm')
                    if norm is None or not isinstance(norm, bool):
                        raise ValueError(f'Illegal "norm" value ({norm}) in input parameters')
                    new_parameter_norms[f'{pprefix}{name}'] = norm
            if parameters is not None:
                for par in parameters:
                    if par.get('expr') is not None:
                        raise KeyError(f'Illegal "expr" key ({par.get("expr")}) in parameter '+
                                f'{name} for a callable model {model}')
                    name = par['name']
                    if not isinstance(name, str):
                        raise ValueError(f'Illegal "name" value ({name}) in input parameters')
            # RV FIX callable model will need partial deriv functions for any linear pars
            # to get the linearized matrix, so for now skip linear solution option
            newmodel = Model(model, prefix=prefix)
        elif isinstance(model, str):
            if model == 'constant':  # Par: c
                newmodel = ConstantModel(prefix=prefix)
                new_parameter_norms[f'{pprefix}c'] = True
                self._linear_parameters.append(f'{pprefix}c')
            elif model == 'linear':  # Par: slope, intercept
                newmodel = LinearModel(prefix=prefix)
                new_parameter_norms[f'{pprefix}slope'] = True
                new_parameter_norms[f'{pprefix}intercept'] = True
                self._linear_parameters.append(f'{pprefix}slope')
                self._linear_parameters.append(f'{pprefix}intercept')
            elif model == 'quadratic':  # Par: a, b, c
                newmodel = QuadraticModel(prefix=prefix)
                new_parameter_norms[f'{pprefix}a'] = True
                new_parameter_norms[f'{pprefix}b'] = True
                new_parameter_norms[f'{pprefix}c'] = True
                self._linear_parameters.append(f'{pprefix}a')
                self._linear_parameters.append(f'{pprefix}b')
                self._linear_parameters.append(f'{pprefix}c')
            elif model == 'gaussian':  # Par: amplitude, center, sigma (fwhm, height)
                newmodel = GaussianModel(prefix=prefix)
                new_parameter_norms[f'{pprefix}amplitude'] = True
                new_parameter_norms[f'{pprefix}center'] = False
                new_parameter_norms[f'{pprefix}sigma'] = False
                self._linear_parameters.append(f'{pprefix}amplitude')
                self._nonlinear_parameters.append(f'{pprefix}center')
                self._nonlinear_parameters.append(f'{pprefix}sigma')
                # parameter norms for height and fwhm are needed to get correct errors
                new_parameter_norms[f'{pprefix}height'] = True
                new_parameter_norms[f'{pprefix}fwhm'] = False
            elif model == 'lorentzian':  # Par: amplitude, center, sigma (fwhm, height)
                newmodel = LorentzianModel(prefix=prefix)
                new_parameter_norms[f'{pprefix}amplitude'] = True
                new_parameter_norms[f'{pprefix}center'] = False
                new_parameter_norms[f'{pprefix}sigma'] = False
                self._linear_parameters.append(f'{pprefix}amplitude')
                self._nonlinear_parameters.append(f'{pprefix}center')
                self._nonlinear_parameters.append(f'{pprefix}sigma')
                # parameter norms for height and fwhm are needed to get correct errors
                new_parameter_norms[f'{pprefix}height'] = True
                new_parameter_norms[f'{pprefix}fwhm'] = False
            elif model == 'exponential':  # Par: amplitude, decay
                newmodel = ExponentialModel(prefix=prefix)
                new_parameter_norms[f'{pprefix}amplitude'] = True
                new_parameter_norms[f'{pprefix}decay'] = False
                self._linear_parameters.append(f'{pprefix}amplitude')
                self._nonlinear_parameters.append(f'{pprefix}decay')
            elif model == 'step':  # Par: amplitude, center, sigma
                form = kwargs.get('form')
                if form is not None:
                    kwargs.pop('form')
                if form is None or form not in ('linear', 'atan', 'arctan', 'erf', 'logistic'):
                    raise ValueError(f'Invalid parameter form for build-in step model ({form})')
                newmodel = StepModel(prefix=prefix, form=form)
                new_parameter_norms[f'{pprefix}amplitude'] = True
                new_parameter_norms[f'{pprefix}center'] = False
                new_parameter_norms[f'{pprefix}sigma'] = False
                self._linear_parameters.append(f'{pprefix}amplitude')
                self._nonlinear_parameters.append(f'{pprefix}center')
                self._nonlinear_parameters.append(f'{pprefix}sigma')
            elif model == 'rectangle':  # Par: amplitude, center1, center2, sigma1, sigma2
                form = kwargs.get('form')
                if form is not None:
                    kwargs.pop('form')
                if form is None or form not in ('linear', 'atan', 'arctan', 'erf', 'logistic'):
                    raise ValueError('Invalid parameter form for build-in rectangle model '+
                            f'({form})')
                newmodel = RectangleModel(prefix=prefix, form=form)
                new_parameter_norms[f'{pprefix}amplitude'] = True
                new_parameter_norms[f'{pprefix}center1'] = False
                new_parameter_norms[f'{pprefix}center2'] = False
                new_parameter_norms[f'{pprefix}sigma1'] = False
                new_parameter_norms[f'{pprefix}sigma2'] = False
                self._linear_parameters.append(f'{pprefix}amplitude')
                self._nonlinear_parameters.append(f'{pprefix}center1')
                self._nonlinear_parameters.append(f'{pprefix}center2')
                self._nonlinear_parameters.append(f'{pprefix}sigma1')
                self._nonlinear_parameters.append(f'{pprefix}sigma2')
            elif model == 'expression':  # Par: by expression
                expr = kwargs['expr']
                if not isinstance(expr, str):
                    raise ValueError(f'Illegal "expr" value ({expr}) in {model}')
                kwargs.pop('expr')
                if parameter_norms is not None:
                    # For expression models normalization is derived below
                    logging.warning('Ignoring parameter_norms (normalization determined from '+
                            'linearity)}')
                if parameters is not None:
                    for par in parameters:
                        if par.get('expr') is not None:
                            raise KeyError(f'Illegal "expr" key ({par.get("expr")}) in parameter '+
                                    f'({par}) for an expression model')
                        if par.get('norm') is not None:
                            logging.warning(f'Ignoring "norm" key in parameter ({par}) '+
                                    '(normalization determined from linearity)}')
                            par.pop('norm')
                        name = par['name']
                        if not isinstance(name, str):
                            raise ValueError(f'Illegal "name" value ({name}) in input parameters')
                # Extract the free parameter names from the expression
                # (excluding x, existing parameters and asteval builtins)
                ast = Interpreter()
                expr_parameters = [name for name in get_ast_names(ast.parse(expr))
                        if name != 'x' and name not in self._parameters
                        and name not in ast.symtable]
                if prefix is None:
                    newmodel = ExpressionModel(expr=expr)
                else:
                    # Rewrite each free parameter in the expression with the prefix
                    for name in expr_parameters:
                        expr = sub(rf'\b{name}\b', f'{prefix}{name}', expr)
                    expr_parameters = [f'{prefix}{name}' for name in expr_parameters]
                    # NOTE(review): name here is the last loop variable
                    # value above — confirm this is the intended model name
                    newmodel = ExpressionModel(expr=expr, name=name)
                # Remove already existing names
                for name in newmodel.param_names.copy():
                    if name not in expr_parameters:
                        newmodel._func_allargs.remove(name)
                        newmodel._param_names.remove(name)
            else:
                raise ValueError(f'Unknown build-in fit model ({model})')
        else:
            illegal_value(model, 'model', 'Fit.add_model', raise_error=True)

        # Add the new model to the current one
        if self._model is None:
            self._model = newmodel
        else:
            self._model += newmodel
        new_parameters = newmodel.make_params()
        self._parameters += new_parameters

        # Check linearity of expression model paremeters
        if isinstance(newmodel, ExpressionModel):
            for name in newmodel.param_names:
                # A vanishing second derivative means the expression is
                # linear in this parameter
                if not diff(newmodel.expr, name, name):
                    if name not in self._linear_parameters:
                        self._linear_parameters.append(name)
                    new_parameter_norms[name] = True
                else:
                    if name not in self._nonlinear_parameters:
                        self._nonlinear_parameters.append(name)
                    new_parameter_norms[name] = False

        # Scale the default initial model parameters
        if self._norm is not None:
            for name, norm in new_parameter_norms.copy().items():
                par = self._parameters.get(name)
                if par is None:
                    new_parameter_norms.pop(name)
                    continue
                if par.expr is None and norm:
                    value = par.value*self._norm[1]
                    _min = par.min
                    _max = par.max
                    # Leave infinite or float_min sentinel bounds untouched
                    if not np.isinf(_min) and abs(_min) != float_min:
                        _min *= self._norm[1]
                    if not np.isinf(_max) and abs(_max) != float_min:
                        _max *= self._norm[1]
                    par.set(value=value, min=_min, max=_max)

        # Initialize the model parameters from parameters
        if prefix is None:
            prefix = ""
        if parameters is not None:
            for parameter in parameters:
                name = parameter['name']
                if not isinstance(name, str):
                    raise ValueError(f'Illegal "name" value ({name}) in input parameters')
                # Try the bare name first, then the prefixed name
                if name not in new_parameters:
                    name = prefix+name
                    parameter['name'] = name
                if name not in new_parameters:
                    logging.warning(f'Ignoring superfluous parameter info for {name}')
                    continue
                if name in self._parameters:
                    parameter.pop('name')
                    if 'norm' in parameter:
                        if not isinstance(parameter['norm'], bool):
                            illegal_value(parameter['norm'], 'norm', 'Fit.add_model',
                                    raise_error=True)
                        new_parameter_norms[name] = parameter['norm']
                        parameter.pop('norm')
                    if parameter.get('expr') is not None:
                        # An expression overrides value/vary/min/max
                        if 'value' in parameter:
                            logging.warning(f'Ignoring value in parameter {name} '+
                                    f'(set by expression: {parameter["expr"]})')
                            parameter.pop('value')
                        if 'vary' in parameter:
                            logging.warning(f'Ignoring vary in parameter {name} '+
                                    f'(set by expression: {parameter["expr"]})')
                            parameter.pop('vary')
                        if 'min' in parameter:
                            logging.warning(f'Ignoring min in parameter {name} '+
                                    f'(set by expression: {parameter["expr"]})')
                            parameter.pop('min')
                        if 'max' in parameter:
                            logging.warning(f'Ignoring max in parameter {name} '+
                                    f'(set by expression: {parameter["expr"]})')
                            parameter.pop('max')
                    if 'vary' in parameter:
                        if not isinstance(parameter['vary'], bool):
                            illegal_value(parameter['vary'], 'vary', 'Fit.add_model',
                                    raise_error=True)
                        if not parameter['vary']:
                            # Bounds are pointless on a fixed parameter
                            if 'min' in parameter:
                                logging.warning(f'Ignoring min in parameter {name} in '+
                                        f'Fit.add_model (vary = {parameter["vary"]})')
                                parameter.pop('min')
                            if 'max' in parameter:
                                logging.warning(f'Ignoring max in parameter {name} in '+
                                        f'Fit.add_model (vary = {parameter["vary"]})')
                                parameter.pop('max')
                    self._parameters[name].set(**parameter)
                    parameter['name'] = name
                else:
                    illegal_value(parameter, 'parameter name', 'Fit.model', raise_error=True)
        self._parameter_norms = {**self._parameter_norms, **new_parameter_norms}

        # Initialize the model parameters from kwargs
        # (iterate over a copy so kwargs.pop() below is safe)
        for name, value in {**kwargs}.items():
            full_name = f'{pprefix}{name}'
            if full_name in new_parameter_norms and isinstance(value, (int, float)):
                kwargs.pop(name)
                if self._parameters[full_name].expr is None:
                    self._parameters[full_name].set(value=value)
                else:
                    logging.warning(f'Ignoring parameter {name} in Fit.fit (set by expression: '+
                            f'{self._parameters[full_name].expr})')

        # Check parameter norms (also need it for expressions to renormalize the errors)
        if self._norm is not None and (callable(model) or model == 'expression'):
            missing_norm = False
            for name in new_parameters.valuesdict():
                if name not in self._parameter_norms:
                    print(f'new_parameters:\n{new_parameters.valuesdict()}')
                    print(f'self._parameter_norms:\n{self._parameter_norms}')
                    logging.error(f'Missing parameter normalization type for {name} in {model}')
                    missing_norm = True
            if missing_norm:
                raise ValueError

        return(kwargs)
| 723 | |
| 724 def fit(self, interactive=False, guess=False, **kwargs): | |
| 725 # Check inputs | |
| 726 if self._model is None: | |
| 727 logging.error('Undefined fit model') | |
| 728 return | |
| 729 if not isinstance(interactive, bool): | |
| 730 illegal_value(interactive, 'interactive', 'Fit.fit', raise_error=True) | |
| 731 if not isinstance(guess, bool): | |
| 732 illegal_value(guess, 'guess', 'Fit.fit', raise_error=True) | |
| 733 if 'try_linear_fit' in kwargs: | |
| 734 try_linear_fit = kwargs.pop('try_linear_fit') | |
| 735 if not isinstance(try_linear_fit, bool): | |
| 736 illegal_value(try_linear_fit, 'try_linear_fit', 'Fit.fit', raise_error=True) | |
| 737 if not self._try_linear_fit: | |
| 738 logging.warning('Ignore superfluous keyword argument "try_linear_fit" (not '+ | |
| 739 'yet supported for callable models)') | |
| 740 else: | |
| 741 self._try_linear_fit = try_linear_fit | |
| 742 # if self._result is None: | |
| 743 # if 'parameters' in kwargs: | |
| 744 # raise ValueError('Invalid parameter parameters ({kwargs["parameters"]})') | |
| 745 # else: | |
| 746 if self._result is not None: | |
| 747 if guess: | |
| 748 logging.warning('Ignoring input parameter guess in Fit.fit during refitting') | |
| 749 guess = False | |
| 750 | |
| 751 # Check for circular expressions | |
| 752 # FIX TODO | |
| 753 # for name1, par1 in self._parameters.items(): | |
| 754 # if par1.expr is not None: | |
| 755 | |
| 756 # Apply mask if supplied: | |
| 757 if 'mask' in kwargs: | |
| 758 self._mask = kwargs.pop('mask') | |
| 759 if self._mask is not None: | |
| 760 self._mask = np.asarray(self._mask).astype(bool) | |
| 761 if self._x.size != self._mask.size: | |
| 762 raise ValueError(f'Inconsistent x and mask dimensions ({self._x.size} vs '+ | |
| 763 f'{self._mask.size})') | |
| 764 | |
| 765 # Estimate initial parameters with build-in lmfit guess method (only for a single model) | |
| 766 # print(f'\nat start fit: kwargs = {kwargs}') | |
| 767 #RV print('\nAt start of fit:') | |
| 768 #RV self._parameters.pretty_print() | |
| 769 # print(f'parameter_norms:\n{self._parameter_norms}') | |
| 770 if guess: | |
| 771 if self._mask is None: | |
| 772 self._parameters = self._model.guess(self._y, x=self._x) | |
| 773 else: | |
| 774 self._parameters = self._model.guess(np.asarray(self._y)[~self._mask], | |
| 775 x=self._x[~self._mask]) | |
| 776 # print('\nAfter guess:') | |
| 777 # self._parameters.pretty_print() | |
| 778 | |
| 779 # Add constant offset for a normalized model | |
| 780 if self._result is None and self._norm is not None and self._norm[0]: | |
| 781 self.add_model('constant', prefix='tmp_normalization_offset_', parameters={'name': 'c', | |
| 782 'value': -self._norm[0], 'vary': False, 'norm': True}) | |
| 783 #'value': -self._norm[0]/self._norm[1], 'vary': False, 'norm': False}) | |
| 784 | |
| 785 # Adjust existing parameters for refit: | |
| 786 if 'parameters' in kwargs: | |
| 787 parameters = kwargs.pop('parameters') | |
| 788 if isinstance(parameters, dict): | |
| 789 parameters = (parameters, ) | |
| 790 elif not is_dict_series(parameters): | |
| 791 illegal_value(parameters, 'parameters', 'Fit.fit', raise_error=True) | |
| 792 for par in parameters: | |
| 793 name = par['name'] | |
| 794 if name not in self._parameters: | |
| 795 raise ValueError(f'Unable to match {name} parameter {par} to an existing one') | |
| 796 if self._parameters[name].expr is not None: | |
| 797 raise ValueError(f'Unable to modify {name} parameter {par} (currently an '+ | |
| 798 'expression)') | |
| 799 if par.get('expr') is not None: | |
| 800 raise KeyError(f'Illegal "expr" key in {name} parameter {par}') | |
| 801 self._parameters[name].set(vary=par.get('vary')) | |
| 802 self._parameters[name].set(min=par.get('min')) | |
| 803 self._parameters[name].set(max=par.get('max')) | |
| 804 self._parameters[name].set(value=par.get('value')) | |
| 805 #RV print('\nAfter adjust:') | |
| 806 #RV self._parameters.pretty_print() | |
| 807 | |
| 808 # Apply parameter updates through keyword arguments | |
| 809 # print(f'kwargs = {kwargs}') | |
| 810 # print(f'parameter_norms = {self._parameter_norms}') | |
| 811 for name in set(self._parameters) & set(kwargs): | |
| 812 value = kwargs.pop(name) | |
| 813 if self._parameters[name].expr is None: | |
| 814 self._parameters[name].set(value=value) | |
| 815 else: | |
| 816 logging.warning(f'Ignoring parameter {name} in Fit.fit (set by expression: '+ | |
| 817 f'{self._parameters[name].expr})') | |
| 818 | |
| 819 # Check for uninitialized parameters | |
| 820 for name, par in self._parameters.items(): | |
| 821 if par.expr is None: | |
| 822 value = par.value | |
| 823 if value is None or np.isinf(value) or np.isnan(value): | |
| 824 if interactive: | |
| 825 value = input_num(f'Enter an initial value for {name}', default=1.0) | |
| 826 else: | |
| 827 value = 1.0 | |
| 828 if self._norm is None or name not in self._parameter_norms: | |
| 829 self._parameters[name].set(value=value) | |
| 830 elif self._parameter_norms[name]: | |
| 831 self._parameters[name].set(value=value*self._norm[1]) | |
| 832 | |
| 833 # Check if model is linear | |
| 834 try: | |
| 835 linear_model = self._check_linearity_model() | |
| 836 except: | |
| 837 linear_model = False | |
| 838 # print(f'\n\n--------> linear_model = {linear_model}\n') | |
| 839 if kwargs.get('check_only_linearity') is not None: | |
| 840 return(linear_model) | |
| 841 | |
| 842 # Normalize the data and initial parameters | |
| 843 #RV print('\nBefore normalization:') | |
| 844 #RV self._parameters.pretty_print() | |
| 845 # print(f'parameter_norms:\n{self._parameter_norms}') | |
| 846 self._normalize() | |
| 847 # print(f'norm = {self._norm}') | |
| 848 #RV print('\nAfter normalization:') | |
| 849 #RV self._parameters.pretty_print() | |
| 850 # self.print_fit_report() | |
| 851 # print(f'parameter_norms:\n{self._parameter_norms}') | |
| 852 | |
| 853 if linear_model: | |
| 854 # Perform a linear fit by direct matrix solution with numpy | |
| 855 try: | |
| 856 if self._mask is None: | |
| 857 self._fit_linear_model(self._x, self._y_norm) | |
| 858 else: | |
| 859 self._fit_linear_model(self._x[~self._mask], | |
| 860 np.asarray(self._y_norm)[~self._mask]) | |
| 861 except: | |
| 862 linear_model = False | |
| 863 if not linear_model: | |
| 864 # Perform a non-linear fit with lmfit | |
| 865 # Prevent initial values from sitting at boundaries | |
| 866 self._parameter_bounds = {name:{'min': par.min, 'max': par.max} for name, par in | |
| 867 self._parameters.items() if par.vary} | |
| 868 for par in self._parameters.values(): | |
| 869 if par.vary: | |
| 870 par.set(value=self._reset_par_at_boundary(par, par.value)) | |
| 871 # print('\nAfter checking boundaries:') | |
| 872 # self._parameters.pretty_print() | |
| 873 | |
| 874 # Perform the fit | |
| 875 # fit_kws = None | |
| 876 # if 'Dfun' in kwargs: | |
| 877 # fit_kws = {'Dfun': kwargs.pop('Dfun')} | |
| 878 # self._result = self._model.fit(self._y_norm, self._parameters, x=self._x, | |
| 879 # fit_kws=fit_kws, **kwargs) | |
| 880 if self._mask is None: | |
| 881 self._result = self._model.fit(self._y_norm, self._parameters, x=self._x, **kwargs) | |
| 882 else: | |
| 883 self._result = self._model.fit(np.asarray(self._y_norm)[~self._mask], | |
| 884 self._parameters, x=self._x[~self._mask], **kwargs) | |
| 885 #RV print('\nAfter fit:') | |
| 886 # print(f'\nself._result ({self._result}):\n\t{self._result.__dict__}') | |
| 887 #RV self._parameters.pretty_print() | |
| 888 # self.print_fit_report() | |
| 889 | |
| 890 # Set internal parameter values to fit results upon success | |
| 891 if self.success: | |
| 892 for name, par in self._parameters.items(): | |
| 893 if par.expr is None and par.vary: | |
| 894 par.set(value=self._result.params[name].value) | |
| 895 # print('\nAfter update parameter values:') | |
| 896 # self._parameters.pretty_print() | |
| 897 | |
| 898 # Renormalize the data and results | |
| 899 self._renormalize() | |
| 900 #RV print('\nAfter renormalization:') | |
| 901 #RV self._parameters.pretty_print() | |
| 902 # self.print_fit_report() | |
| 903 | |
| 904 def plot(self, y=None, y_title=None, result=None, skip_init=False, plot_comp_legends=False, | |
| 905 plot_residual=False, plot_masked_data=True, **kwargs): | |
| 906 if result is None: | |
| 907 result = self._result | |
| 908 if result is None: | |
| 909 return | |
| 910 plots = [] | |
| 911 legend = [] | |
| 912 if self._mask is None: | |
| 913 mask = np.zeros(self._x.size).astype(bool) | |
| 914 plot_masked_data = False | |
| 915 else: | |
| 916 mask = self._mask | |
| 917 if y is not None: | |
| 918 if not isinstance(y, (tuple, list, np.ndarray)): | |
| 919 illegal_value(y, 'y', 'Fit.plot') | |
| 920 if len(y) != len(self._x): | |
| 921 logging.warning('Ignoring parameter y in Fit.plot (wrong dimension)') | |
| 922 y = None | |
| 923 if y is not None: | |
| 924 if y_title is None or not isinstance(y_title, str): | |
| 925 y_title = 'data' | |
| 926 plots += [(self._x, y, '.')] | |
| 927 legend += [y_title] | |
| 928 if self._y is not None: | |
| 929 plots += [(self._x, np.asarray(self._y), 'b.')] | |
| 930 legend += ['data'] | |
| 931 if plot_masked_data: | |
| 932 plots += [(self._x[mask], np.asarray(self._y)[mask], 'bx')] | |
| 933 legend += ['masked data'] | |
| 934 if isinstance(plot_residual, bool) and plot_residual: | |
| 935 plots += [(self._x[~mask], result.residual, 'k-')] | |
| 936 legend += ['residual'] | |
| 937 plots += [(self._x[~mask], result.best_fit, 'k-')] | |
| 938 legend += ['best fit'] | |
| 939 if not skip_init and hasattr(result, 'init_fit'): | |
| 940 plots += [(self._x[~mask], result.init_fit, 'g-')] | |
| 941 legend += ['init'] | |
| 942 components = result.eval_components(x=self._x[~mask]) | |
| 943 num_components = len(components) | |
| 944 if 'tmp_normalization_offset_' in components: | |
| 945 num_components -= 1 | |
| 946 if num_components > 1: | |
| 947 eval_index = 0 | |
| 948 for modelname, y in components.items(): | |
| 949 if modelname == 'tmp_normalization_offset_': | |
| 950 continue | |
| 951 if modelname == '_eval': | |
| 952 modelname = f'eval{eval_index}' | |
| 953 if len(modelname) > 20: | |
| 954 modelname = f'{modelname[0:16]} ...' | |
| 955 if isinstance(y, (int, float)): | |
| 956 y *= np.ones(self._x[~mask].size) | |
| 957 plots += [(self._x[~mask], y, '--')] | |
| 958 if plot_comp_legends: | |
| 959 if modelname[-1] == '_': | |
| 960 legend.append(modelname[:-1]) | |
| 961 else: | |
| 962 legend.append(modelname) | |
| 963 title = kwargs.get('title') | |
| 964 if title is not None: | |
| 965 kwargs.pop('title') | |
| 966 quick_plot(tuple(plots), legend=legend, title=title, block=True, **kwargs) | |
| 967 | |
| 968 @staticmethod | |
| 969 def guess_init_peak(x, y, *args, center_guess=None, use_max_for_center=True): | |
| 970 """ Return a guess for the initial height, center and fwhm for a peak | |
| 971 """ | |
| 972 # print(f'\n\nargs = {args}') | |
| 973 # print(f'center_guess = {center_guess}') | |
| 974 # quick_plot(x, y, vlines=center_guess, block=True) | |
| 975 center_guesses = None | |
| 976 x = np.asarray(x) | |
| 977 y = np.asarray(y) | |
| 978 if len(x) != len(y): | |
| 979 logging.error(f'Invalid x and y lengths ({len(x)}, {len(y)}), skip initial guess') | |
| 980 return(None, None, None) | |
| 981 if isinstance(center_guess, (int, float)): | |
| 982 if len(args): | |
| 983 logging.warning('Ignoring additional arguments for single center_guess value') | |
| 984 center_guesses = [center_guess] | |
| 985 elif isinstance(center_guess, (tuple, list, np.ndarray)): | |
| 986 if len(center_guess) == 1: | |
| 987 logging.warning('Ignoring additional arguments for single center_guess value') | |
| 988 if not isinstance(center_guess[0], (int, float)): | |
| 989 raise ValueError(f'Invalid parameter center_guess ({type(center_guess[0])})') | |
| 990 center_guess = center_guess[0] | |
| 991 else: | |
| 992 if len(args) != 1: | |
| 993 raise ValueError(f'Invalid number of arguments ({len(args)})') | |
| 994 n = args[0] | |
| 995 if not is_index(n, 0, len(center_guess)): | |
| 996 raise ValueError('Invalid argument') | |
| 997 center_guesses = center_guess | |
| 998 center_guess = center_guesses[n] | |
| 999 elif center_guess is not None: | |
| 1000 raise ValueError(f'Invalid center_guess type ({type(center_guess)})') | |
| 1001 # print(f'x = {x}') | |
| 1002 # print(f'y = {y}') | |
| 1003 # print(f'center_guess = {center_guess}') | |
| 1004 | |
| 1005 # Sort the inputs | |
| 1006 index = np.argsort(x) | |
| 1007 x = x[index] | |
| 1008 y = y[index] | |
| 1009 miny = y.min() | |
| 1010 # print(f'miny = {miny}') | |
| 1011 # print(f'x_range = {x[0]} {x[-1]} {len(x)}') | |
| 1012 # print(f'y_range = {y[0]} {y[-1]} {len(y)}') | |
| 1013 # quick_plot(x, y, vlines=center_guess, block=True) | |
| 1014 | |
| 1015 # xx = x | |
| 1016 # yy = y | |
| 1017 # Set range for current peak | |
| 1018 # print(f'n = {n}') | |
| 1019 # print(f'center_guesses = {center_guesses}') | |
| 1020 if center_guesses is not None: | |
| 1021 if len(center_guesses) > 1: | |
| 1022 index = np.argsort(center_guesses) | |
| 1023 n = list(index).index(n) | |
| 1024 # print(f'n = {n}') | |
| 1025 # print(f'index = {index}') | |
| 1026 center_guesses = np.asarray(center_guesses)[index] | |
| 1027 # print(f'center_guesses = {center_guesses}') | |
| 1028 if n == 0: | |
| 1029 low = 0 | |
| 1030 upp = index_nearest(x, (center_guesses[0]+center_guesses[1])/2) | |
| 1031 elif n == len(center_guesses)-1: | |
| 1032 low = index_nearest(x, (center_guesses[n-1]+center_guesses[n])/2) | |
| 1033 upp = len(x) | |
| 1034 else: | |
| 1035 low = index_nearest(x, (center_guesses[n-1]+center_guesses[n])/2) | |
| 1036 upp = index_nearest(x, (center_guesses[n]+center_guesses[n+1])/2) | |
| 1037 # print(f'low = {low}') | |
| 1038 # print(f'upp = {upp}') | |
| 1039 x = x[low:upp] | |
| 1040 y = y[low:upp] | |
| 1041 # quick_plot(x, y, vlines=(x[0], center_guess, x[-1]), block=True) | |
| 1042 | |
| 1043 # Estimate FHHM | |
| 1044 maxy = y.max() | |
| 1045 # print(f'x_range = {x[0]} {x[-1]} {len(x)}') | |
| 1046 # print(f'y_range = {y[0]} {y[-1]} {len(y)} {miny} {maxy}') | |
| 1047 # print(f'center_guess = {center_guess}') | |
| 1048 if center_guess is None: | |
| 1049 center_index = np.argmax(y) | |
| 1050 center = x[center_index] | |
| 1051 height = maxy-miny | |
| 1052 else: | |
| 1053 if use_max_for_center: | |
| 1054 center_index = np.argmax(y) | |
| 1055 center = x[center_index] | |
| 1056 if center_index < 0.1*len(x) or center_index > 0.9*len(x): | |
| 1057 center_index = index_nearest(x, center_guess) | |
| 1058 center = center_guess | |
| 1059 else: | |
| 1060 center_index = index_nearest(x, center_guess) | |
| 1061 center = center_guess | |
| 1062 height = y[center_index]-miny | |
| 1063 # print(f'center_index = {center_index}') | |
| 1064 # print(f'center = {center}') | |
| 1065 # print(f'height = {height}') | |
| 1066 half_height = miny+0.5*height | |
| 1067 # print(f'half_height = {half_height}') | |
| 1068 fwhm_index1 = 0 | |
| 1069 for i in range(center_index, fwhm_index1, -1): | |
| 1070 if y[i] < half_height: | |
| 1071 fwhm_index1 = i | |
| 1072 break | |
| 1073 # print(f'fwhm_index1 = {fwhm_index1} {x[fwhm_index1]}') | |
| 1074 fwhm_index2 = len(x)-1 | |
| 1075 for i in range(center_index, fwhm_index2): | |
| 1076 if y[i] < half_height: | |
| 1077 fwhm_index2 = i | |
| 1078 break | |
| 1079 # print(f'fwhm_index2 = {fwhm_index2} {x[fwhm_index2]}') | |
| 1080 # quick_plot((x,y,'o'), vlines=(x[fwhm_index1], center, x[fwhm_index2]), block=True) | |
| 1081 if fwhm_index1 == 0 and fwhm_index2 < len(x)-1: | |
| 1082 fwhm = 2*(x[fwhm_index2]-center) | |
| 1083 elif fwhm_index1 > 0 and fwhm_index2 == len(x)-1: | |
| 1084 fwhm = 2*(center-x[fwhm_index1]) | |
| 1085 else: | |
| 1086 fwhm = x[fwhm_index2]-x[fwhm_index1] | |
| 1087 # print(f'fwhm_index1 = {fwhm_index1} {x[fwhm_index1]}') | |
| 1088 # print(f'fwhm_index2 = {fwhm_index2} {x[fwhm_index2]}') | |
| 1089 # print(f'fwhm = {fwhm}') | |
| 1090 | |
| 1091 # Return height, center and FWHM | |
| 1092 # quick_plot((x,y,'o'), (xx,yy), vlines=(x[fwhm_index1], center, x[fwhm_index2]), block=True) | |
| 1093 return(height, center, fwhm) | |
| 1094 | |
    def _check_linearity_model(self):
        """Identify the linearity of all model parameters and check if the model is linear or not.

        Classifies each free parameter as linear or non-linear by symbolic
        differentiation (sympy.diff) of the model/parameter expressions, moving
        names from self._linear_parameters to self._nonlinear_parameters as
        non-linearity is detected.

        :return: True when every varying parameter enters the model linearly,
            False otherwise (including when linearity checking is disabled)
        """
        if not self._try_linear_fit:
            # Linearity analysis requires symbolic expressions, which callable
            # models do not provide
            logging.info('Skip linearity check (not yet supported for callable models)')
            return(False)
        free_parameters = [name for name, par in self._parameters.items() if par.vary]
        for component in self._model.components:
            # The temporary normalization offset is constant by construction
            if 'tmp_normalization_offset_c' in component.param_names:
                continue
            if isinstance(component, ExpressionModel):
                # A parameter is non-linear when the second derivative of the
                # expression with respect to it is non-zero
                for name in free_parameters:
                    if diff(component.expr, name, name):
                        self._nonlinear_parameters.append(name)
                        if name in self._linear_parameters:
                            self._linear_parameters.remove(name)
            else:
                # Build-in model: examine parameters tied to others by 'expr'
                # hints; parameters set by expressions are handled through the
                # parameters they depend on
                model_parameters = component.param_names.copy()
                for basename, hint in component.param_hints.items():
                    name = f'{component.prefix}{basename}'
                    if hint.get('expr') is not None:
                        model_parameters.remove(name)
                for name in model_parameters:
                    expr = self._parameters[name].expr
                    if expr is not None:
                        for nname in free_parameters:
                            if name in self._nonlinear_parameters:
                                # Any dependence on a non-linear parameter makes
                                # the free parameter non-linear as well
                                if diff(expr, nname):
                                    self._nonlinear_parameters.append(nname)
                                    if nname in self._linear_parameters:
                                        self._linear_parameters.remove(nname)
                            else:
                                assert(name in self._linear_parameters)
                                # A linear model parameter stays linear in nname
                                # only if the expression is at most first order
                                if diff(expr, nname, nname):
                                    self._nonlinear_parameters.append(nname)
                                    if nname in self._linear_parameters:
                                        self._linear_parameters.remove(nname)
        # The model is linear only when no varying parameter was flagged
        if any(True for name in self._nonlinear_parameters if self._parameters[name].vary):
            return(False)
        return(True)
| 1142 | |
    def _fit_linear_model(self, x, y):
        """Perform a linear fit by direct matrix solution with numpy.

        Builds the design matrix A (one column per free parameter) and a
        constant vector y_const from the model components, solves the
        least-squares system A @ p = y - y_const, and assembles a lmfit
        ModelResult in self._result.

        :param x: independent data (1D array)
        :param y: dependent (possibly normalized) data, same length as x
        :raises ValueError: when an expression cannot be evaluated by asteval
        """
        # Construct the matrix and the free parameter vector
        free_parameters = [name for name, par in self._parameters.items() if par.vary]
        expr_parameters = {name:par.expr for name, par in self._parameters.items()
                if par.expr is not None}
        model_parameters = []
        for component in self._model.components:
            # Skip the temporary normalization offset component
            if 'tmp_normalization_offset_c' in component.param_names:
                continue
            model_parameters += component.param_names
            # Parameters fixed by 'expr' hints are not independent unknowns
            for basename, hint in component.param_hints.items():
                name = f'{component.prefix}{basename}'
                if hint.get('expr') is not None:
                    expr_parameters.pop(name)
                    model_parameters.remove(name)
        # norm rescales un-normalized quantities when the fit runs on
        # normalized data
        norm = 1.0
        if self._normalized:
            norm = self._norm[1]
        # Add expression parameters to asteval
        ast = Interpreter()
        for name, expr in expr_parameters.items():
            ast.symtable[name] = expr
        # Add constant parameters to asteval
        # (renormalize to use correctly in evaluation of expression models)
        for name, par in self._parameters.items():
            if par.expr is None and not par.vary:
                if self._parameter_norms[name]:
                    ast.symtable[name] = par.value*norm
                else:
                    ast.symtable[name] = par.value
        # A: design matrix; y_const: model contribution independent of the
        # free parameters
        A = np.zeros((len(x), len(free_parameters)), dtype='float64')
        y_const = np.zeros(len(x), dtype='float64')
        have_expression_model = False
        for component in self._model.components:
            if isinstance(component, ConstantModel):
                name = component.param_names[0]
                if name in free_parameters:
                    # Free constant: unit column in the design matrix
                    A[:,free_parameters.index(name)] = 1.0
                else:
                    # Fixed constant: fold into the constant vector,
                    # rescaling when the parameter is not normalized
                    if self._parameter_norms[name]:
                        delta_y_const = self._parameters[name]*np.ones(len(x))
                    else:
                        delta_y_const = (self._parameters[name]*norm)*np.ones(len(x))
                    y_const += delta_y_const
            elif isinstance(component, ExpressionModel):
                have_expression_model = True
                # const_expr accumulates the expression minus its linear terms
                const_expr = component.expr
                for name in free_parameters:
                    # d(expr)/d(name) is the design-matrix column for name
                    dexpr_dname = diff(component.expr, name)
                    if dexpr_dname:
                        const_expr = f'{const_expr}-({str(dexpr_dname)})*{name}'
                        if not self._parameter_norms[name]:
                            dexpr_dname = f'({dexpr_dname})/{norm}'
                        # Evaluate the derivative at each x via asteval
                        fx = [(lambda _: ast.eval(str(dexpr_dname)))(ast(f'x={v}')) for v in x]
                        if len(ast.error):
                            raise ValueError(f'Unable to evaluate {dexpr_dname}')
                        A[:,free_parameters.index(name)] += fx
                # FIX: find another solution if expr not supported by simplify
                const_expr = str(simplify(f'({const_expr})/{norm}'))
                delta_y_const = [(lambda _: ast.eval(const_expr))(ast(f'x = {v}')) for v in x]
                y_const += delta_y_const
                if len(ast.error):
                    raise ValueError(f'Unable to evaluate {const_expr}')
            else:
                # Build-in lmfit model component
                free_model_parameters = [name for name in component.param_names
                        if name in free_parameters or name in expr_parameters]
                if not len(free_model_parameters):
                    # Fully fixed component: evaluate once into y_const
                    y_const += component.eval(params=self._parameters, x=x)
                elif isinstance(component, LinearModel):
                    if f'{component.prefix}slope' in free_model_parameters:
                        A[:,free_parameters.index(f'{component.prefix}slope')] = x
                    else:
                        y_const += self._parameters[f'{component.prefix}slope'].value*x
                    if f'{component.prefix}intercept' in free_model_parameters:
                        A[:,free_parameters.index(f'{component.prefix}intercept')] = 1.0
                    else:
                        y_const += self._parameters[f'{component.prefix}intercept'].value* \
                                np.ones(len(x))
                elif isinstance(component, QuadraticModel):
                    if f'{component.prefix}a' in free_model_parameters:
                        A[:,free_parameters.index(f'{component.prefix}a')] = x**2
                    else:
                        y_const += self._parameters[f'{component.prefix}a'].value*x**2
                    if f'{component.prefix}b' in free_model_parameters:
                        A[:,free_parameters.index(f'{component.prefix}b')] = x
                    else:
                        y_const += self._parameters[f'{component.prefix}b'].value*x
                    if f'{component.prefix}c' in free_model_parameters:
                        A[:,free_parameters.index(f'{component.prefix}c')] = 1.0
                    else:
                        y_const += self._parameters[f'{component.prefix}c'].value*np.ones(len(x))
                else:
                    # At this point each build-in model must be strictly proportional to each linear
                    # model parameter. Without this assumption, the model equation is needed
                    # For the current build-in lmfit models, this can only ever be the amplitude
                    assert(len(free_model_parameters) == 1)
                    name = f'{component.prefix}amplitude'
                    assert(free_model_parameters[0] == name)
                    assert(self._parameter_norms[name])
                    expr = self._parameters[name].expr
                    if expr is None:
                        # Evaluate the component at unit amplitude to obtain
                        # its design-matrix column
                        parameters = deepcopy(self._parameters)
                        parameters[name].set(value=1.0)
                        index = free_parameters.index(name)
                        A[:,free_parameters.index(name)] += component.eval(params=parameters, x=x)
                    else:
                        # Amplitude set by an expression: distribute the
                        # component over the free parameters it depends on
                        const_expr = expr
                        parameters = deepcopy(self._parameters)
                        parameters[name].set(value=1.0)
                        dcomp_dname = component.eval(params=parameters, x=x)
                        for nname in free_parameters:
                            dexpr_dnname = diff(expr, nname)
                            if dexpr_dnname:
                                assert(self._parameter_norms[name])
                                fx = np.asarray(dexpr_dnname*dcomp_dname, dtype='float64')
                                if self._parameter_norms[nname]:
                                    A[:,free_parameters.index(nname)] += fx
                                else:
                                    A[:,free_parameters.index(nname)] += fx/norm
                                const_expr = f'{const_expr}-({dexpr_dnname})*{nname}'
                        # FIX: find another solution if expr not supported by simplify
                        const_expr = str(simplify(f'({const_expr})/{norm}'))
                        fx = [(lambda _: ast.eval(const_expr))(ast(f'x = {v}')) for v in x]
                        delta_y_const = np.multiply(fx, dcomp_dname)
                        y_const += delta_y_const
        # Solve the linear least-squares problem directly
        solution, residual, rank, s = np.linalg.lstsq(A, y-y_const, rcond=None)

        # Assemble result (compensate for normalization in expression models)
        for name, value in zip(free_parameters, solution):
            self._parameters[name].set(value=value)
        if self._normalized and (have_expression_model or len(expr_parameters)):
            # Expression-model solutions are in un-normalized units: scale up
            for name, norm in self._parameter_norms.items():
                par = self._parameters[name]
                if par.expr is None and norm:
                    self._parameters[name].set(value=par.value*self._norm[1])
        self._result = ModelResult(self._model, deepcopy(self._parameters))
        self._result.best_fit = self._model.eval(params=self._parameters, x=x)
        if self._normalized and (have_expression_model or len(expr_parameters)):
            # Map the evaluated fit back to the normalized data scale
            if 'tmp_normalization_offset_c' in self._parameters:
                offset = self._parameters['tmp_normalization_offset_c']
            else:
                offset = 0.0
            self._result.best_fit = (self._result.best_fit-offset-self._norm[0])/self._norm[1]
        if self._normalized:
            # Store parameters in normalized units, consistent with _normalize
            for name, norm in self._parameter_norms.items():
                par = self._parameters[name]
                if par.expr is None and norm:
                    value = par.value/self._norm[1]
                    self._parameters[name].set(value=value)
                    self._result.params[name].set(value=value)
        self._result.residual = self._result.best_fit-y
        self._result.components = self._model.components
        self._result.init_params = None
| 1347 | |
    def _normalize(self):
        """Normalize the data and initial parameters.

        Shifts/scales the data by self._norm = (offset, scale) into
        self._y_norm and divides the values and finite bounds of all
        normalized parameters by the scale. Idempotent: a no-op when
        already normalized.
        """
        if self._normalized:
            return
        if self._norm is None:
            # No normalization defined: use the raw data as-is
            if self._y is not None and self._y_norm is None:
                self._y_norm = np.asarray(self._y)
        else:
            if self._y is not None and self._y_norm is None:
                self._y_norm = (np.asarray(self._y)-self._norm[0])/self._norm[1]
                self._y_range = 1.0
            for name, norm in self._parameter_norms.items():
                par = self._parameters[name]
                # Only directly-set (non-expression) normalized parameters scale
                if par.expr is None and norm:
                    value = par.value/self._norm[1]
                    _min = par.min
                    _max = par.max
                    # float_min is a module-level sentinel bound defined
                    # elsewhere in this file; such bounds are left untouched
                    if not np.isinf(_min) and abs(_min) != float_min:
                        _min /= self._norm[1]
                    if not np.isinf(_max) and abs(_max) != float_min:
                        _max /= self._norm[1]
                    par.set(value=value, min=_min, max=_max)
        self._normalized = True
| 1372 | |
def _renormalize(self):
    """Renormalize the data and results.

    Inverse of ``_normalize``: rescales normalized parameter values/bounds and
    (when a fit result exists) the best fit, init fit, standard errors,
    covariance matrix and residual back to physical units using
    ``self._norm = (offset, range)``. chisqr/redchi are deliberately left in
    normalized units (see inline comments).
    """
    if self._norm is None or not self._normalized:
        return
    self._normalized = False
    # Rescale the stored parameters back to physical units
    for name, norm in self._parameter_norms.items():
        par = self._parameters[name]
        if par.expr is None and norm:
            value = par.value*self._norm[1]
            _min = par.min
            _max = par.max
            # Infinite bounds and the float_min sentinel are left unscaled
            if not np.isinf(_min) and abs(_min) != float_min:
                _min *= self._norm[1]
            if not np.isinf(_max) and abs(_max) != float_min:
                _max *= self._norm[1]
            par.set(value=value, min=_min, max=_max)
    if self._result is None:
        return
    # Rescale the fit result itself (offset applies to the fitted curve only)
    self._result.best_fit = self._result.best_fit*self._norm[1]+self._norm[0]
    for name, par in self._result.params.items():
        if self._parameter_norms.get(name, False):
            if par.stderr is not None:
                par.stderr *= self._norm[1]
            if par.expr is None:
                _min = par.min
                _max = par.max
                value = par.value*self._norm[1]
                if par.init_value is not None:
                    par.init_value *= self._norm[1]
                if not np.isinf(_min) and abs(_min) != float_min:
                    _min *= self._norm[1]
                if not np.isinf(_max) and abs(_max) != float_min:
                    _max *= self._norm[1]
                par.set(value=value, min=_min, max=_max)
    if hasattr(self._result, 'init_fit'):
        self._result.init_fit = self._result.init_fit*self._norm[1]+self._norm[0]
    if hasattr(self._result, 'init_values'):
        # Rebuild init_values in physical units (expressions left untouched)
        init_values = {}
        for name, value in self._result.init_values.items():
            if name not in self._parameter_norms or self._parameters[name].expr is not None:
                init_values[name] = value
            elif self._parameter_norms[name]:
                init_values[name] = value*self._norm[1]
        self._result.init_values = init_values
        for name, par in self._result.init_params.items():
            if par.expr is None and self._parameter_norms.get(name, False):
                value = par.value
                _min = par.min
                _max = par.max
                value *= self._norm[1]
                if not np.isinf(_min) and abs(_min) != float_min:
                    _min *= self._norm[1]
                if not np.isinf(_max) and abs(_max) != float_min:
                    _max *= self._norm[1]
                par.set(value=value, min=_min, max=_max)
            # Keep init_value in sync with the (possibly rescaled) value
            par.init_value = par.value
    # Don't renormalize chisqr, it has no useful meaning in physical units
    #self._result.chisqr *= self._norm[1]*self._norm[1]
    if self._result.covar is not None:
        # Scale covariance rows and columns of normalized parameters
        for i, name in enumerate(self._result.var_names):
            if self._parameter_norms.get(name, False):
                for j in range(len(self._result.var_names)):
                    if self._result.covar[i,j] is not None:
                        self._result.covar[i,j] *= self._norm[1]
                    if self._result.covar[j,i] is not None:
                        self._result.covar[j,i] *= self._norm[1]
    # Don't renormalize redchi, it has no useful meaning in physical units
    #self._result.redchi *= self._norm[1]*self._norm[1]
    if self._result.residual is not None:
        self._result.residual *= self._norm[1]
| 1444 | |
def _reset_par_at_boundary(self, par, value):
    """Nudge *value* away from the bounds of the varying parameter *par*.

    When the value sits at (or beyond) a 10% margin from a bound, the
    returned value is pulled back to that margin; otherwise *value* is
    returned unchanged.
    """
    assert par.vary
    name = par.name
    bounds = self._parameter_bounds[name]
    lower = bounds['min']
    upper = bounds['max']
    has_lower = not np.isinf(lower)
    has_upper = not np.isinf(upper)
    if has_lower and has_upper:
        # Both bounds finite: clamp into the central 80% of the interval
        low = 0.9*lower+0.1*upper
        upp = 0.1*lower+0.9*upper
        if value <= low:
            return low
        if value >= upp:
            return upp
    elif has_upper:
        # Upper bound only: back off by a 10% margin below it
        if self._parameter_norms.get(name, False):
            margin = 0.1*self._y_range
        elif upper == 0.0:
            margin = 0.1
        else:
            margin = 0.1*abs(upper)
        upp = upper-margin
        if value >= upp:
            return upp
    elif has_lower:
        # Lower bound only: back off by a 10% margin above it
        if self._parameter_norms.get(name, False):
            margin = 0.1*self._y_range
        elif lower == 0.0:
            margin = 0.1
        else:
            margin = 0.1*abs(lower)
        low = lower+margin
        if value <= low:
            return low
    return value
| 1478 | |
| 1479 | |
class FitMultipeak(Fit):
    """Fit data with multiple peaks on an optional background."""

    def __init__(self, y, x=None, normalize=True):
        """Initialize a multipeak fit over data y(x)."""
        super().__init__(y, x=x, normalize=normalize)
        self._fwhm_max = None   # upper bound on peak fwhm (data units), set per fit
        self._sigma_max = None  # per-peak sigma upper bounds derived from fwhm_max

    @classmethod
    def fit_multipeak(cls, y, centers, x=None, normalize=True, peak_models='gaussian',
            center_exprs=None, fit_type=None, background_order=None, background_exp=False,
            fwhm_max=None, plot_components=False):
        """Construct a FitMultipeak and fit peaks at the given centers.

        Make sure that centers and fwhm_max are in the correct units and consistent with expr
        for a uniform fit (fit_type == 'uniform').

        Returns a tuple (best_fit, residual, best_values, best_errors, redchi, success);
        on failure the arrays and dicts are empty and redchi is float_max.
        """
        fit = cls(y, x=x, normalize=normalize)
        success = fit.fit(centers, fit_type=fit_type, peak_models=peak_models,
                fwhm_max=fwhm_max, center_exprs=center_exprs,
                background_order=background_order, background_exp=background_exp,
                plot_components=plot_components)
        if success:
            return(fit.best_fit, fit.residual, fit.best_values, fit.best_errors, fit.redchi,
                    fit.success)
        else:
            return(np.array([]), np.array([]), {}, {}, float_max, False)

    def fit(self, centers, fit_type=None, peak_models=None, center_exprs=None, fwhm_max=None,
            background_order=None, background_exp=False, plot_components=False,
            param_constraint=False):
        """Build the multipeak model and run the fit.

        Returns True on success, False when the fit raised or produced invalid
        parameters. When an unconstrained fit yields invalid parameters, the
        fit is retried once with parameter constraints enabled.
        """
        self._fwhm_max = fwhm_max
        # Create the multipeak model
        self._create_model(centers, fit_type, peak_models, center_exprs, background_order,
                background_exp, param_constraint)

        # Perform the fit
        try:
            if param_constraint:
                # Tighter tolerances for the constrained retry
                super().fit(fit_kws={'xtol': 1.e-5, 'ftol': 1.e-5, 'gtol': 1.e-5})
            else:
                super().fit()
        except Exception:
            # Any failure inside the optimizer fails this fit
            # (narrowed from a bare except so KeyboardInterrupt propagates)
            return(False)

        # Check for valid fit parameter results
        fit_failure = self._check_validity()
        success = True
        if fit_failure:
            if param_constraint:
                logging.warning(' -> Should not happen with param_constraint set, fail the fit')
                success = False
            else:
                logging.info(' -> Retry fitting with constraints')
                # BUGFIX: propagate the outcome of the constrained retry; the
                # original discarded it and always reported success here
                success = self.fit(centers, fit_type, peak_models, center_exprs,
                        fwhm_max=fwhm_max, background_order=background_order,
                        background_exp=background_exp, plot_components=plot_components,
                        param_constraint=True)
        else:
            # Print report and plot components if requested
            if plot_components:
                self.print_fit_report()
                self.plot()

        return(success)

    def _create_model(self, centers, fit_type=None, peak_models=None, center_exprs=None,
            background_order=None, background_exp=False, param_constraint=False):
        """Create the multipeak model.

        Adds an optional polynomial/exponential background plus one peak model
        per center. For fit_type == 'uniform' the peak centers are tied to a
        common 'scale_factor' parameter through center_exprs.
        """
        if isinstance(centers, (int, float)):
            centers = [centers]
        num_peaks = len(centers)
        if peak_models is None:
            peak_models = num_peaks*['gaussian']
        elif isinstance(peak_models, str):
            peak_models = num_peaks*[peak_models]
        if len(peak_models) != num_peaks:
            raise ValueError(f'Inconsistent number of peaks in peak_models ({len(peak_models)} vs '+
                    f'{num_peaks})')
        if num_peaks == 1:
            # fit_type/center_exprs are meaningless for a single peak
            if fit_type is not None:
                logging.debug('Ignoring fit_type input for fitting one peak')
                fit_type = None
            if center_exprs is not None:
                logging.debug('Ignoring center_exprs input for fitting one peak')
                center_exprs = None
        else:
            if fit_type == 'uniform':
                if center_exprs is None:
                    center_exprs = [f'scale_factor*{cen}' for cen in centers]
                if len(center_exprs) != num_peaks:
                    raise ValueError(f'Inconsistent number of peaks in center_exprs '+
                            f'({len(center_exprs)} vs {num_peaks})')
            elif fit_type == 'unconstrained' or fit_type is None:
                if center_exprs is not None:
                    logging.warning('Ignoring center_exprs input for unconstrained fit')
                    center_exprs = None
            else:
                raise ValueError(f'Invalid fit_type in fit_multigaussian {fit_type}')
        self._sigma_max = None
        if param_constraint:
            # Positive-definite lower bound for amplitudes/widths/centers
            min_value = float_min
            if self._fwhm_max is not None:
                self._sigma_max = np.zeros(num_peaks)
        else:
            min_value = None

        # Reset the fit
        self._model = None
        self._parameters = Parameters()
        self._result = None

        # Add background model
        if background_order is not None:
            if background_order == 0:
                self.add_model('constant', prefix='background', parameters=
                        {'name': 'c', 'value': float_min, 'min': min_value})
            elif background_order == 1:
                self.add_model('linear', prefix='background', slope=0.0, intercept=0.0)
            elif background_order == 2:
                self.add_model('quadratic', prefix='background', a=0.0, b=0.0, c=0.0)
            else:
                raise ValueError(f'Invalid parameter background_order ({background_order})')
        if background_exp:
            self.add_model('exponential', prefix='background', parameters=(
                    {'name': 'amplitude', 'value': float_min, 'min': min_value},
                    {'name': 'decay', 'value': float_min, 'min': min_value}))

        # Add peaks and guess initial fit parameters
        # (asteval evaluates the per-model fwhm->sigma and height->amplitude factors)
        ast = Interpreter()
        if num_peaks == 1:
            height_init, cen_init, fwhm_init = self.guess_init_peak(self._x, self._y)
            if self._fwhm_max is not None and fwhm_init > self._fwhm_max:
                fwhm_init = self._fwhm_max
            ast(f'fwhm = {fwhm_init}')
            ast(f'height = {height_init}')
            sig_init = ast(fwhm_factor[peak_models[0]])
            amp_init = ast(height_factor[peak_models[0]])
            sig_max = None
            if self._sigma_max is not None:
                ast(f'fwhm = {self._fwhm_max}')
                sig_max = ast(fwhm_factor[peak_models[0]])
                self._sigma_max[0] = sig_max
            self.add_model(peak_models[0], parameters=(
                    {'name': 'amplitude', 'value': amp_init, 'min': min_value},
                    {'name': 'center', 'value': cen_init, 'min': min_value},
                    {'name': 'sigma', 'value': sig_init, 'min': min_value, 'max': sig_max}))
        else:
            if fit_type == 'uniform':
                self.add_parameter(name='scale_factor', value=1.0)
            for i in range(num_peaks):
                height_init, cen_init, fwhm_init = self.guess_init_peak(self._x, self._y, i,
                        center_guess=centers)
                if self._fwhm_max is not None and fwhm_init > self._fwhm_max:
                    fwhm_init = self._fwhm_max
                ast(f'fwhm = {fwhm_init}')
                ast(f'height = {height_init}')
                sig_init = ast(fwhm_factor[peak_models[i]])
                amp_init = ast(height_factor[peak_models[i]])
                sig_max = None
                if self._sigma_max is not None:
                    ast(f'fwhm = {self._fwhm_max}')
                    sig_max = ast(fwhm_factor[peak_models[i]])
                    self._sigma_max[i] = sig_max
                if fit_type == 'uniform':
                    self.add_model(peak_models[i], prefix=f'peak{i+1}_', parameters=(
                            {'name': 'amplitude', 'value': amp_init, 'min': min_value},
                            {'name': 'center', 'expr': center_exprs[i]},
                            {'name': 'sigma', 'value': sig_init, 'min': min_value,
                            'max': sig_max}))
                else:
                    # BUGFIX: honor the requested peak model; the original
                    # hard-coded 'gaussian' here, silently ignoring peak_models
                    # for unconstrained multi-peak fits
                    self.add_model(peak_models[i], prefix=f'peak{i+1}_', parameters=(
                            {'name': 'amplitude', 'value': amp_init, 'min': min_value},
                            {'name': 'center', 'value': cen_init, 'min': min_value},
                            {'name': 'sigma', 'value': sig_init, 'min': min_value,
                            'max': sig_max}))

    def _check_validity(self):
        """Check for valid fit parameter results.

        Returns True when any amplitude/height/sigma/fwhm/center/scale_factor
        is non-positive, or a width exceeds its configured maximum.
        """
        fit_failure = False
        # 'compile' here is re.compile (imported at module level); used to pull
        # the peak number out of 'peak<N>_...' parameter names
        index = compile(r'\d+')
        for name, par in self.best_parameters.items():
            if 'background' in name:
                if ((name.endswith('amplitude') and par['value'] <= 0.0) or
                        (name.endswith('decay') and par['value'] <= 0.0)):
                    logging.info(f'Invalid fit result for {name} ({par["value"]})')
                    fit_failure = True
            elif (((name.endswith('amplitude') or name.endswith('height')) and
                    par['value'] <= 0.0) or
                    ((name.endswith('sigma') or name.endswith('fwhm')) and par['value'] <= 0.0) or
                    (name.endswith('center') and par['value'] <= 0.0) or
                    (name == 'scale_factor' and par['value'] <= 0.0)):
                logging.info(f'Invalid fit result for {name} ({par["value"]})')
                fit_failure = True
            if name.endswith('sigma') and self._sigma_max is not None:
                if name == 'sigma':
                    sigma_max = self._sigma_max[0]
                else:
                    sigma_max = self._sigma_max[int(index.search(name).group())-1]
                if par['value'] > sigma_max:
                    logging.info(f'Invalid fit result for {name} ({par["value"]})')
                    fit_failure = True
                elif par['value'] == sigma_max:
                    logging.warning(f'Edge result on for {name} ({par["value"]})')
            if name.endswith('fwhm') and self._fwhm_max is not None:
                if par['value'] > self._fwhm_max:
                    logging.info(f'Invalid fit result for {name} ({par["value"]})')
                    fit_failure = True
                elif par['value'] == self._fwhm_max:
                    logging.warning(f'Edge result on for {name} ({par["value"]})')
        return(fit_failure)
| 1707 | |
| 1708 | |
| 1709 class FitMap(Fit): | |
| 1710 """Fit a map of data | |
| 1711 """ | |
def __init__(self, ymap, x=None, models=None, normalize=True, transpose=None, **kwargs):
    """Initialize a map fit over ymap with signal coordinate x.

    ymap may be a tuple/list/ndarray (x required) or an xarray.DataArray
    (x taken from its last dimension). The last axis of ymap must match x.
    When models is given, the models are added and the fit is run immediately
    with the remaining kwargs.
    """
    super().__init__(None)
    self._best_errors = None
    self._best_fit = None
    self._best_parameters = None
    self._best_values = None
    self._inv_transpose = None
    self._max_nfev = None
    self._memfolder = None
    self._new_parameters = None
    self._out_of_bounds = None
    self._plot = False
    self._print_report = False
    self._redchi = None
    self._redchi_cutoff = 0.1
    self._skip_init = True
    self._success = None
    self._transpose = None
    self._try_no_bounds = True

    # At this point the fastest index should always be the signal dimension so that the slowest
    # ndim-1 dimensions are the map dimensions
    if isinstance(ymap, (tuple, list, np.ndarray)):
        self._x = np.asarray(x)
    elif have_xarray and isinstance(ymap, xr.DataArray):
        if x is not None:
            # BUGFIX: added the missing f-prefix so the supplied x is actually
            # interpolated instead of logging the literal '{x}'
            logging.warning(f'Ignoring superfluous input x ({x}) in Fit.__init__')
        self._x = np.asarray(ymap[ymap.dims[-1]])
    else:
        illegal_value(ymap, 'ymap', 'FitMap:__init__', raise_error=True)
    self._ymap = ymap

    # Verify the input parameters
    if self._x.ndim != 1:
        raise ValueError(f'Invalid dimension for input x {self._x.ndim}')
    if self._ymap.ndim < 2:
        raise ValueError('Invalid number of dimension of the input dataset '+
                f'{self._ymap.ndim}')
    if self._x.size != self._ymap.shape[-1]:
        raise ValueError(f'Inconsistent x and y dimensions ({self._x.size} vs '+
                f'{self._ymap.shape[-1]})')
    if not isinstance(normalize, bool):
        logging.warning(f'Invalid value for normalize ({normalize}) in Fit.__init__: '+
                'setting normalize to True')
        normalize = True
    if isinstance(transpose, bool) and not transpose:
        transpose = None
    if transpose is not None and self._ymap.ndim < 3:
        # NOTE(review): transpose is only warned about, not cleared, here;
        # the checks below will warn again for a 1-D map — confirm intended
        logging.warning(f'Transpose meaningless for {self._ymap.ndim-1}D data maps: ignoring '+
                'transpose')
    if transpose is not None:
        if self._ymap.ndim == 3 and isinstance(transpose, bool) and transpose:
            self._transpose = (1, 0)
        elif not isinstance(transpose, (tuple, list)):
            logging.warning(f'Invalid data type for transpose ({transpose}, '+
                    f'{type(transpose)}) in Fit.__init__: setting transpose to False')
        elif len(transpose) != self._ymap.ndim-1:
            logging.warning(f'Invalid dimension for transpose ({transpose}, must be equal to '+
                    f'{self._ymap.ndim-1}) in Fit.__init__: setting transpose to False')
        elif any(i not in transpose for i in range(len(transpose))):
            logging.warning(f'Invalid index in transpose ({transpose}) '+
                    f'in Fit.__init__: setting transpose to False')
        elif not all(i==transpose[i] for i in range(self._ymap.ndim-1)):
            # Only store a transpose that actually permutes the map axes
            self._transpose = transpose
        if self._transpose is not None:
            self._inv_transpose = tuple(self._transpose.index(i)
                    for i in range(len(self._transpose)))

    # Flatten the map (transpose if requested)
    # Store the flattened map in self._ymap_norm, whether normalized or not
    if self._transpose is not None:
        self._ymap_norm = np.transpose(np.asarray(self._ymap), list(self._transpose)+
                [len(self._transpose)])
    else:
        self._ymap_norm = np.asarray(self._ymap)
    self._map_dim = int(self._ymap_norm.size/self._x.size)
    self._map_shape = self._ymap_norm.shape[:-1]
    self._ymap_norm = np.reshape(self._ymap_norm, (self._map_dim, self._x.size))

    # Check if a mask is provided
    # (self._mask is presumably initialized to None in Fit.__init__ — verify)
    if 'mask' in kwargs:
        self._mask = kwargs.pop('mask')
    if self._mask is None:
        ymap_min = float(self._ymap_norm.min())
        ymap_max = float(self._ymap_norm.max())
    else:
        self._mask = np.asarray(self._mask).astype(bool)
        if self._x.size != self._mask.size:
            raise ValueError(f'Inconsistent mask dimension ({self._x.size} vs '+
                    f'{self._mask.size})')
        ymap_masked = np.asarray(self._ymap_norm)[:,~self._mask]
        ymap_min = float(ymap_masked.min())
        ymap_max = float(ymap_masked.max())

    # Normalize the data
    self._y_range = ymap_max-ymap_min
    if normalize and self._y_range > 0.0:
        self._norm = (ymap_min, self._y_range)
        self._ymap_norm = (self._ymap_norm-self._norm[0])/self._norm[1]
    else:
        # Without normalization, scale the redchi cutoff to the data range
        self._redchi_cutoff *= self._y_range**2
    if models is not None:
        if callable(models) or isinstance(models, str):
            kwargs = self.add_model(models, **kwargs)
        elif isinstance(models, (tuple, list)):
            for model in models:
                kwargs = self.add_model(model, **kwargs)
        self.fit(**kwargs)
| 1820 | |
@classmethod
def fit_map(cls, ymap, models, x=None, normalize=True, **kwargs):
    """Construct a FitMap for ymap and immediately fit the given models."""
    fit = cls(ymap, x=x, models=models, normalize=normalize, **kwargs)
    return fit
| 1824 | |
@property
def best_errors(self):
    """Standard errors of the best-fit parameters over the map."""
    return self._best_errors
| 1828 | |
@property
def best_fit(self):
    """Best-fit curves over the map."""
    return self._best_fit
| 1832 | |
@property
def best_results(self):
    """Convert the input data array to a data set and add the fit results.

    Returns an xarray.Dataset holding the input map, the best fit, the
    optional mask, and per-parameter value/error arrays, or None when no
    fit results are available or xarray is missing.
    """
    if self.best_values is None or self.best_errors is None or self.best_fit is None:
        return(None)
    if not have_xarray:
        logging.warning('Unable to load xarray module')
        return(None)
    best_values = self.best_values
    best_errors = self.best_errors
    if isinstance(self._ymap, xr.DataArray):
        # Reuse the DataArray's own dimensions and name
        best_results = self._ymap.to_dataset()
        dims = self._ymap.dims
        fit_name = f'{self._ymap.name}_fit'
    else:
        # Plain array input: synthesize index coordinates per map dimension
        coords = {f'dim{n}_index':([f'dim{n}_index'], range(self._ymap.shape[n]))
                for n in range(self._ymap.ndim-1)}
        coords['x'] = (['x'], self._x)
        dims = list(coords.keys())
        best_results = xr.Dataset(coords=coords)
        best_results['y'] = (dims, self._ymap)
        fit_name = 'y_fit'
    best_results[fit_name] = (dims, self.best_fit)
    if self._mask is not None:
        best_results['mask'] = self._mask
    # Parameter values/errors vary only over the map dims (all but the last)
    for n in range(best_values.shape[0]):
        best_results[f'{self._best_parameters[n]}_values'] = (dims[:-1], best_values[n])
        best_results[f'{self._best_parameters[n]}_errors'] = (dims[:-1], best_errors[n])
    best_results.attrs['components'] = self.components
    return(best_results)
| 1864 | |
@property
def best_values(self):
    """Best-fit parameter values over the map."""
    return self._best_values
| 1868 | |
@property
def chisqr(self):
    """Not available for map fits; warns and returns None."""
    logging.warning('property chisqr not defined for fit.FitMap')
    return None
| 1873 | |
@property
def components(self):
    """Describe each fitted model component and its parameters.

    Returns a dict keyed by a display name per component, each entry holding
    a 'parameters' dict (free / expression / fixed-value info) and, for
    expression models, the 'expr' string. Empty dict when no result exists.
    """
    components = {}
    if self._result is None:
        logging.warning('Unable to collect components in FitMap.components')
        return(components)
    for component in self._result.components:
        # Skip the internal constant model added for normalization offsets
        if 'tmp_normalization_offset_c' in component.param_names:
            continue
        parameters = {}
        for name in component.param_names:
            if self._parameters[name].vary:
                parameters[name] = {'free': True}
            elif self._parameters[name].expr is not None:
                parameters[name] = {'free': False, 'expr': self._parameters[name].expr}
            else:
                parameters[name] = {'free': False, 'value': self.init_parameters[name]['value']}
        expr = None
        if isinstance(component, ExpressionModel):
            # Expression models: use the model name (minus trailing '_')
            name = component._name
            if name[-1] == '_':
                name = name[:-1]
            expr = component.expr
        else:
            # Built-in models: label as 'prefix (model)' when prefixed
            prefix = component.prefix
            if len(prefix):
                if prefix[-1] == '_':
                    prefix = prefix[:-1]
                name = f'{prefix} ({component._name})'
            else:
                name = f'{component._name}'
        if expr is None:
            components[name] = {'parameters': parameters}
        else:
            components[name] = {'expr': expr, 'parameters': parameters}
    return(components)
| 1910 | |
@property
def covar(self):
    """Not available for map fits; warns and returns None."""
    logging.warning('property covar not defined for fit.FitMap')
    return None
| 1915 | |
@property
def max_nfev(self):
    """Maximum number of function evaluations observed over the map."""
    return self._max_nfev
| 1919 | |
@property
def num_func_eval(self):
    """Not available for map fits; warns and returns None."""
    logging.warning('property num_func_eval not defined for fit.FitMap')
    return None
| 1924 | |
@property
def out_of_bounds(self):
    """Per-point flags for fits that ended out of bounds."""
    return self._out_of_bounds
| 1928 | |
@property
def redchi(self):
    """Reduced chi-square values over the map."""
    return self._redchi
| 1932 | |
@property
def residual(self):
    """Difference between the (optionally masked) data and the best fit."""
    if self.best_fit is None:
        return None
    if self._mask is None:
        return np.asarray(self._ymap) - self.best_fit
    # Apply the signal mask on the flattened map, then restore the map shape
    flattened = np.reshape(np.asarray(self._ymap), (self._map_dim, self._x.size))
    masked = flattened[:, ~self._mask]
    shaped = np.reshape(masked, list(self._map_shape) + [masked.shape[-1]])
    return shaped - self.best_fit
| 1945 | |
@property
def success(self):
    """Per-point fit success flags over the map."""
    return self._success
| 1949 | |
@property
def var_names(self):
    """Not available for map fits; warns and returns None."""
    logging.warning('property var_names not defined for fit.FitMap')
    return None
| 1954 | |
@property
def y(self):
    """Not available for map fits; warns and returns None."""
    logging.warning('property y not defined for fit.FitMap')
    return None
| 1959 | |
@property
def ymap(self):
    """The input data map."""
    return self._ymap
| 1963 | |
def best_parameters(self, dims=None):
    """Return the best-fit parameters, either globally or at one map point.

    With dims=None, returns the list of fitted parameter names. With a map
    index tuple dims, returns a dict per parameter with value, error,
    init_value, bounds, vary flag and expression.
    """
    if dims is None:
        return(self._best_parameters)
    if not isinstance(dims, (list, tuple)) or len(dims) != len(self._map_shape):
        illegal_value(dims, 'dims', 'FitMap.best_parameters', raise_error=True)
    if self.best_values is None or self.best_errors is None:
        logging.warning(f'Unable to obtain best parameter values for dims = {dims} in '+
                'FitMap.best_parameters')
        return({})
    # Create current parameters
    parameters = deepcopy(self._parameters)
    for n, name in enumerate(self._best_parameters):
        if self._parameters[name].vary:
            parameters[name].set(value=self.best_values[n][dims])
            parameters[name].stderr = self.best_errors[n][dims]
    parameters_dict = {}
    for name in sorted(parameters):
        # Exclude the internal normalization-offset parameter
        if name != 'tmp_normalization_offset_c':
            par = parameters[name]
            parameters_dict[name] = {'value': par.value, 'error': par.stderr,
                    'init_value': self.init_parameters[name]['value'], 'min': par.min,
                    'max': par.max, 'vary': par.vary, 'expr': par.expr}
    return(parameters_dict)
| 1987 | |
def freemem(self):
    """Release the memmap folder created for parallel fitting.

    Best-effort cleanup: a failure to remove the folder is logged, never
    raised, and the folder reference is kept so a retry remains possible.
    """
    if self._memfolder is None:
        return
    try:
        rmtree(self._memfolder)
        self._memfolder = None
    except OSError:
        # Narrowed from a bare except: shutil.rmtree reports failures as OSError
        logging.warning('Could not clean-up automatically.')
| 1996 | |
def plot(self, dims, y_title=None, plot_residual=False, plot_comp_legends=False,
        plot_masked_data=True):
    """Plot the data, best fit, and model components at one map point.

    dims is a map index tuple selecting the point; optional flags add the
    residual, masked data points, and per-component legend entries.
    """
    if not isinstance(dims, (list, tuple)) or len(dims) != len(self._map_shape):
        illegal_value(dims, 'dims', 'FitMap.plot', raise_error=True)
    if self._result is None or self.best_fit is None or self.best_values is None:
        logging.warning(f'Unable to plot fit for dims = {dims} in FitMap.plot')
        return
    if y_title is None or not isinstance(y_title, str):
        y_title = 'data'
    if self._mask is None:
        # No mask: use an all-False mask and skip masked-data plotting
        mask = np.zeros(self._x.size).astype(bool)
        plot_masked_data = False
    else:
        mask = self._mask
    plots = [(self._x, np.asarray(self._ymap[dims]), 'b.')]
    legend = [y_title]
    if plot_masked_data:
        plots += [(self._x[mask], np.asarray(self._ymap)[(*dims,mask)], 'bx')]
        legend += ['masked data']
    plots += [(self._x[~mask], self.best_fit[dims], 'k-')]
    legend += ['best fit']
    if plot_residual:
        plots += [(self._x[~mask], self.residual[dims], 'k--')]
        legend += ['residual']
    # Create current parameters
    parameters = deepcopy(self._parameters)
    for name in self._best_parameters:
        if self._parameters[name].vary:
            parameters[name].set(value=
                    self.best_values[self._best_parameters.index(name)][dims])
    for component in self._result.components:
        # Skip the internal normalization-offset component
        if 'tmp_normalization_offset_c' in component.param_names:
            continue
        if isinstance(component, ExpressionModel):
            prefix = component._name
            if prefix[-1] == '_':
                prefix = prefix[:-1]
            modelname = f'{prefix}: {component.expr}'
        else:
            prefix = component.prefix
            if len(prefix):
                if prefix[-1] == '_':
                    prefix = prefix[:-1]
                modelname = f'{prefix} ({component._name})'
            else:
                modelname = f'{component._name}'
        # Truncate long legend labels
        if len(modelname) > 20:
            modelname = f'{modelname[0:16]} ...'
        y = component.eval(params=parameters, x=self._x[~mask])
        # Constant components evaluate to a scalar; broadcast to the x grid
        if isinstance(y, (int, float)):
            y *= np.ones(self._x[~mask].size)
        plots += [(self._x[~mask], y, '--')]
        if plot_comp_legends:
            legend.append(modelname)
    quick_plot(tuple(plots), legend=legend, title=str(dims), block=True)
| 2052 | |
| 2053 def fit(self, **kwargs): | |
| 2054 # t0 = time() | |
| 2055 # Check input parameters | |
| 2056 if self._model is None: | |
| 2057 logging.error('Undefined fit model') | |
| 2058 if 'num_proc' in kwargs: | |
| 2059 num_proc = kwargs.pop('num_proc') | |
| 2060 if not is_int(num_proc, ge=1): | |
| 2061 illegal_value(num_proc, 'num_proc', 'FitMap.fit', raise_error=True) | |
| 2062 else: | |
| 2063 num_proc = cpu_count() | |
| 2064 if num_proc > 1 and not have_joblib: | |
| 2065 logging.warning(f'Missing joblib in the conda environment, running FitMap serially') | |
| 2066 num_proc = 1 | |
| 2067 if num_proc > cpu_count(): | |
| 2068 logging.warning(f'The requested number of processors ({num_proc}) exceeds the maximum '+ | |
| 2069 f'number of processors, num_proc reduced to ({cpu_count()})') | |
| 2070 num_proc = cpu_count() | |
| 2071 if 'try_no_bounds' in kwargs: | |
| 2072 self._try_no_bounds = kwargs.pop('try_no_bounds') | |
| 2073 if not isinstance(self._try_no_bounds, bool): | |
| 2074 illegal_value(self._try_no_bounds, 'try_no_bounds', 'FitMap.fit', raise_error=True) | |
| 2075 if 'redchi_cutoff' in kwargs: | |
| 2076 self._redchi_cutoff = kwargs.pop('redchi_cutoff') | |
| 2077 if not is_num(self._redchi_cutoff, gt=0): | |
| 2078 illegal_value(self._redchi_cutoff, 'redchi_cutoff', 'FitMap.fit', raise_error=True) | |
| 2079 if 'print_report' in kwargs: | |
| 2080 self._print_report = kwargs.pop('print_report') | |
| 2081 if not isinstance(self._print_report, bool): | |
| 2082 illegal_value(self._print_report, 'print_report', 'FitMap.fit', raise_error=True) | |
| 2083 if 'plot' in kwargs: | |
| 2084 self._plot = kwargs.pop('plot') | |
| 2085 if not isinstance(self._plot, bool): | |
| 2086 illegal_value(self._plot, 'plot', 'FitMap.fit', raise_error=True) | |
| 2087 if 'skip_init' in kwargs: | |
| 2088 self._skip_init = kwargs.pop('skip_init') | |
| 2089 if not isinstance(self._skip_init, bool): | |
| 2090 illegal_value(self._skip_init, 'skip_init', 'FitMap.fit', raise_error=True) | |
| 2091 | |
| 2092 # Apply mask if supplied: | |
| 2093 if 'mask' in kwargs: | |
| 2094 self._mask = kwargs.pop('mask') | |
| 2095 if self._mask is not None: | |
| 2096 self._mask = np.asarray(self._mask).astype(bool) | |
| 2097 if self._x.size != self._mask.size: | |
| 2098 raise ValueError(f'Inconsistent x and mask dimensions ({self._x.size} vs '+ | |
| 2099 f'{self._mask.size})') | |
| 2100 | |
| 2101 # Add constant offset for a normalized single component model | |
| 2102 if self._result is None and self._norm is not None and self._norm[0]: | |
| 2103 self.add_model('constant', prefix='tmp_normalization_offset_', parameters={'name': 'c', | |
| 2104 'value': -self._norm[0], 'vary': False, 'norm': True}) | |
| 2105 #'value': -self._norm[0]/self._norm[1], 'vary': False, 'norm': False}) | |
| 2106 | |
| 2107 # Adjust existing parameters for refit: | |
| 2108 if 'parameters' in kwargs: | |
| 2109 # print('\nIn FitMap before adjusting existing parameters for refit:') | |
| 2110 # self._parameters.pretty_print() | |
| 2111 # if self._result is None: | |
| 2112 # raise ValueError('Invalid parameter parameters ({parameters})') | |
| 2113 # if self._best_values is None: | |
| 2114 # raise ValueError('Valid self._best_values required for refitting in FitMap.fit') | |
| 2115 parameters = kwargs.pop('parameters') | |
| 2116 # print(f'\nparameters:\n{parameters}') | |
| 2117 if isinstance(parameters, dict): | |
| 2118 parameters = (parameters, ) | |
| 2119 elif not is_dict_series(parameters): | |
| 2120 illegal_value(parameters, 'parameters', 'Fit.fit', raise_error=True) | |
| 2121 for par in parameters: | |
| 2122 name = par['name'] | |
| 2123 if name not in self._parameters: | |
| 2124 raise ValueError(f'Unable to match {name} parameter {par} to an existing one') | |
| 2125 if self._parameters[name].expr is not None: | |
| 2126 raise ValueError(f'Unable to modify {name} parameter {par} (currently an '+ | |
| 2127 'expression)') | |
| 2128 value = par.get('value') | |
| 2129 vary = par.get('vary') | |
| 2130 if par.get('expr') is not None: | |
| 2131 raise KeyError(f'Illegal "expr" key in {name} parameter {par}') | |
| 2132 self._parameters[name].set(value=value, vary=vary, min=par.get('min'), | |
| 2133 max=par.get('max')) | |
| 2134 # Overwrite existing best values for fixed parameters when a value is specified | |
| 2135 # print(f'best values befored resetting:\n{self._best_values}') | |
| 2136 if isinstance(value, (int, float)) and vary is False: | |
| 2137 for i, nname in enumerate(self._best_parameters): | |
| 2138 if nname == name: | |
| 2139 self._best_values[i] = value | |
| 2140 # print(f'best values after resetting (value={value}, vary={vary}):\n{self._best_values}') | |
| 2141 #RV print('\nIn FitMap after adjusting existing parameters for refit:') | |
| 2142 #RV self._parameters.pretty_print() | |
| 2143 | |
| 2144 # Check for uninitialized parameters | |
| 2145 for name, par in self._parameters.items(): | |
| 2146 if par.expr is None: | |
| 2147 value = par.value | |
| 2148 if value is None or np.isinf(value) or np.isnan(value): | |
| 2149 value = 1.0 | |
| 2150 if self._norm is None or name not in self._parameter_norms: | |
| 2151 self._parameters[name].set(value=value) | |
| 2152 elif self._parameter_norms[name]: | |
| 2153 self._parameters[name].set(value=value*self._norm[1]) | |
| 2154 | |
| 2155 # Create the best parameter list, consisting of all varying parameters plus the expression | |
| 2156 # parameters in order to collect their errors | |
| 2157 if self._result is None: | |
| 2158 # Initial fit | |
| 2159 assert(self._best_parameters is None) | |
| 2160 self._best_parameters = [name for name, par in self._parameters.items() | |
| 2161 if par.vary or par.expr is not None] | |
| 2162 num_new_parameters = 0 | |
| 2163 else: | |
| 2164 # Refit | |
| 2165 assert(len(self._best_parameters)) | |
| 2166 self._new_parameters = [name for name, par in self._parameters.items() | |
| 2167 if name != 'tmp_normalization_offset_c' and name not in self._best_parameters and | |
| 2168 (par.vary or par.expr is not None)] | |
| 2169 num_new_parameters = len(self._new_parameters) | |
| 2170 num_best_parameters = len(self._best_parameters) | |
| 2171 | |
| 2172 # Flatten and normalize the best values of the previous fit, remove the remaining results | |
| 2173 # of the previous fit | |
| 2174 if self._result is not None: | |
| 2175 # print('\nBefore flatten and normalize:') | |
| 2176 # print(f'self._best_values:\n{self._best_values}') | |
| 2177 self._out_of_bounds = None | |
| 2178 self._max_nfev = None | |
| 2179 self._redchi = None | |
| 2180 self._success = None | |
| 2181 self._best_fit = None | |
| 2182 self._best_errors = None | |
| 2183 assert(self._best_values is not None) | |
| 2184 assert(self._best_values.shape[0] == num_best_parameters) | |
| 2185 assert(self._best_values.shape[1:] == self._map_shape) | |
| 2186 if self._transpose is not None: | |
| 2187 self._best_values = np.transpose(self._best_values, | |
| 2188 [0]+[i+1 for i in self._transpose]) | |
| 2189 self._best_values = [np.reshape(self._best_values[i], self._map_dim) | |
| 2190 for i in range(num_best_parameters)] | |
| 2191 if self._norm is not None: | |
| 2192 for i, name in enumerate(self._best_parameters): | |
| 2193 if self._parameter_norms.get(name, False): | |
| 2194 self._best_values[i] /= self._norm[1] | |
| 2195 #RV print('\nAfter flatten and normalize:') | |
| 2196 #RV print(f'self._best_values:\n{self._best_values}') | |
| 2197 | |
| 2198 # Normalize the initial parameters (and best values for a refit) | |
| 2199 # print('\nIn FitMap before normalize:') | |
| 2200 # self._parameters.pretty_print() | |
| 2201 # print(f'\nparameter_norms:\n{self._parameter_norms}\n') | |
| 2202 self._normalize() | |
| 2203 # print('\nIn FitMap after normalize:') | |
| 2204 # self._parameters.pretty_print() | |
| 2205 # print(f'\nparameter_norms:\n{self._parameter_norms}\n') | |
| 2206 | |
| 2207 # Prevent initial values from sitting at boundaries | |
| 2208 self._parameter_bounds = {name:{'min': par.min, 'max': par.max} | |
| 2209 for name, par in self._parameters.items() if par.vary} | |
| 2210 for name, par in self._parameters.items(): | |
| 2211 if par.vary: | |
| 2212 par.set(value=self._reset_par_at_boundary(par, par.value)) | |
| 2213 # print('\nAfter checking boundaries:') | |
| 2214 # self._parameters.pretty_print() | |
| 2215 | |
| 2216 # Set parameter bounds to unbound (only use bounds when fit fails) | |
| 2217 if self._try_no_bounds: | |
| 2218 for name in self._parameter_bounds.keys(): | |
| 2219 self._parameters[name].set(min=-np.inf, max=np.inf) | |
| 2220 | |
| 2221 # Allocate memory to store fit results | |
| 2222 if self._mask is None: | |
| 2223 x_size = self._x.size | |
| 2224 else: | |
| 2225 x_size = self._x[~self._mask].size | |
| 2226 if num_proc == 1: | |
| 2227 self._out_of_bounds_flat = np.zeros(self._map_dim, dtype=bool) | |
| 2228 self._max_nfev_flat = np.zeros(self._map_dim, dtype=bool) | |
| 2229 self._redchi_flat = np.zeros(self._map_dim, dtype=np.float64) | |
| 2230 self._success_flat = np.zeros(self._map_dim, dtype=bool) | |
| 2231 self._best_fit_flat = np.zeros((self._map_dim, x_size), | |
| 2232 dtype=self._ymap_norm.dtype) | |
| 2233 self._best_errors_flat = [np.zeros(self._map_dim, dtype=np.float64) | |
| 2234 for _ in range(num_best_parameters+num_new_parameters)] | |
| 2235 if self._result is None: | |
| 2236 self._best_values_flat = [np.zeros(self._map_dim, dtype=np.float64) | |
| 2237 for _ in range(num_best_parameters)] | |
| 2238 else: | |
| 2239 self._best_values_flat = self._best_values | |
| 2240 self._best_values_flat += [np.zeros(self._map_dim, dtype=np.float64) | |
| 2241 for _ in range(num_new_parameters)] | |
| 2242 else: | |
| 2243 self._memfolder = './joblib_memmap' | |
| 2244 try: | |
| 2245 mkdir(self._memfolder) | |
| 2246 except FileExistsError: | |
| 2247 pass | |
| 2248 filename_memmap = path.join(self._memfolder, 'out_of_bounds_memmap') | |
| 2249 self._out_of_bounds_flat = np.memmap(filename_memmap, dtype=bool, | |
| 2250 shape=(self._map_dim), mode='w+') | |
| 2251 filename_memmap = path.join(self._memfolder, 'max_nfev_memmap') | |
| 2252 self._max_nfev_flat = np.memmap(filename_memmap, dtype=bool, | |
| 2253 shape=(self._map_dim), mode='w+') | |
| 2254 filename_memmap = path.join(self._memfolder, 'redchi_memmap') | |
| 2255 self._redchi_flat = np.memmap(filename_memmap, dtype=np.float64, | |
| 2256 shape=(self._map_dim), mode='w+') | |
| 2257 filename_memmap = path.join(self._memfolder, 'success_memmap') | |
| 2258 self._success_flat = np.memmap(filename_memmap, dtype=bool, | |
| 2259 shape=(self._map_dim), mode='w+') | |
| 2260 filename_memmap = path.join(self._memfolder, 'best_fit_memmap') | |
| 2261 self._best_fit_flat = np.memmap(filename_memmap, dtype=self._ymap_norm.dtype, | |
| 2262 shape=(self._map_dim, x_size), mode='w+') | |
| 2263 self._best_errors_flat = [] | |
| 2264 for i in range(num_best_parameters+num_new_parameters): | |
| 2265 filename_memmap = path.join(self._memfolder, f'best_errors_memmap_{i}') | |
| 2266 self._best_errors_flat.append(np.memmap(filename_memmap, dtype=np.float64, | |
| 2267 shape=self._map_dim, mode='w+')) | |
| 2268 self._best_values_flat = [] | |
| 2269 for i in range(num_best_parameters): | |
| 2270 filename_memmap = path.join(self._memfolder, f'best_values_memmap_{i}') | |
| 2271 self._best_values_flat.append(np.memmap(filename_memmap, dtype=np.float64, | |
| 2272 shape=self._map_dim, mode='w+')) | |
| 2273 if self._result is not None: | |
| 2274 self._best_values_flat[i][:] = self._best_values[i][:] | |
| 2275 for i in range(num_new_parameters): | |
| 2276 filename_memmap = path.join(self._memfolder, | |
| 2277 f'best_values_memmap_{i+num_best_parameters}') | |
| 2278 self._best_values_flat.append(np.memmap(filename_memmap, dtype=np.float64, | |
| 2279 shape=self._map_dim, mode='w+')) | |
| 2280 | |
| 2281 # Update the best parameter list | |
| 2282 if num_new_parameters: | |
| 2283 self._best_parameters += self._new_parameters | |
| 2284 | |
| 2285 # Perform the first fit to get model component info and initial parameters | |
| 2286 current_best_values = {} | |
| 2287 # print(f'0 before:\n{current_best_values}') | |
| 2288 # t1 = time() | |
| 2289 self._result = self._fit(0, current_best_values, return_result=True, **kwargs) | |
| 2290 # t2 = time() | |
| 2291 # print(f'0 after:\n{current_best_values}') | |
| 2292 # print('\nAfter the first fit:') | |
| 2293 # self._parameters.pretty_print() | |
| 2294 # print(self._result.fit_report(show_correl=False)) | |
| 2295 | |
| 2296 # Remove all irrelevant content from self._result | |
| 2297 for attr in ('_abort', 'aborted', 'aic', 'best_fit', 'best_values', 'bic', 'calc_covar', | |
| 2298 'call_kws', 'chisqr', 'ci_out', 'col_deriv', 'covar', 'data', 'errorbars', | |
| 2299 'flatchain', 'ier', 'init_vals', 'init_fit', 'iter_cb', 'jacfcn', 'kws', | |
| 2300 'last_internal_values', 'lmdif_message', 'message', 'method', 'nan_policy', | |
| 2301 'ndata', 'nfev', 'nfree', 'params', 'redchi', 'reduce_fcn', 'residual', 'result', | |
| 2302 'scale_covar', 'show_candidates', 'calc_covar', 'success', 'userargs', 'userfcn', | |
| 2303 'userkws', 'values', 'var_names', 'weights', 'user_options'): | |
| 2304 try: | |
| 2305 delattr(self._result, attr) | |
| 2306 except AttributeError: | |
| 2307 # logging.warning(f'Unknown attribute {attr} in fit.FtMap._cleanup_result') | |
| 2308 pass | |
| 2309 | |
| 2310 # t3 = time() | |
| 2311 if num_proc == 1: | |
| 2312 # Perform the remaining fits serially | |
| 2313 for n in range(1, self._map_dim): | |
| 2314 # print(f'{n} before:\n{current_best_values}') | |
| 2315 self._fit(n, current_best_values, **kwargs) | |
| 2316 # print(f'{n} after:\n{current_best_values}') | |
| 2317 else: | |
| 2318 # Perform the remaining fits in parallel | |
| 2319 num_fit = self._map_dim-1 | |
| 2320 # print(f'num_fit = {num_fit}') | |
| 2321 if num_proc > num_fit: | |
| 2322 logging.warning(f'The requested number of processors ({num_proc}) exceeds the '+ | |
| 2323 f'number of fits, num_proc reduced to ({num_fit})') | |
| 2324 num_proc = num_fit | |
| 2325 num_fit_per_proc = 1 | |
| 2326 else: | |
| 2327 num_fit_per_proc = round((num_fit)/num_proc) | |
| 2328 if num_proc*num_fit_per_proc < num_fit: | |
| 2329 num_fit_per_proc +=1 | |
| 2330 # print(f'num_fit_per_proc = {num_fit_per_proc}') | |
| 2331 num_fit_batch = min(num_fit_per_proc, 40) | |
| 2332 # print(f'num_fit_batch = {num_fit_batch}') | |
| 2333 with Parallel(n_jobs=num_proc) as parallel: | |
| 2334 parallel(delayed(self._fit_parallel)(current_best_values, num_fit_batch, | |
| 2335 n_start, **kwargs) for n_start in range(1, self._map_dim, num_fit_batch)) | |
| 2336 # t4 = time() | |
| 2337 | |
| 2338 # Renormalize the initial parameters for external use | |
| 2339 if self._norm is not None and self._normalized: | |
| 2340 init_values = {} | |
| 2341 for name, value in self._result.init_values.items(): | |
| 2342 if name not in self._parameter_norms or self._parameters[name].expr is not None: | |
| 2343 init_values[name] = value | |
| 2344 elif self._parameter_norms[name]: | |
| 2345 init_values[name] = value*self._norm[1] | |
| 2346 self._result.init_values = init_values | |
| 2347 for name, par in self._result.init_params.items(): | |
| 2348 if par.expr is None and self._parameter_norms.get(name, False): | |
| 2349 _min = par.min | |
| 2350 _max = par.max | |
| 2351 value = par.value*self._norm[1] | |
| 2352 if not np.isinf(_min) and abs(_min) != float_min: | |
| 2353 _min *= self._norm[1] | |
| 2354 if not np.isinf(_max) and abs(_max) != float_min: | |
| 2355 _max *= self._norm[1] | |
| 2356 par.set(value=value, min=_min, max=_max) | |
| 2357 par.init_value = par.value | |
| 2358 | |
| 2359 # Remap the best results | |
| 2360 # t5 = time() | |
| 2361 self._out_of_bounds = np.copy(np.reshape(self._out_of_bounds_flat, self._map_shape)) | |
| 2362 self._max_nfev = np.copy(np.reshape(self._max_nfev_flat, self._map_shape)) | |
| 2363 self._redchi = np.copy(np.reshape(self._redchi_flat, self._map_shape)) | |
| 2364 self._success = np.copy(np.reshape(self._success_flat, self._map_shape)) | |
| 2365 self._best_fit = np.copy(np.reshape(self._best_fit_flat, | |
| 2366 list(self._map_shape)+[x_size])) | |
| 2367 self._best_values = np.asarray([np.reshape(par, list(self._map_shape)) | |
| 2368 for par in self._best_values_flat]) | |
| 2369 self._best_errors = np.asarray([np.reshape(par, list(self._map_shape)) | |
| 2370 for par in self._best_errors_flat]) | |
| 2371 if self._inv_transpose is not None: | |
| 2372 self._out_of_bounds = np.transpose(self._out_of_bounds, self._inv_transpose) | |
| 2373 self._max_nfev = np.transpose(self._max_nfev, self._inv_transpose) | |
| 2374 self._redchi = np.transpose(self._redchi, self._inv_transpose) | |
| 2375 self._success = np.transpose(self._success, self._inv_transpose) | |
| 2376 self._best_fit = np.transpose(self._best_fit, | |
| 2377 list(self._inv_transpose)+[len(self._inv_transpose)]) | |
| 2378 self._best_values = np.transpose(self._best_values, | |
| 2379 [0]+[i+1 for i in self._inv_transpose]) | |
| 2380 self._best_errors = np.transpose(self._best_errors, | |
| 2381 [0]+[i+1 for i in self._inv_transpose]) | |
| 2382 del self._out_of_bounds_flat | |
| 2383 del self._max_nfev_flat | |
| 2384 del self._redchi_flat | |
| 2385 del self._success_flat | |
| 2386 del self._best_fit_flat | |
| 2387 del self._best_values_flat | |
| 2388 del self._best_errors_flat | |
| 2389 # t6 = time() | |
| 2390 | |
| 2391 # Restore parameter bounds and renormalize the parameters | |
| 2392 for name, par in self._parameter_bounds.items(): | |
| 2393 self._parameters[name].set(min=par['min'], max=par['max']) | |
| 2394 self._normalized = False | |
| 2395 if self._norm is not None: | |
| 2396 for name, norm in self._parameter_norms.items(): | |
| 2397 par = self._parameters[name] | |
| 2398 if par.expr is None and norm: | |
| 2399 value = par.value*self._norm[1] | |
| 2400 _min = par.min | |
| 2401 _max = par.max | |
| 2402 if not np.isinf(_min) and abs(_min) != float_min: | |
| 2403 _min *= self._norm[1] | |
| 2404 if not np.isinf(_max) and abs(_max) != float_min: | |
| 2405 _max *= self._norm[1] | |
| 2406 par.set(value=value, min=_min, max=_max) | |
| 2407 # t7 = time() | |
| 2408 # print(f'total run time in fit: {t7-t0:.2f} seconds') | |
| 2409 # print(f'run time first fit: {t2-t1:.2f} seconds') | |
| 2410 # print(f'run time remaining fits: {t4-t3:.2f} seconds') | |
| 2411 # print(f'run time remapping results: {t6-t5:.2f} seconds') | |
| 2412 | |
| 2413 # print('\n\nAt end fit:') | |
| 2414 # self._parameters.pretty_print() | |
| 2415 # print(f'self._best_values:\n{self._best_values}\n\n') | |
| 2416 | |
| 2417 # Free the shared memory | |
| 2418 self.freemem() | |
| 2419 | |
| 2420 def _fit_parallel(self, current_best_values, num, n_start, **kwargs): | |
| 2421 num = min(num, self._map_dim-n_start) | |
| 2422 for n in range(num): | |
| 2423 # print(f'{n_start+n} before:\n{current_best_values}') | |
| 2424 self._fit(n_start+n, current_best_values, **kwargs) | |
| 2425 # print(f'{n_start+n} after:\n{current_best_values}') | |
| 2426 | |
| 2427 def _fit(self, n, current_best_values, return_result=False, **kwargs): | |
| 2428 #RV print(f'\n\nstart FitMap._fit {n}\n') | |
| 2429 #RV print(f'current_best_values = {current_best_values}') | |
| 2430 #RV print(f'self._best_parameters = {self._best_parameters}') | |
| 2431 #RV print(f'self._new_parameters = {self._new_parameters}\n\n') | |
| 2432 # self._parameters.pretty_print() | |
| 2433 # Set parameters to current best values, but prevent them from sitting at boundaries | |
| 2434 if self._new_parameters is None: | |
| 2435 # Initial fit | |
| 2436 for name, value in current_best_values.items(): | |
| 2437 par = self._parameters[name] | |
| 2438 par.set(value=self._reset_par_at_boundary(par, value)) | |
| 2439 else: | |
| 2440 # Refit | |
| 2441 for i, name in enumerate(self._best_parameters): | |
| 2442 par = self._parameters[name] | |
| 2443 if name in self._new_parameters: | |
| 2444 if name in current_best_values: | |
| 2445 par.set(value=self._reset_par_at_boundary(par, current_best_values[name])) | |
| 2446 elif par.expr is None: | |
| 2447 par.set(value=self._best_values[i][n]) | |
| 2448 #RV print(f'\nbefore fit {n}') | |
| 2449 #RV self._parameters.pretty_print() | |
| 2450 if self._mask is None: | |
| 2451 result = self._model.fit(self._ymap_norm[n], self._parameters, x=self._x, **kwargs) | |
| 2452 else: | |
| 2453 result = self._model.fit(self._ymap_norm[n][~self._mask], self._parameters, | |
| 2454 x=self._x[~self._mask], **kwargs) | |
| 2455 # print(f'\nafter fit {n}') | |
| 2456 # self._parameters.pretty_print() | |
| 2457 # print(result.fit_report(show_correl=False)) | |
| 2458 out_of_bounds = False | |
| 2459 for name, par in self._parameter_bounds.items(): | |
| 2460 value = result.params[name].value | |
| 2461 if not np.isinf(par['min']) and value < par['min']: | |
| 2462 out_of_bounds = True | |
| 2463 break | |
| 2464 if not np.isinf(par['max']) and value > par['max']: | |
| 2465 out_of_bounds = True | |
| 2466 break | |
| 2467 self._out_of_bounds_flat[n] = out_of_bounds | |
| 2468 if self._try_no_bounds and out_of_bounds: | |
| 2469 # Rerun fit with parameter bounds in place | |
| 2470 for name, par in self._parameter_bounds.items(): | |
| 2471 self._parameters[name].set(min=par['min'], max=par['max']) | |
| 2472 # Set parameters to current best values, but prevent them from sitting at boundaries | |
| 2473 if self._new_parameters is None: | |
| 2474 # Initial fit | |
| 2475 for name, value in current_best_values.items(): | |
| 2476 par = self._parameters[name] | |
| 2477 par.set(value=self._reset_par_at_boundary(par, value)) | |
| 2478 else: | |
| 2479 # Refit | |
| 2480 for i, name in enumerate(self._best_parameters): | |
| 2481 par = self._parameters[name] | |
| 2482 if name in self._new_parameters: | |
| 2483 if name in current_best_values: | |
| 2484 par.set(value=self._reset_par_at_boundary(par, | |
| 2485 current_best_values[name])) | |
| 2486 elif par.expr is None: | |
| 2487 par.set(value=self._best_values[i][n]) | |
| 2488 # print('\nbefore fit') | |
| 2489 # self._parameters.pretty_print() | |
| 2490 # print(result.fit_report(show_correl=False)) | |
| 2491 if self._mask is None: | |
| 2492 result = self._model.fit(self._ymap_norm[n], self._parameters, x=self._x, **kwargs) | |
| 2493 else: | |
| 2494 result = self._model.fit(self._ymap_norm[n][~self._mask], self._parameters, | |
| 2495 x=self._x[~self._mask], **kwargs) | |
| 2496 # print(f'\nafter fit {n}') | |
| 2497 # self._parameters.pretty_print() | |
| 2498 # print(result.fit_report(show_correl=False)) | |
| 2499 out_of_bounds = False | |
| 2500 for name, par in self._parameter_bounds.items(): | |
| 2501 value = result.params[name].value | |
| 2502 if not np.isinf(par['min']) and value < par['min']: | |
| 2503 out_of_bounds = True | |
| 2504 break | |
| 2505 if not np.isinf(par['max']) and value > par['max']: | |
| 2506 out_of_bounds = True | |
| 2507 break | |
| 2508 # print(f'{n} redchi < redchi_cutoff = {result.redchi < self._redchi_cutoff} success = {result.success} out_of_bounds = {out_of_bounds}') | |
| 2509 # Reset parameters back to unbound | |
| 2510 for name in self._parameter_bounds.keys(): | |
| 2511 self._parameters[name].set(min=-np.inf, max=np.inf) | |
| 2512 assert(not out_of_bounds) | |
| 2513 if result.redchi >= self._redchi_cutoff: | |
| 2514 result.success = False | |
| 2515 if result.nfev == result.max_nfev: | |
| 2516 # print(f'Maximum number of function evaluations reached for n = {n}') | |
| 2517 # logging.warning(f'Maximum number of function evaluations reached for n = {n}') | |
| 2518 if result.redchi < self._redchi_cutoff: | |
| 2519 result.success = True | |
| 2520 self._max_nfev_flat[n] = True | |
| 2521 if result.success: | |
| 2522 assert(all(True for par in current_best_values if par in result.params.values())) | |
| 2523 for par in result.params.values(): | |
| 2524 if par.vary: | |
| 2525 current_best_values[par.name] = par.value | |
| 2526 else: | |
| 2527 logging.warning(f'Fit for n = {n} failed: {result.lmdif_message}') | |
| 2528 # Renormalize the data and results | |
| 2529 self._renormalize(n, result) | |
| 2530 if self._print_report: | |
| 2531 print(result.fit_report(show_correl=False)) | |
| 2532 if self._plot: | |
| 2533 dims = np.unravel_index(n, self._map_shape) | |
| 2534 if self._inv_transpose is not None: | |
| 2535 dims= tuple(dims[self._inv_transpose[i]] for i in range(len(dims))) | |
| 2536 super().plot(result=result, y=np.asarray(self._ymap[dims]), plot_comp_legends=True, | |
| 2537 skip_init=self._skip_init, title=str(dims)) | |
| 2538 #RV print(f'\n\nend FitMap._fit {n}\n') | |
| 2539 #RV print(f'current_best_values = {current_best_values}') | |
| 2540 # self._parameters.pretty_print() | |
| 2541 # print(result.fit_report(show_correl=False)) | |
| 2542 #RV print(f'\nself._best_values_flat:\n{self._best_values_flat}\n\n') | |
| 2543 if return_result: | |
| 2544 return(result) | |
| 2545 | |
    def _renormalize(self, n, result):
        """Store the fit results for map point n, undoing data normalization.

        Writes the reduced chi-square, success flag, best fit and per-parameter
        values/errors into the flattened result arrays. When the input map was
        normalized, the lmfit result's parameters are rescaled in place back to
        the original data scale before being stored.

        Parameters
        ----------
        n : int
            Flattened map index of the point whose results are stored.
        result : lmfit.model.ModelResult
            Fit result for this point; its params (and best_fit/init_fit when
            plotting) are mutated in place during renormalization.
        """
        self._redchi_flat[n] = np.float64(result.redchi)
        self._success_flat[n] = result.success
        if self._norm is None or not self._normalized:
            # No normalization in effect: store results as-is
            self._best_fit_flat[n] = result.best_fit
            for i, name in enumerate(self._best_parameters):
                self._best_values_flat[i][n] = np.float64(result.params[name].value)
                self._best_errors_flat[i][n] = np.float64(result.params[name].stderr)
        else:
            # Only parameters that are both tracked and flagged as normalized
            # need rescaling
            pars = set(self._parameter_norms) & set(self._best_parameters)
            for name, par in result.params.items():
                if name in pars and self._parameter_norms[name]:
                    if par.stderr is not None:
                        par.stderr *= self._norm[1]
                    # Expression parameters derive their value elsewhere; only
                    # free parameters are rescaled directly
                    if par.expr is None:
                        par.value *= self._norm[1]
                        if self._print_report:
                            # Rescale report-only fields so fit_report() shows
                            # quantities in the original data scale
                            if par.init_value is not None:
                                par.init_value *= self._norm[1]
                            # NOTE(review): float_min appears to be a module-level
                            # sentinel magnitude exempt from rescaling — confirm
                            # its definition earlier in the file
                            if not np.isinf(par.min) and abs(par.min) != float_min:
                                par.min *= self._norm[1]
                            if not np.isinf(par.max) and abs(par.max) != float_min:
                                par.max *= self._norm[1]
            # Undo the linear normalization y_norm = (y - norm[0]) / norm[1]
            self._best_fit_flat[n] = result.best_fit*self._norm[1]+self._norm[0]
            for i, name in enumerate(self._best_parameters):
                self._best_values_flat[i][n] = np.float64(result.params[name].value)
                self._best_errors_flat[i][n] = np.float64(result.params[name].stderr)
            if self._plot:
                # Rescale the curves the plot routine will draw
                if not self._skip_init:
                    result.init_fit = result.init_fit*self._norm[1]+self._norm[0]
                result.best_fit = np.copy(self._best_fit_flat[n])
