comparison fit.py @ 69:fba792d5f83b draft
planemo upload for repository https://github.com/rolfverberg/galaxytools commit ab9f412c362a4ab986d00e21d5185cfcf82485c1
| author | rv43 |
| --- | --- |
| date | Fri, 10 Mar 2023 16:02:04 +0000 |
| parents | f31ef7bfb430 |
| children | 1cf15b61cd83 |
| 68:ba5866d0251d | 69:fba792d5f83b |
| --- | --- |
5 Created on Mon Dec 6 15:36:22 2021 | 5 Created on Mon Dec 6 15:36:22 2021 |
6 | 6 |
7 @author: rv43 | 7 @author: rv43 |
8 """ | 8 """ |
9 | 9 |
10 import sys | |
11 import re | |
12 import logging | 10 import logging |
11 | |
12 from asteval import Interpreter, get_ast_names | |
13 from copy import deepcopy | |
14 from lmfit import Model, Parameters | |
15 from lmfit.model import ModelResult | |
16 from lmfit.models import ConstantModel, LinearModel, QuadraticModel, PolynomialModel,\ | |
17 ExponentialModel, StepModel, RectangleModel, ExpressionModel, GaussianModel,\ | |
18 LorentzianModel | |
13 import numpy as np | 19 import numpy as np |
14 | 20 from os import cpu_count, getpid, listdir, mkdir, path |
15 from asteval import Interpreter | 21 from re import compile, sub |
16 from copy import deepcopy | 22 from shutil import rmtree |
17 #from lmfit import Minimizer | 23 from sympy import diff, simplify |
18 from lmfit import Model, Parameters | 24 try: |
19 from lmfit.models import ConstantModel, LinearModel, QuadraticModel, PolynomialModel,\ | 25 from joblib import Parallel, delayed |
20 StepModel, RectangleModel, GaussianModel, LorentzianModel | 26 have_joblib = True |
21 | 27 except: |
22 from general import is_index, index_nearest, quickPlot | 28 have_joblib = False |
29 try: | |
30 import xarray as xr | |
31 have_xarray = True | |
32 except: | |
33 have_xarray = False | |
34 | |
35 from .general import illegal_value, is_int, is_dict_series, is_index, index_nearest, \ | |
36 almost_equal, quick_plot #, eval_expr | |
37 #from sys import path as syspath | |
38 #syspath.append(f'/nfs/chess/user/rv43/msnctools/msnctools') | |
39 #from general import illegal_value, is_int, is_dict_series, is_index, index_nearest, \ | |
40 # almost_equal, quick_plot #, eval_expr | |
41 | |
42 from sys import float_info | |
43 float_min = float_info.min | |
44 float_max = float_info.max | |
23 | 45 |
24 # sigma = fwhm_factor*fwhm | 46 # sigma = fwhm_factor*fwhm |
25 fwhm_factor = { | 47 fwhm_factor = { |
26 'gaussian' : f'fwhm/(2*sqrt(2*log(2)))', | 48 'gaussian': f'fwhm/(2*sqrt(2*log(2)))', |
27 'lorentzian' : f'0.5*fwhm', | 49 'lorentzian': f'0.5*fwhm', |
28 'splitlorentzian' : f'0.5*fwhm', # sigma = sigma_r | 50 'splitlorentzian': f'0.5*fwhm', # sigma = sigma_r |
29 'voight' : f'0.2776*fwhm', # sigma = gamma | 51 'voight': f'0.2776*fwhm', # sigma = gamma |
30 'pseudovoight' : f'0.5*fwhm'} # fraction = 0.5 | 52 'pseudovoight': f'0.5*fwhm'} # fraction = 0.5 |
31 | 53 |
32 # amplitude = height_factor*height*fwhm | 54 # amplitude = height_factor*height*fwhm |
33 height_factor = { | 55 height_factor = { |
34 'gaussian' : f'height*fwhm*0.5*sqrt(pi/log(2))', | 56 'gaussian': f'height*fwhm*0.5*sqrt(pi/log(2))', |
35 'lorentzian' : f'height*fwhm*0.5*pi', | 57 'lorentzian': f'height*fwhm*0.5*pi', |
36 'splitlorentzian' : f'height*fwhm*0.5*pi', # sigma = sigma_r | 58 'splitlorentzian': f'height*fwhm*0.5*pi', # sigma = sigma_r |
37 'voight' : f'3.334*height*fwhm', # sigma = gamma | 59 'voight': f'3.334*height*fwhm', # sigma = gamma |
38 'pseudovoight' : f'1.268*height*fwhm'} # fraction = 0.5 | 60 'pseudovoight': f'1.268*height*fwhm'} # fraction = 0.5 |
39 | 61 |
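For reference, these conversion strings are plain math expressions; a minimal sketch of evaluating one with asteval (which the new revision imports), using a hypothetical FWHM value:

```python
from asteval import Interpreter

# Evaluate the 'gaussian' entry of fwhm_factor for a hypothetical peak:
# sigma = fwhm/(2*sqrt(2*log(2))) ~= fwhm/2.3548
ast = Interpreter()
ast.symtable['fwhm'] = 2.5
sigma = ast.eval('fwhm/(2*sqrt(2*log(2)))')
print(sigma)  # ~1.0617
```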
40 class Fit: | 62 class Fit: |
41 """Wrapper class for lmfit | 63 """Wrapper class for lmfit |
42 """ | 64 """ |
43 def __init__(self, x, y, models=None, **kwargs): | 65 def __init__(self, y, x=None, models=None, normalize=True, **kwargs): |
44 self._x = x | 66 if not isinstance(normalize, bool): |
45 self._y = y | 67 raise ValueError(f'Invalid parameter normalize ({normalize})') |
68 self._mask = None | |
46 self._model = None | 69 self._model = None |
70 self._norm = None | |
71 self._normalized = False | |
47 self._parameters = Parameters() | 72 self._parameters = Parameters() |
73 self._parameter_bounds = None | |
74 self._parameter_norms = {} | |
75 self._linear_parameters = [] | |
76 self._nonlinear_parameters = [] | |
48 self._result = None | 77 self._result = None |
78 self._try_linear_fit = True | |
79 self._y = None | |
80 self._y_norm = None | |
81 self._y_range = None | |
82 if 'try_linear_fit' in kwargs: | |
83 try_linear_fit = kwargs.pop('try_linear_fit') | |
84 if not isinstance(try_linear_fit, bool): | |
85 illegal_value(try_linear_fit, 'try_linear_fit', 'Fit.fit', raise_error=True) | |
86 self._try_linear_fit = try_linear_fit | |
87 if y is not None: | |
88 if isinstance(y, (tuple, list, np.ndarray)): | |
89 self._x = np.asarray(x) | |
90 elif have_xarray and isinstance(y, xr.DataArray): | |
91 if x is not None: | |
92 logging.warning(f'Ignoring superfluous input x ({x}) in Fit.__init__') | |
93 if y.ndim != 1: | |
94 illegal_value(y.ndim, 'DataArray dimensions', 'Fit:__init__', raise_error=True) | |
95 self._x = np.asarray(y[y.dims[0]]) | |
96 else: | |
97 illegal_value(y, 'y', 'Fit:__init__', raise_error=True) | |
98 self._y = y | |
99 if self._x.ndim != 1: | |
100 raise ValueError(f'Invalid dimension for input x ({self._x.ndim})') | |
101 if self._x.size != self._y.size: | |
102 raise ValueError(f'Inconsistent x and y dimensions ({self._x.size} vs '+ | |
103 f'{self._y.size})') | |
104 if 'mask' in kwargs: | |
105 self._mask = kwargs.pop('mask') | |
106 if self._mask is None: | |
107 y_min = float(self._y.min()) | |
108 self._y_range = float(self._y.max())-y_min | |
109 if normalize and self._y_range > 0.0: | |
110 self._norm = (y_min, self._y_range) | |
111 else: | |
112 self._mask = np.asarray(self._mask).astype(bool) | |
113 if self._x.size != self._mask.size: | |
114 raise ValueError(f'Inconsistent x and mask dimensions ({self._x.size} vs '+ | |
115 f'{self._mask.size})') | |
116 y_masked = np.asarray(self._y)[~self._mask] | |
117 y_min = float(y_masked.min()) | |
118 self._y_range = float(y_masked.max())-y_min | |
119 if normalize and self._y_range > 0.0: | |
121 self._norm = (y_min, self._y_range) | |
49 if models is not None: | 122 if models is not None: |
50 if callable(models) or isinstance(models, str): | 123 if callable(models) or isinstance(models, str): |
51 kwargs = self.add_model(models, **kwargs) | 124 kwargs = self.add_model(models, **kwargs) |
52 elif isinstance(models, (tuple, list)): | 125 elif isinstance(models, (tuple, list)): |
53 for model in models: | 126 for model in models: |
54 kwargs = self.add_model(model, **kwargs) | 127 kwargs = self.add_model(model, **kwargs) |
55 self.fit(**kwargs) | 128 self.fit(**kwargs) |
56 | 129 |
57 @classmethod | 130 @classmethod |
58 def fit_data(cls, x, y, models, **kwargs): | 131 def fit_data(cls, y, models, x=None, normalize=True, **kwargs): |
59 return cls(x, y, models, **kwargs) | 132 return(cls(y, x=x, models=models, normalize=normalize, **kwargs)) |
60 | 133 |
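A hypothetical end-to-end use of the reworked signature (y first, optional x, normalization on by default), with synthetic data:

```python
import numpy as np

x = np.linspace(-5.0, 5.0, 201)
y = 10.0*np.exp(-0.5*(x/1.2)**2) + 0.5  # synthetic Gaussian on a constant offset

# fit_data wraps Fit(y, x=x, models=..., normalize=True) and runs the fit
fit = Fit.fit_data(y, ('constant', 'gaussian'), x=x, normalize=True)
print(fit.best_values)  # {'c': ..., 'amplitude': ..., 'center': ..., 'sigma': ...}
```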
61 @property | 134 @property |
62 def best_errors(self): | 135 def best_errors(self): |
63 if self._result is None: | 136 if self._result is None: |
64 return None | 137 return(None) |
65 errors = {} | 138 return({name:self._result.params[name].stderr for name in sorted(self._result.params) |
66 names = sorted(self._result.params) | 139 if name != 'tmp_normalization_offset_c'}) |
67 for name in names: | |
68 par = self._result.params[name] | |
69 errors[name] = par.stderr | |
70 return errors | |
71 | 140 |
72 @property | 141 @property |
73 def best_fit(self): | 142 def best_fit(self): |
74 if self._result is None: | 143 if self._result is None: |
75 return None | 144 return(None) |
76 return self._result.best_fit | 145 return(self._result.best_fit) |
77 | 146 |
78 @property | 147 @property |
79 def best_parameters(self): | 148 def best_parameters(self): |
80 if self._result is None: | 149 if self._result is None: |
81 return None | 150 return(None) |
82 parameters = [] | 151 parameters = {} |
83 names = sorted(self._result.params) | 152 for name in sorted(self._result.params): |
84 for name in names: | 153 if name != 'tmp_normalization_offset_c': |
85 par = self._result.params[name] | 154 par = self._result.params[name] |
86 parameters.append({'name' : par.name, 'value' : par.value, 'error' : par.stderr, | 155 parameters[name] = {'value': par.value, 'error': par.stderr, |
87 'init_value' : par.init_value, 'min' : par.min, 'max' : par.max, | 156 'init_value': par.init_value, 'min': par.min, 'max': par.max, |
88 'vary' : par.vary, 'expr' : par.expr}) | 157 'vary': par.vary, 'expr': par.expr} |
89 return parameters | 158 return(parameters) |
159 | |
160 @property | |
161 def best_results(self): | |
162 """Convert the input data array to a data set and add the fit results. | |
163 """ | |
164 if self._result is None: | |
165 return(None) | |
166 if isinstance(self._y, xr.DataArray): | |
167 best_results = self._y.to_dataset() | |
168 dims = self._y.dims | |
169 fit_name = f'{self._y.name}_fit' | |
170 else: | |
171 coords = {'x': (['x'], self._x)} | |
172 dims = ('x') | |
173 best_results = xr.Dataset(coords=coords) | |
174 best_results['y'] = (dims, self._y) | |
175 fit_name = 'y_fit' | |
176 best_results[fit_name] = (dims, self.best_fit) | |
177 if self._mask is not None: | |
178 best_results['mask'] = self._mask | |
179 best_results.coords['par_names'] = ('peak', [name for name in self.best_values.keys()]) | |
180 best_results['best_values'] = (['par_names'], [v for v in self.best_values.values()]) | |
181 best_results['best_errors'] = (['par_names'], [v for v in self.best_errors.values()]) | |
182 best_results.attrs['components'] = self.components | |
183 return(best_results) | |
90 | 184 |
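The xarray layout built by best_results, reduced to a self-contained sketch with made-up arrays:

```python
import numpy as np
import xarray as xr

x = np.linspace(0.0, 1.0, 5)
y = x**2
best_results = xr.Dataset(coords={'x': (['x'], x)})  # same coords layout as above
best_results['y'] = (('x',), y)                      # the input data
best_results['y_fit'] = (('x',), y)                  # stands in for self.best_fit
best_results.attrs['components'] = {}                # dict from the components property
```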
91 @property | 185 @property |
92 def best_values(self): | 186 def best_values(self): |
93 if self._result is None: | 187 if self._result is None: |
94 return None | 188 return(None) |
95 return self._result.params.valuesdict() | 189 return({name:self._result.params[name].value for name in sorted(self._result.params) |
190 if name != 'tmp_normalization_offset_c'}) | |
96 | 191 |
97 @property | 192 @property |
98 def chisqr(self): | 193 def chisqr(self): |
99 return self._result.chisqr | 194 if self._result is None: |
195 return(None) | |
196 return(self._result.chisqr) | |
197 | |
198 @property | |
199 def components(self): | |
200 components = {} | |
201 if self._result is None: | |
202 logging.warning('Unable to collect components in Fit.components') | |
203 return(components) | |
204 for component in self._result.components: | |
205 if 'tmp_normalization_offset_c' in component.param_names: | |
206 continue | |
207 parameters = {} | |
208 for name in component.param_names: | |
209 par = self._parameters[name] | |
210 parameters[name] = {'free': par.vary, 'value': self._result.params[name].value} | |
211 if par.expr is not None: | |
212 parameters[name]['expr'] = par.expr | |
213 expr = None | |
214 if isinstance(component, ExpressionModel): | |
215 name = component._name | |
216 if name[-1] == '_': | |
217 name = name[:-1] | |
218 expr = component.expr | |
219 else: | |
220 prefix = component.prefix | |
221 if len(prefix): | |
222 if prefix[-1] == '_': | |
223 prefix = prefix[:-1] | |
224 name = f'{prefix} ({component._name})' | |
225 else: | |
226 name = f'{component._name}' | |
227 if expr is None: | |
228 components[name] = {'parameters': parameters} | |
229 else: | |
230 components[name] = {'expr': expr, 'parameters': parameters} | |
231 return(components) | |
100 | 232 |
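The dict returned by components has this illustrative shape (model and parameter names here are hypothetical):

```python
{'peak1 (gaussian)': {
    'parameters': {
        'peak1_amplitude': {'free': True, 'value': 12.3},
        'peak1_center': {'free': True, 'value': 0.02},
        'peak1_sigma': {'free': True, 'value': 0.4},
    }}}
```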
101 @property | 233 @property |
102 def covar(self): | 234 def covar(self): |
103 return self._result.covar | 235 if self._result is None: |
236 return(None) | |
237 return(self._result.covar) | |
238 | |
239 @property | |
240 def init_parameters(self): | |
241 if self._result is None or self._result.init_params is None: | |
242 return(None) | |
243 parameters = {} | |
244 for name in sorted(self._result.init_params): | |
245 if name != 'tmp_normalization_offset_c': | |
246 par = self._result.init_params[name] | |
247 parameters[name] = {'value': par.value, 'min': par.min, 'max': par.max, | |
248 'vary': par.vary, 'expr': par.expr} | |
249 return(parameters) | |
104 | 250 |
105 @property | 251 @property |
106 def init_values(self): | 252 def init_values(self): |
253 if self._result is None or self._result.init_params is None: | |
254 return(None) | |
255 return({name:self._result.init_params[name].value for name in | |
256 sorted(self._result.init_params) if name != 'tmp_normalization_offset_c'}) | |
257 | |
258 @property | |
259 def normalization_offset(self): | |
107 if self._result is None: | 260 if self._result is None: |
108 return None | 261 return(None) |
109 return self._result.init_values | 262 if self._norm is None: |
263 return(0.0) | |
264 else: | |
265 if self._result.init_params is not None: | |
266 normalization_offset = self._result.init_params['tmp_normalization_offset_c'] | |
267 else: | |
268 normalization_offset = self._result.params['tmp_normalization_offset_c'] | |
269 return(normalization_offset) | |
110 | 270 |
111 @property | 271 @property |
112 def num_func_eval(self): | 272 def num_func_eval(self): |
113 return self._result.nfev | 273 if self._result is None: |
274 return(None) | |
275 return(self._result.nfev) | |
276 | |
277 @property | |
278 def parameters(self): | |
279 return({name:{'min': par.min, 'max': par.max, 'vary': par.vary, 'expr': par.expr} | |
280 for name, par in self._parameters.items() if name != 'tmp_normalization_offset_c'}) | |
114 | 281 |
115 @property | 282 @property |
116 def redchi(self): | 283 def redchi(self): |
117 return self._result.redchi | 284 if self._result is None: |
285 return(None) | |
286 return(self._result.redchi) | |
118 | 287 |
119 @property | 288 @property |
120 def residual(self): | 289 def residual(self): |
121 return self._result.residual | 290 if self._result is None: |
291 return(None) | |
292 return(self._result.residual) | |
122 | 293 |
123 @property | 294 @property |
124 def success(self): | 295 def success(self): |
296 if self._result is None: | |
297 return(None) | |
125 if not self._result.success: | 298 if not self._result.success: |
126 # print(f'ier = {self._result.ier}') | 299 # print(f'ier = {self._result.ier}') |
127 # print(f'lmdif_message = {self._result.lmdif_message}') | 300 # print(f'lmdif_message = {self._result.lmdif_message}') |
128 # print(f'message = {self._result.message}') | 301 # print(f'message = {self._result.message}') |
129 # print(f'nfev = {self._result.nfev}') | 302 # print(f'nfev = {self._result.nfev}') |
131 # print(f'success = {self._result.success}') | 304 # print(f'success = {self._result.success}') |
132 if self._result.ier == 0 or self._result.ier == 5: | 305 if self._result.ier == 0 or self._result.ier == 5: |
133 logging.warning(f'ier = {self._result.ier}: {self._result.message}') | 306 logging.warning(f'ier = {self._result.ier}: {self._result.message}') |
134 else: | 307 else: |
135 logging.warning(f'ier = {self._result.ier}: {self._result.message}') | 308 logging.warning(f'ier = {self._result.ier}: {self._result.message}') |
136 return True | 309 return(True) |
137 # self.print_fit_report() | 310 # self.print_fit_report() |
138 # self.plot() | 311 # self.plot() |
139 return self._result.success | 312 return(self._result.success) |
140 | 313 |
141 @property | 314 @property |
142 def var_names(self): | 315 def var_names(self): |
143 """Intended to be used with covar | 316 """Intended to be used with covar |
144 """ | 317 """ |
145 if self._result is None: | 318 if self._result is None: |
146 return None | 319 return(None) |
147 return self._result.var_names | 320 return(getattr(self._result, 'var_names', None)) |
148 | 321 |
149 def print_fit_report(self, show_correl=False): | 322 @property |
150 if self._result is not None: | 323 def x(self): |
151 print(self._result.fit_report(show_correl=show_correl)) | 324 return(self._x) |
325 | |
326 @property | |
327 def y(self): | |
328 return(self._y) | |
329 | |
330 def print_fit_report(self, result=None, show_correl=False): | |
331 if result is None: | |
332 result = self._result | |
333 if result is not None: | |
334 print(result.fit_report(show_correl=show_correl)) | |
152 | 335 |
153 def add_parameter(self, **parameter): | 336 def add_parameter(self, **parameter): |
154 if not isinstance(parameter, dict): | 337 if not isinstance(parameter, dict): |
155 illegal_value(parameter, 'parameter', 'add_parameter') | 338 raise ValueError(f'Invalid parameter ({parameter})') |
156 return | 339 if parameter.get('expr') is not None: |
340 raise KeyError(f'Illegal "expr" key in parameter {parameter}') | |
341 name = parameter['name'] | |
342 if not isinstance(name, str): | |
343 raise ValueError(f'Illegal "name" value ({name}) in parameter {parameter}') | |
344 if parameter.get('norm') is None: | |
345 self._parameter_norms[name] = False | |
346 else: | |
347 norm = parameter.pop('norm') | |
348 if self._norm is None: | |
349 logging.warning(f'Ignoring norm in parameter {name} in '+ | |
350 f'Fit.add_parameter (normalization is turned off)') | |
351 self._parameter_norms[name] = False | |
352 else: | |
353 if not isinstance(norm, bool): | |
354 raise ValueError(f'Illegal "norm" value ({norm}) in parameter {parameter}') | |
355 self._parameter_norms[name] = norm | |
356 vary = parameter.get('vary') | |
357 if vary is not None: | |
358 if not isinstance(vary, bool): | |
359 raise ValueError(f'Illegal "vary" value ({vary}) in parameter {parameter}') | |
360 if not vary: | |
361 if 'min' in parameter: | |
362 logging.warning(f'Ignoring min in parameter {name} in '+ | |
363 f'Fit.add_parameter (vary = {vary})') | |
364 parameter.pop('min') | |
365 if 'max' in parameter: | |
366 logging.warning(f'Ignoring max in parameter {name} in '+ | |
367 f'Fit.add_parameter (vary = {vary})') | |
368 parameter.pop('max') | |
369 if self._norm is not None and name not in self._parameter_norms: | |
370 raise ValueError(f'Missing parameter normalization type for parameter {name}') | |
157 self._parameters.add(**parameter) | 371 self._parameters.add(**parameter) |
158 | 372 |
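A hypothetical add_parameter call matching the keys handled above; 'norm' marks a parameter that scales with the data range when normalization is active:

```python
fit = Fit(y, x=x)  # normalization on by default
fit.add_parameter(name='scale', value=1.0, min=0.0, norm=True)
fit.add_parameter(name='offset', value=0.0, vary=False, norm=False)
```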
159 def add_model(self, model, prefix=None, parameters=None, **kwargs): | 373 def add_model(self, model, prefix=None, parameters=None, parameter_norms=None, **kwargs): |
160 # Create the new model | 374 # Create the new model |
161 # print('\nAt start adding model:') | 375 # print(f'at start add_model:\nself._parameters:\n{self._parameters}') |
162 # self._parameters.pretty_print() | 376 # print(f'at start add_model: kwargs = {kwargs}') |
377 # print(f'parameters = {parameters}') | |
378 # print(f'parameter_norms = {parameter_norms}') | |
379 # if len(self._parameters.keys()): | |
380 # print('\nAt start adding model:') | |
381 # self._parameters.pretty_print() | |
382 # print(f'parameter_norms:\n{self._parameter_norms}') | |
163 if prefix is not None and not isinstance(prefix, str): | 383 if prefix is not None and not isinstance(prefix, str): |
164 logging.warning(f'Ignoring illegal prefix: {prefix} ({type(prefix)})') | 384 logging.warning(f'Ignoring illegal prefix: {prefix} ({type(prefix)})') |
165 prefix = None | 385 prefix = None |
386 if prefix is None: | |
387 pprefix = '' | |
388 else: | |
389 pprefix = prefix | |
390 if parameters is not None: | |
391 if isinstance(parameters, dict): | |
392 parameters = (parameters, ) | |
393 elif not is_dict_series(parameters): | |
394 illegal_value(parameters, 'parameters', 'Fit.add_model', raise_error=True) | |
395 parameters = deepcopy(parameters) | |
396 if parameter_norms is not None: | |
397 if isinstance(parameter_norms, dict): | |
398 parameter_norms = (parameter_norms, ) | |
399 if not is_dict_series(parameter_norms): | |
400 illegal_value(parameter_norms, 'parameter_norms', 'Fit.add_model', raise_error=True) | |
401 new_parameter_norms = {} | |
166 if callable(model): | 402 if callable(model): |
403 # Linear fit not yet implemented for callable models | |
404 self._try_linear_fit = False | |
405 if parameter_norms is None: | |
406 if parameters is None: | |
407 raise ValueError('Either "parameters" or "parameter_norms" is required in '+ | |
408 f'{model}') | |
409 for par in parameters: | |
410 name = par['name'] | |
411 if not isinstance(name, str): | |
412 raise ValueError(f'Illegal "name" value ({name}) in input parameters') | |
413 if par.get('norm') is not None: | |
414 norm = par.pop('norm') | |
415 if not isinstance(norm, bool): | |
416 raise ValueError(f'Illegal "norm" value ({norm}) in input parameters') | |
417 new_parameter_norms[f'{pprefix}{name}'] = norm | |
418 else: | |
419 for par in parameter_norms: | |
420 name = par['name'] | |
421 if not isinstance(name, str): | |
422 raise ValueError(f'Illegal "name" value ({name}) in input parameters') | |
423 norm = par.get('norm') | |
424 if norm is None or not isinstance(norm, bool): | |
425 raise ValueError(f'Illegal "norm" value ({norm}) in input parameters') | |
426 new_parameter_norms[f'{pprefix}{name}'] = norm | |
427 if parameters is not None: | |
428 for par in parameters: | |
429 if par.get('expr') is not None: | |
430 raise KeyError(f'Illegal "expr" key ({par.get("expr")}) in parameter '+ | |
431 f'{name} for a callable model {model}') | |
432 name = par['name'] | |
433 if not isinstance(name, str): | |
434 raise ValueError(f'Illegal "name" value ({name}) in input parameters') | |
435 # RV FIX callable model will need partial deriv functions for any linear pars to get the linearized matrix, so for now skip linear solution option | |
167 newmodel = Model(model, prefix=prefix) | 436 newmodel = Model(model, prefix=prefix) |
168 elif isinstance(model, str): | 437 elif isinstance(model, str): |
169 if model == 'constant': | 438 if model == 'constant': # Par: c |
170 newmodel = ConstantModel(prefix=prefix) | 439 newmodel = ConstantModel(prefix=prefix) |
171 elif model == 'linear': | 440 new_parameter_norms[f'{pprefix}c'] = True |
441 self._linear_parameters.append(f'{pprefix}c') | |
442 elif model == 'linear': # Par: slope, intercept | |
172 newmodel = LinearModel(prefix=prefix) | 443 newmodel = LinearModel(prefix=prefix) |
173 elif model == 'quadratic': | 444 new_parameter_norms[f'{pprefix}slope'] = True |
445 new_parameter_norms[f'{pprefix}intercept'] = True | |
446 self._linear_parameters.append(f'{pprefix}slope') | |
447 self._linear_parameters.append(f'{pprefix}intercept') | |
448 elif model == 'quadratic': # Par: a, b, c | |
174 newmodel = QuadraticModel(prefix=prefix) | 449 newmodel = QuadraticModel(prefix=prefix) |
175 elif model == 'gaussian': | 450 new_parameter_norms[f'{pprefix}a'] = True |
451 new_parameter_norms[f'{pprefix}b'] = True | |
452 new_parameter_norms[f'{pprefix}c'] = True | |
453 self._linear_parameters.append(f'{pprefix}a') | |
454 self._linear_parameters.append(f'{pprefix}b') | |
455 self._linear_parameters.append(f'{pprefix}c') | |
456 elif model == 'gaussian': # Par: amplitude, center, sigma (fwhm, height) | |
176 newmodel = GaussianModel(prefix=prefix) | 457 newmodel = GaussianModel(prefix=prefix) |
177 elif model == 'step': | 458 new_parameter_norms[f'{pprefix}amplitude'] = True |
459 new_parameter_norms[f'{pprefix}center'] = False | |
460 new_parameter_norms[f'{pprefix}sigma'] = False | |
461 self._linear_parameters.append(f'{pprefix}amplitude') | |
462 self._nonlinear_parameters.append(f'{pprefix}center') | |
463 self._nonlinear_parameters.append(f'{pprefix}sigma') | |
464 # parameter norms for height and fwhm are needed to get correct errors | |
465 new_parameter_norms[f'{pprefix}height'] = True | |
466 new_parameter_norms[f'{pprefix}fwhm'] = False | |
467 elif model == 'lorentzian': # Par: amplitude, center, sigma (fwhm, height) | |
468 newmodel = LorentzianModel(prefix=prefix) | |
469 new_parameter_norms[f'{pprefix}amplitude'] = True | |
470 new_parameter_norms[f'{pprefix}center'] = False | |
471 new_parameter_norms[f'{pprefix}sigma'] = False | |
472 self._linear_parameters.append(f'{pprefix}amplitude') | |
473 self._nonlinear_parameters.append(f'{pprefix}center') | |
474 self._nonlinear_parameters.append(f'{pprefix}sigma') | |
475 # parameter norms for height and fwhm are needed to get correct errors | |
476 new_parameter_norms[f'{pprefix}height'] = True | |
477 new_parameter_norms[f'{pprefix}fwhm'] = False | |
478 elif model == 'exponential': # Par: amplitude, decay | |
479 newmodel = ExponentialModel(prefix=prefix) | |
480 new_parameter_norms[f'{pprefix}amplitude'] = True | |
481 new_parameter_norms[f'{pprefix}decay'] = False | |
482 self._linear_parameters.append(f'{pprefix}amplitude') | |
483 self._nonlinear_parameters.append(f'{pprefix}decay') | |
484 elif model == 'step': # Par: amplitude, center, sigma | |
178 form = kwargs.get('form') | 485 form = kwargs.get('form') |
179 if form is not None: | 486 if form is not None: |
180 del kwargs['form'] | 487 kwargs.pop('form') |
181 if form is None or form not in ('linear', 'atan', 'arctan', 'erf', 'logistic'): | 488 if form is None or form not in ('linear', 'atan', 'arctan', 'erf', 'logistic'): |
182 logging.error(f'Illegal form parameter for built-in step model ({form})') | 489 raise ValueError(f'Invalid parameter form for built-in step model ({form})') |
183 return kwargs | |
184 newmodel = StepModel(prefix=prefix, form=form) | 490 newmodel = StepModel(prefix=prefix, form=form) |
185 elif model == 'rectangle': | 491 new_parameter_norms[f'{pprefix}amplitude'] = True |
492 new_parameter_norms[f'{pprefix}center'] = False | |
493 new_parameter_norms[f'{pprefix}sigma'] = False | |
494 self._linear_parameters.append(f'{pprefix}amplitude') | |
495 self._nonlinear_parameters.append(f'{pprefix}center') | |
496 self._nonlinear_parameters.append(f'{pprefix}sigma') | |
497 elif model == 'rectangle': # Par: amplitude, center1, center2, sigma1, sigma2 | |
186 form = kwargs.get('form') | 498 form = kwargs.get('form') |
187 if form is not None: | 499 if form is not None: |
188 del kwargs['form'] | 500 kwargs.pop('form') |
189 if form is None or form not in ('linear', 'atan', 'arctan', 'erf', 'logistic'): | 501 if form is None or form not in ('linear', 'atan', 'arctan', 'erf', 'logistic'): |
190 logging.error(f'Illegal form parameter for built-in rectangle model ({form})') | 502 raise ValueError('Invalid parameter form for built-in rectangle model '+ |
191 return kwargs | 503 f'({form})') |
192 newmodel = RectangleModel(prefix=prefix, form=form) | 504 newmodel = RectangleModel(prefix=prefix, form=form) |
193 else: | 505 new_parameter_norms[f'{pprefix}amplitude'] = True |
194 logging.error('Unknown built-in fit model') | 506 new_parameter_norms[f'{pprefix}center1'] = False |
195 return kwargs | 507 new_parameter_norms[f'{pprefix}center2'] = False |
196 else: | 508 new_parameter_norms[f'{pprefix}sigma1'] = False |
197 illegal_value(model, 'model', 'add_model') | 509 new_parameter_norms[f'{pprefix}sigma2'] = False |
198 return kwargs | 510 self._linear_parameters.append(f'{pprefix}amplitude') |
511 self._nonlinear_parameters.append(f'{pprefix}center1') | |
512 self._nonlinear_parameters.append(f'{pprefix}center2') | |
513 self._nonlinear_parameters.append(f'{pprefix}sigma1') | |
514 self._nonlinear_parameters.append(f'{pprefix}sigma2') | |
515 elif model == 'expression': # Par: by expression | |
516 expr = kwargs['expr'] | |
517 if not isinstance(expr, str): | |
518 raise ValueError(f'Illegal "expr" value ({expr}) in {model}') | |
519 kwargs.pop('expr') | |
520 if parameter_norms is not None: | |
521 logging.warning('Ignoring parameter_norms (normalization determined from '+ | |
522 'linearity)') | |
523 if parameters is not None: | |
524 for par in parameters: | |
525 if par.get('expr') is not None: | |
526 raise KeyError(f'Illegal "expr" key ({par.get("expr")}) in parameter '+ | |
527 f'({par}) for an expression model') | |
528 if par.get('norm') is not None: | |
529 logging.warning(f'Ignoring "norm" key in parameter ({par}) '+ | |
530 '(normalization determined from linearity)') | |
531 par.pop('norm') | |
532 name = par['name'] | |
533 if not isinstance(name, str): | |
534 raise ValueError(f'Illegal "name" value ({name}) in input parameters') | |
535 ast = Interpreter() | |
536 expr_parameters = [name for name in get_ast_names(ast.parse(expr)) | |
537 if name != 'x' and name not in self._parameters | |
538 and name not in ast.symtable] | |
539 # print(f'\nexpr_parameters: {expr_parameters}') | |
540 # print(f'expr = {expr}') | |
541 if prefix is None: | |
542 newmodel = ExpressionModel(expr=expr) | |
543 else: | |
544 for name in expr_parameters: | |
545 expr = sub(rf'\b{name}\b', f'{prefix}{name}', expr) | |
546 expr_parameters = [f'{prefix}{name}' for name in expr_parameters] | |
547 # print(f'\nexpr_parameters: {expr_parameters}') | |
548 # print(f'expr = {expr}') | |
549 newmodel = ExpressionModel(expr=expr, name=name) | |
550 # print(f'\nnewmodel = {newmodel.__dict__}') | |
551 # print(f'params_names = {newmodel._param_names}') | |
552 # print(f'params_names = {newmodel.param_names}') | |
553 # Remove already existing names | |
554 for name in newmodel.param_names.copy(): | |
555 if name not in expr_parameters: | |
556 newmodel._func_allargs.remove(name) | |
557 newmodel._param_names.remove(name) | |
558 # print(f'params_names = {newmodel._param_names}') | |
559 # print(f'params_names = {newmodel.param_names}') | |
560 else: | |
561 raise ValueError(f'Unknown built-in fit model ({model})') | |
562 else: | |
563 illegal_value(model, 'model', 'Fit.add_model', raise_error=True) | |
199 | 564 |
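The prefix handling for expression models above rests on word-boundary substitution with re.sub; in isolation (expression and prefix are made up):

```python
from re import sub

expr = 'amplitude*exp(-x/decay) + decay_offset'
for name in ('amplitude', 'decay'):
    expr = sub(rf'\b{name}\b', f'p1_{name}', expr)
print(expr)  # p1_amplitude*exp(-x/p1_decay) + decay_offset ('decay_offset' untouched)
```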
200 # Add the new model to the current one | 565 # Add the new model to the current one |
566 # print('\nBefore adding model:') | |
567 # print(f'\nnewmodel = {newmodel.__dict__}') | |
568 # if len(self._parameters): | |
569 # self._parameters.pretty_print() | |
201 if self._model is None: | 570 if self._model is None: |
202 self._model = newmodel | 571 self._model = newmodel |
203 else: | 572 else: |
204 self._model += newmodel | 573 self._model += newmodel |
205 if self._parameters is None: | 574 new_parameters = newmodel.make_params() |
206 self._parameters = newmodel.make_params() | 575 self._parameters += new_parameters |
207 else: | |
208 self._parameters += newmodel.make_params() | |
209 # print('\nAfter adding model:') | 576 # print('\nAfter adding model:') |
577 # print(f'\nnewmodel = {newmodel.__dict__}') | |
578 # print(f'\nnew_parameters = {new_parameters}') | |
210 # self._parameters.pretty_print() | 579 # self._parameters.pretty_print() |
211 | 580 |
212 # Initialize the model parameters | 581 # Check linearity of expression model parameters |
582 if isinstance(newmodel, ExpressionModel): | |
583 for name in newmodel.param_names: | |
584 if not diff(newmodel.expr, name, name): | |
585 if name not in self._linear_parameters: | |
586 self._linear_parameters.append(name) | |
587 new_parameter_norms[name] = True | |
588 # print(f'\nADDING {name} TO LINEAR') | |
589 else: | |
590 if name not in self._nonlinear_parameters: | |
591 self._nonlinear_parameters.append(name) | |
592 new_parameter_norms[name] = False | |
593 # print(f'\nADDING {name} TO NONLINEAR') | |
594 # print(f'new_parameter_norms:\n{new_parameter_norms}') | |
595 | |
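The linearity test mirrors the sympy.diff usage above: a parameter is treated as linear when the second derivative of the expression with respect to it is identically zero:

```python
from sympy import diff

expr = 'a*x + b*exp(-x/c)'
print(bool(diff(expr, 'a', 'a')))  # False: zero second derivative, linear in a
print(bool(diff(expr, 'c', 'c')))  # True: nonzero, non-linear in c
```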
596 # Scale the default initial model parameters | |
597 if self._norm is not None: | |
598 for name, norm in new_parameter_norms.copy().items(): | |
599 par = self._parameters.get(name) | |
600 if par is None: | |
601 new_parameter_norms.pop(name) | |
602 continue | |
603 if par.expr is None and norm: | |
604 value = par.value*self._norm[1] | |
605 _min = par.min | |
606 _max = par.max | |
607 if not np.isinf(_min) and abs(_min) != float_min: | |
608 _min *= self._norm[1] | |
609 if not np.isinf(_max) and abs(_max) != float_min: | |
610 _max *= self._norm[1] | |
611 par.set(value=value, min=_min, max=_max) | |
612 # print('\nAfter norm defaults:') | |
613 # self._parameters.pretty_print() | |
614 # print(f'parameters:\n{parameters}') | |
615 # print(f'all_parameters:\n{list(self.parameters)}') | |
616 # print(f'new_parameter_norms:\n{new_parameter_norms}') | |
617 # print(f'parameter_norms:\n{self._parameter_norms}') | |
618 | |
619 # Initialize the model parameters from parameters | |
213 if prefix is None: | 620 if prefix is None: |
214 prefix = "" | 621 prefix = "" |
215 if parameters is not None: | 622 if parameters is not None: |
216 if not isinstance(parameters, (tuple, list)): | |
217 illegal_value(parameters, 'parameters', 'add_model') | |
218 return kwargs | |
219 for parameter in parameters: | 623 for parameter in parameters: |
220 if not isinstance(parameter, dict): | 624 name = parameter['name'] |
221 illegal_value(parameter, 'parameter in parameters', 'add_model') | 625 if not isinstance(name, str): |
222 return kwargs | 626 raise ValueError(f'Illegal "name" value ({name}) in input parameters') |
223 parameter['name'] = prefix+parameter['name'] | 627 if name not in new_parameters: |
224 self._parameters.add(**parameter) | 628 name = prefix+name |
225 for name, value in kwargs.items(): | 629 parameter['name'] = name |
226 if isinstance(value, (int, float)): | 630 if name not in new_parameters: |
227 self._parameters.add(prefix+name, value=value) | 631 logging.warning(f'Ignoring superfluous parameter info for {name}') |
228 # print('\nAt end add_model:') | 632 continue |
633 if name in self._parameters: | |
634 parameter.pop('name') | |
635 if 'norm' in parameter: | |
636 if not isinstance(parameter['norm'], bool): | |
637 illegal_value(parameter['norm'], 'norm', 'Fit.add_model', | |
638 raise_error=True) | |
639 new_parameter_norms[name] = parameter['norm'] | |
640 parameter.pop('norm') | |
641 if parameter.get('expr') is not None: | |
642 if 'value' in parameter: | |
643 logging.warning(f'Ignoring value in parameter {name} '+ | |
644 f'(set by expression: {parameter["expr"]})') | |
645 parameter.pop('value') | |
646 if 'vary' in parameter: | |
647 logging.warning(f'Ignoring vary in parameter {name} '+ | |
648 f'(set by expression: {parameter["expr"]})') | |
649 parameter.pop('vary') | |
650 if 'min' in parameter: | |
651 logging.warning(f'Ignoring min in parameter {name} '+ | |
652 f'(set by expression: {parameter["expr"]})') | |
653 parameter.pop('min') | |
654 if 'max' in parameter: | |
655 logging.warning(f'Ignoring max in parameter {name} '+ | |
656 f'(set by expression: {parameter["expr"]})') | |
657 parameter.pop('max') | |
658 if 'vary' in parameter: | |
659 if not isinstance(parameter['vary'], bool): | |
660 illegal_value(parameter['vary'], 'vary', 'Fit.add_model', | |
661 raise_error=True) | |
662 if not parameter['vary']: | |
663 if 'min' in parameter: | |
664 logging.warning(f'Ignoring min in parameter {name} in '+ | |
665 f'Fit.add_model (vary = {parameter["vary"]})') | |
666 parameter.pop('min') | |
667 if 'max' in parameter: | |
668 logging.warning(f'Ignoring max in parameter {name} in '+ | |
669 f'Fit.add_model (vary = {parameter["vary"]})') | |
670 parameter.pop('max') | |
671 self._parameters[name].set(**parameter) | |
672 parameter['name'] = name | |
673 else: | |
674 illegal_value(parameter, 'parameter name', 'Fit.model', raise_error=True) | |
675 self._parameter_norms = {**self._parameter_norms, **new_parameter_norms} | |
676 # print('\nAfter parameter init:') | |
229 # self._parameters.pretty_print() | 677 # self._parameters.pretty_print() |
230 | 678 # print(f'parameters:\n{parameters}') |
231 return kwargs | 679 # print(f'new_parameter_norms:\n{new_parameter_norms}') |
680 # print(f'parameter_norms:\n{self._parameter_norms}') | |
681 # print(f'kwargs:\n{kwargs}') | |
682 | |
683 # Initialize the model parameters from kwargs | |
684 for name, value in {**kwargs}.items(): | |
685 full_name = f'{pprefix}{name}' | |
686 if full_name in new_parameter_norms and isinstance(value, (int, float)): | |
687 kwargs.pop(name) | |
688 if self._parameters[full_name].expr is None: | |
689 self._parameters[full_name].set(value=value) | |
690 else: | |
691 logging.warning(f'Ignoring parameter {name} in Fit.fit (set by expression: '+ | |
692 f'{self._parameters[full_name].expr})') | |
693 # print('\nAfter kwargs init:') | |
694 # self._parameters.pretty_print() | |
695 # print(f'parameter_norms:\n{self._parameter_norms}') | |
696 # print(f'kwargs:\n{kwargs}') | |
697 | |
698 # Check parameter norms (also need it for expressions to renormalize the errors) | |
699 if self._norm is not None and (callable(model) or model == 'expression'): | |
700 missing_norm = False | |
701 for name in new_parameters.valuesdict(): | |
702 if name not in self._parameter_norms: | |
703 print(f'new_parameters:\n{new_parameters.valuesdict()}') | |
704 print(f'self._parameter_norms:\n{self._parameter_norms}') | |
705 logging.error(f'Missing parameter normalization type for {name} in {model}') | |
706 missing_norm = True | |
707 if missing_norm: | |
708 raise ValueError | |
709 | |
710 # print(f'at end add_model:\nself._parameters:\n{list(self.parameters)}') | |
711 # print(f'at end add_model: kwargs = {kwargs}') | |
712 # print(f'\nat end add_model: newmodel:\n{newmodel.__dict__}\n') | |
713 return(kwargs) | |
232 | 714 |
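Taken together, a hedged sketch of composing built-in models through add_model (prefixes and initial values are made up):

```python
fit = Fit(y, x=x)
fit.add_model('constant', prefix='bkg_')
fit.add_model('gaussian', prefix='peak1_',
              parameters=({'name': 'center', 'value': 0.0},))
fit.fit()
print(fit.best_values)  # keys like 'bkg_c', 'peak1_amplitude', ...
```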
233 def fit(self, interactive=False, guess=False, **kwargs): | 715 def fit(self, interactive=False, guess=False, **kwargs): |
716 # Check inputs | |
234 if self._model is None: | 717 if self._model is None: |
235 logging.error('Undefined fit model') | 718 logging.error('Undefined fit model') |
236 return | 719 return |
237 # Current parameter values | 720 if not isinstance(interactive, bool): |
238 pars = self._parameters.valuesdict() | 721 illegal_value(interactive, 'interactive', 'Fit.fit', raise_error=True) |
722 if not isinstance(guess, bool): | |
723 illegal_value(guess, 'guess', 'Fit.fit', raise_error=True) | |
724 if 'try_linear_fit' in kwargs: | |
725 try_linear_fit = kwargs.pop('try_linear_fit') | |
726 if not isinstance(try_linear_fit, bool): | |
727 illegal_value(try_linear_fit, 'try_linear_fit', 'Fit.fit', raise_error=True) | |
728 if not self._try_linear_fit: | |
729 logging.warning('Ignore superfluous keyword argument "try_linear_fit" (not '+ | |
730 'yet supported for callable models)') | |
731 else: | |
732 self._try_linear_fit = try_linear_fit | |
733 # if self._result is None: | |
734 # if 'parameters' in kwargs: | |
735 # raise ValueError('Invalid parameter parameters ({kwargs["parameters"]})') | |
736 # else: | |
737 if self._result is not None: | |
738 if guess: | |
739 logging.warning('Ignoring input parameter guess in Fit.fit during refitting') | |
740 guess = False | |
741 | |
742 # Check for circular expressions | |
743 # FIX TODO | |
744 # for name1, par1 in self._parameters.items(): | |
745 # if par1.expr is not None: | |
746 | |
747 # Apply mask if supplied: | |
748 if 'mask' in kwargs: | |
749 self._mask = kwargs.pop('mask') | |
750 if self._mask is not None: | |
751 self._mask = np.asarray(self._mask).astype(bool) | |
752 if self._x.size != self._mask.size: | |
753 raise ValueError(f'Inconsistent x and mask dimensions ({self._x.size} vs '+ | |
754 f'{self._mask.size})') | |
755 | |
756 # Estimate initial parameters with built-in lmfit guess method (only for a single model) | |
757 # print(f'\nat start fit: kwargs = {kwargs}') | |
758 #RV print('\nAt start of fit:') | |
759 #RV self._parameters.pretty_print() | |
760 # print(f'parameter_norms:\n{self._parameter_norms}') | |
761 if guess: | |
762 if self._mask is None: | |
763 self._parameters = self._model.guess(self._y, x=self._x) | |
764 else: | |
765 self._parameters = self._model.guess(np.asarray(self._y)[~self._mask], | |
766 x=self._x[~self._mask]) | |
767 # print('\nAfter guess:') | |
768 # self._parameters.pretty_print() | |
769 | |
770 # Add constant offset for a normalized model | |
771 if self._result is None and self._norm is not None and self._norm[0]: | |
772 self.add_model('constant', prefix='tmp_normalization_offset_', parameters={'name': 'c', | |
773 'value': -self._norm[0], 'vary': False, 'norm': True}) | |
774 #'value': -self._norm[0]/self._norm[1], 'vary': False, 'norm': False}) | |
775 | |
776 # Adjust existing parameters for refit: | |
777 if 'parameters' in kwargs: | |
778 parameters = kwargs.pop('parameters') | |
779 if isinstance(parameters, dict): | |
780 parameters = (parameters, ) | |
781 elif not is_dict_series(parameters): | |
782 illegal_value(parameters, 'parameters', 'Fit.fit', raise_error=True) | |
783 for par in parameters: | |
784 name = par['name'] | |
785 if name not in self._parameters: | |
786 raise ValueError(f'Unable to match {name} parameter {par} to an existing one') | |
787 if self._parameters[name].expr is not None: | |
788 raise ValueError(f'Unable to modify {name} parameter {par} (currently an '+ | |
789 'expression)') | |
790 if par.get('expr') is not None: | |
791 raise KeyError(f'Illegal "expr" key in {name} parameter {par}') | |
792 self._parameters[name].set(vary=par.get('vary')) | |
793 self._parameters[name].set(min=par.get('min')) | |
794 self._parameters[name].set(max=par.get('max')) | |
795 self._parameters[name].set(value=par.get('value')) | |
796 #RV print('\nAfter adjust:') | |
797 #RV self._parameters.pretty_print() | |
798 | |
239 # Apply parameter updates through keyword arguments | 799 # Apply parameter updates through keyword arguments |
240 for par in set(pars) & set(kwargs): | 800 # print(f'kwargs = {kwargs}') |
241 pars[par] = kwargs.pop(par) | 801 # print(f'parameter_norms = {self._parameter_norms}') |
242 self._parameters[par].set(value=pars[par]) | 802 for name in set(self._parameters) & set(kwargs): |
803 value = kwargs.pop(name) | |
804 if self._parameters[name].expr is None: | |
805 self._parameters[name].set(value=value) | |
806 else: | |
807 logging.warning(f'Ignoring parameter {name} in Fit.fit (set by expression: '+ | |
808 f'{self._parameters[name].expr})') | |
809 | |
243 # Check for uninitialized parameters | 810 # Check for uninitialized parameters |
244 for par, value in pars.items(): | 811 for name, par in self._parameters.items(): |
245 if value is None or np.isinf(value) or np.isnan(value): | 812 if par.expr is None: |
246 if interactive: | 813 value = par.value |
247 self._parameters[par].set(value= | 814 if value is None or np.isinf(value) or np.isnan(value): |
248 input_num(f'Enter an initial value for {par}: ')) | 815 if interactive: |
816 value = input_num(f'Enter an initial value for {name}', default=1.0) | |
817 else: | |
818 value = 1.0 | |
819 if self._norm is None or name not in self._parameter_norms: | |
820 self._parameters[name].set(value=value) | |
821 elif self._parameter_norms[name]: | |
822 self._parameters[name].set(value=value*self._norm[1]) | |
823 | |
824 # Check if model is linear | |
825 try: | |
826 linear_model = self._check_linearity_model() | |
827 except: | |
828 linear_model = False | |
829 # print(f'\n\n--------> linear_model = {linear_model}\n') | |
830 if kwargs.get('check_only_linearity') is not None: | |
831 return(linear_model) | |
832 | |
833 # Normalize the data and initial parameters | |
834 #RV print('\nBefore normalization:') | |
835 #RV self._parameters.pretty_print() | |
836 # print(f'parameter_norms:\n{self._parameter_norms}') | |
837 self._normalize() | |
838 # print(f'norm = {self._norm}') | |
839 #RV print('\nAfter normalization:') | |
840 #RV self._parameters.pretty_print() | |
841 # self.print_fit_report() | |
842 # print(f'parameter_norms:\n{self._parameter_norms}') | |
843 | |
844 if linear_model: | |
845 # Perform a linear fit by direct matrix solution with numpy | |
846 try: | |
847 if self._mask is None: | |
848 self._fit_linear_model(self._x, self._y_norm) | |
249 else: | 849 else: |
250 self._parameters[par].set(value=1.0) | 850 self._fit_linear_model(self._x[~self._mask], |
251 # print('\nAt start actual fit:') | 851 np.asarray(self._y_norm)[~self._mask]) |
252 # print(f'kwargs = {kwargs}') | 852 except: |
253 # self._parameters.pretty_print() | 853 linear_model = False |
254 # print(f'parameters:\n{self._parameters}') | 854 if not linear_model: |
255 # print(f'x = {self._x}') | 855 # Perform a non-linear fit with lmfit |
256 # print(f'len(x) = {len(self._x)}') | 856 # Prevent initial values from sitting at boundaries |
257 # print(f'y = {self._y}') | 857 self._parameter_bounds = {name:{'min': par.min, 'max': par.max} for name, par in |
258 # print(f'len(y) = {len(self._y)}') | 858 self._parameters.items() if par.vary} |
259 if guess: | 859 for par in self._parameters.values(): |
260 self._parameters = self._model.guess(self._y, x=self._x) | 860 if par.vary: |
261 self._result = self._model.fit(self._y, self._parameters, x=self._x, **kwargs) | 861 par.set(value=self._reset_par_at_boundary(par, par.value)) |
262 # print('\nAt end actual fit:') | 862 # print('\nAfter checking boundaries:') |
263 # print(f'var_names:\n{self._result.var_names}') | 863 # self._parameters.pretty_print() |
264 # print(f'stderr:\n{np.sqrt(np.diagonal(self._result.covar))}') | 864 |
265 # self._parameters.pretty_print() | 865 # Perform the fit |
266 # print(f'parameters:\n{self._parameters}') | 866 # fit_kws = None |
267 | 867 # if 'Dfun' in kwargs: |
268 def plot(self): | 868 # fit_kws = {'Dfun': kwargs.pop('Dfun')} |
269 if self._result is None: | 869 # self._result = self._model.fit(self._y_norm, self._parameters, x=self._x, |
870 # fit_kws=fit_kws, **kwargs) | |
871 if self._mask is None: | |
872 self._result = self._model.fit(self._y_norm, self._parameters, x=self._x, **kwargs) | |
873 else: | |
874 self._result = self._model.fit(np.asarray(self._y_norm)[~self._mask], | |
875 self._parameters, x=self._x[~self._mask], **kwargs) | |
876 #RV print('\nAfter fit:') | |
877 # print(f'\nself._result ({self._result}):\n\t{self._result.__dict__}') | |
878 #RV self._parameters.pretty_print() | |
879 # self.print_fit_report() | |
880 | |
881 # Set internal parameter values to fit results upon success | |
882 if self.success: | |
883 for name, par in self._parameters.items(): | |
884 if par.expr is None and par.vary: | |
885 par.set(value=self._result.params[name].value) | |
886 # print('\nAfter update parameter values:') | |
887 # self._parameters.pretty_print() | |
888 | |
889 # Renormalize the data and results | |
890 self._renormalize() | |
891 #RV print('\nAfter renormalization:') | |
892 #RV self._parameters.pretty_print() | |
893 # self.print_fit_report() | |
894 | |
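A hypothetical refit after an initial pass, exercising the 'mask' and 'parameters' keyword paths handled above ('peak1_center' is a made-up name):

```python
import numpy as np

fit.fit()                                # initial fit
mask = np.zeros(fit.x.size, dtype=bool)
mask[:10] = True                         # exclude the first 10 points
fit.fit(mask=mask,
        parameters=({'name': 'peak1_center', 'value': 0.1, 'vary': True},))
```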
895 def plot(self, y=None, y_title=None, result=None, skip_init=False, plot_comp_legends=False, | |
896 plot_residual=False, plot_masked_data=True, **kwargs): | |
897 if result is None: | |
898 result = self._result | |
899 if result is None: | |
270 return | 900 return |
271 components = self._result.eval_components() | 901 plots = [] |
272 plots = ((self._x, self._y, '.'), (self._x, self._result.best_fit, 'k-'), | 902 legend = [] |
273 (self._x, self._result.init_fit, 'g-')) | 903 if self._mask is None: |
274 legend = ['data', 'best fit', 'init'] | 904 mask = np.zeros(self._x.size).astype(bool) |
275 if len(components) > 1: | 905 plot_masked_data = False |
906 else: | |
907 mask = self._mask | |
908 if y is not None: | |
909 if not isinstance(y, (tuple, list, np.ndarray)): | |
910 illegal_value(y, 'y', 'Fit.plot') | |
911 if len(y) != len(self._x): | |
912 logging.warning('Ignoring parameter y in Fit.plot (wrong dimension)') | |
913 y = None | |
914 if y is not None: | |
915 if y_title is None or not isinstance(y_title, str): | |
916 y_title = 'data' | |
917 plots += [(self._x, y, '.')] | |
918 legend += [y_title] | |
919 if self._y is not None: | |
920 plots += [(self._x, np.asarray(self._y), 'b.')] | |
921 legend += ['data'] | |
922 if plot_masked_data: | |
923 plots += [(self._x[mask], np.asarray(self._y)[mask], 'bx')] | |
924 legend += ['masked data'] | |
925 if isinstance(plot_residual, bool) and plot_residual: | |
926 plots += [(self._x[~mask], result.residual, 'k-')] | |
927 legend += ['residual'] | |
928 plots += [(self._x[~mask], result.best_fit, 'k-')] | |
929 legend += ['best fit'] | |
930 if not skip_init and hasattr(result, 'init_fit'): | |
931 plots += [(self._x[~mask], result.init_fit, 'g-')] | |
932 legend += ['init'] | |
933 components = result.eval_components(x=self._x[~mask]) | |
934 num_components = len(components) | |
935 if 'tmp_normalization_offset_' in components: | |
936 num_components -= 1 | |
937 if num_components > 1: | |
938 eval_index = 0 | |
276 for modelname, y in components.items(): | 939 for modelname, y in components.items(): |
940 if modelname == 'tmp_normalization_offset_': | |
941 continue | |
942 if modelname == '_eval': | |
943 modelname = f'eval{eval_index}' | |
944 if len(modelname) > 20: | |
945 modelname = f'{modelname[0:16]} ...' | |
277 if isinstance(y, (int, float)): | 946 if isinstance(y, (int, float)): |
278 y *= np.ones(len(self._x)) | 947 y *= np.ones(self._x[~mask].size) |
279 plots += ((self._x, y, '--'),) | 948 plots += [(self._x[~mask], y, '--')] |
280 # if modelname[-1] == '_': | 949 if plot_comp_legends: |
281 # legend.append(modelname[:-1]) | 950 if modelname[-1] == '_': |
282 # else: | 951 legend.append(modelname[:-1]) |
283 # legend.append(modelname) | 952 else: |
284 quickPlot(plots, legend=legend, block=True) | 953 legend.append(modelname) |
954 title = kwargs.get('title') | |
955 if title is not None: | |
956 kwargs.pop('title') | |
957 quick_plot(tuple(plots), legend=legend, title=title, block=True, **kwargs) | |
285 | 958 |
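A hypothetical call of the reworked plot signature; 'title' is popped from kwargs and forwarded to quick_plot:

```python
fit.plot(skip_init=True, plot_comp_legends=True, plot_residual=True,
         title='composite fit')
```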
286 @staticmethod | 959 @staticmethod |
287 def guess_init_peak(x, y, *args, center_guess=None, use_max_for_center=True): | 960 def guess_init_peak(x, y, *args, center_guess=None, use_max_for_center=True): |
288 """ Return a guess for the initial height, center and fwhm for a peak | 961 """ Return a guess for the initial height, center and fwhm for a peak |
289 """ | 962 """ |
963 # print(f'\n\nargs = {args}') | |
964 # print(f'center_guess = {center_guess}') | |
965 # quick_plot(x, y, vlines=center_guess, block=True) | |
290 center_guesses = None | 966 center_guesses = None |
967 x = np.asarray(x) | |
968 y = np.asarray(y) | |
291 if len(x) != len(y): | 969 if len(x) != len(y): |
292 logging.error(f'Illegal x and y lengths ({len(x)}, {len(y)}), skip initial guess') | 970 logging.error(f'Invalid x and y lengths ({len(x)}, {len(y)}), skip initial guess') |
293 return None, None, None | 971 return(None, None, None) |
294 if isinstance(center_guess, (int, float)): | 972 if isinstance(center_guess, (int, float)): |
295 if len(args): | 973 if len(args): |
296 logging.warning('Ignoring additional arguments for single center_guess value') | 974 logging.warning('Ignoring additional arguments for single center_guess value') |
975 center_guesses = [center_guess] | |
297 elif isinstance(center_guess, (tuple, list, np.ndarray)): | 976 elif isinstance(center_guess, (tuple, list, np.ndarray)): |
298 if len(center_guess) == 1: | 977 if len(center_guess) == 1: |
299 logging.warning('Ignoring additional arguments for single center_guess value') | 978 logging.warning('Ignoring additional arguments for single center_guess value') |
300 if not isinstance(center_guess[0], (int, float)): | 979 if not isinstance(center_guess[0], (int, float)): |
301 raise ValueError(f'Illegal center_guess type ({type(center_guess[0])})') | 980 raise ValueError(f'Invalid parameter center_guess ({type(center_guess[0])})') |
302 center_guess = center_guess[0] | 981 center_guess = center_guess[0] |
303 else: | 982 else: |
304 if len(args) != 1: | 983 if len(args) != 1: |
305 raise ValueError(f'Illegal number of arguments ({len(args)})') | 984 raise ValueError(f'Invalid number of arguments ({len(args)})') |
306 n = args[0] | 985 n = args[0] |
307 if not is_index(n, 0, len(center_guess)): | 986 if not is_index(n, 0, len(center_guess)): |
308 raise ValueError('Illegal argument') | 987 raise ValueError('Invalid argument') |
309 center_guesses = center_guess | 988 center_guesses = center_guess |
310 center_guess = center_guesses[n] | 989 center_guess = center_guesses[n] |
311 elif center_guess is not None: | 990 elif center_guess is not None: |
312 raise ValueError(f'Illegal center_guess type ({type(center_guess)})') | 991 raise ValueError(f'Invalid center_guess type ({type(center_guess)})') |
992 # print(f'x = {x}') | |
993 # print(f'y = {y}') | |
994 # print(f'center_guess = {center_guess}') | |
313 | 995 |
314 # Sort the inputs | 996 # Sort the inputs |
315 index = np.argsort(x) | 997 index = np.argsort(x) |
316 x = x[index] | 998 x = x[index] |
317 y = y[index] | 999 y = y[index] |
318 miny = y.min() | 1000 miny = y.min() |
319 # print(f'miny = {miny}') | 1001 # print(f'miny = {miny}') |
320 # print(f'x_range = {x[0]} {x[-1]} {len(x)}') | 1002 # print(f'x_range = {x[0]} {x[-1]} {len(x)}') |
321 # print(f'y_range = {y[0]} {y[-1]} {len(y)}') | 1003 # print(f'y_range = {y[0]} {y[-1]} {len(y)}') |
1004 # quick_plot(x, y, vlines=center_guess, block=True) | |
322 | 1005 |
323 # xx = x | 1006 # xx = x |
324 # yy = y | 1007 # yy = y |
325 # Set range for current peak | 1008 # Set range for current peak |
1009 # print(f'n = {n}') | |
326 # print(f'center_guesses = {center_guesses}') | 1010 # print(f'center_guesses = {center_guesses}') |
327 if center_guesses is not None: | 1011 if center_guesses is not None: |
1012 if len(center_guesses) > 1: | |
1013 index = np.argsort(center_guesses) | |
1014 n = list(index).index(n) | |
1015 # print(f'n = {n}') | |
1016 # print(f'index = {index}') | |
1017 center_guesses = np.asarray(center_guesses)[index] | |
1018 # print(f'center_guesses = {center_guesses}') | |
328 if n == 0: | 1019 if n == 0: |
329 low = 0 | 1020 low = 0 |
330 upp = index_nearest(x, (center_guesses[0]+center_guesses[1])/2) | 1021 upp = index_nearest(x, (center_guesses[0]+center_guesses[1])/2) |
331 elif n == len(center_guesses)-1: | 1022 elif n == len(center_guesses)-1: |
332 low = index_nearest(x, (center_guesses[n-1]+center_guesses[n])/2) | 1023 low = index_nearest(x, (center_guesses[n-1]+center_guesses[n])/2) |
336 upp = index_nearest(x, (center_guesses[n]+center_guesses[n+1])/2) | 1027 upp = index_nearest(x, (center_guesses[n]+center_guesses[n+1])/2) |
337 # print(f'low = {low}') | 1028 # print(f'low = {low}') |
338 # print(f'upp = {upp}') | 1029 # print(f'upp = {upp}') |
339 x = x[low:upp] | 1030 x = x[low:upp] |
340 y = y[low:upp] | 1031 y = y[low:upp] |
341 # quickPlot(x, y, vlines=(x[0], center_guess, x[-1]), block=True) | 1032 # quick_plot(x, y, vlines=(x[0], center_guess, x[-1]), block=True) |
342 | 1033 |
343 # Estimate FWHM | 1034 # Estimate FWHM |
344 maxy = y.max() | 1035 maxy = y.max() |
345 # print(f'x_range = {x[0]} {x[-1]} {len(x)}') | 1036 # print(f'x_range = {x[0]} {x[-1]} {len(x)}') |
346 # print(f'y_range = {y[0]} {y[-1]} {len(y)} {miny} {maxy}') | 1037 # print(f'y_range = {y[0]} {y[-1]} {len(y)} {miny} {maxy}') |
375 for i in range(center_index, fwhm_index2): | 1066 for i in range(center_index, fwhm_index2): |
376 if y[i] < half_height: | 1067 if y[i] < half_height: |
377 fwhm_index2 = i | 1068 fwhm_index2 = i |
378 break | 1069 break |
379 # print(f'fwhm_index2 = {fwhm_index2} {x[fwhm_index2]}') | 1070 # print(f'fwhm_index2 = {fwhm_index2} {x[fwhm_index2]}') |
380 # quickPlot((x,y,'o'), vlines=(x[fwhm_index1], center, x[fwhm_index2]), block=True) | 1071 # quick_plot((x,y,'o'), vlines=(x[fwhm_index1], center, x[fwhm_index2]), block=True) |
381 if fwhm_index1 == 0 and fwhm_index2 < len(x)-1: | 1072 if fwhm_index1 == 0 and fwhm_index2 < len(x)-1: |
382 fwhm = 2*(x[fwhm_index2]-center) | 1073 fwhm = 2*(x[fwhm_index2]-center) |
383 elif fwhm_index1 > 0 and fwhm_index2 == len(x)-1: | 1074 elif fwhm_index1 > 0 and fwhm_index2 == len(x)-1: |
384 fwhm = 2*(center-x[fwhm_index1]) | 1075 fwhm = 2*(center-x[fwhm_index1]) |
385 else: | 1076 else: |
387 # print(f'fwhm_index1 = {fwhm_index1} {x[fwhm_index1]}') | 1078 # print(f'fwhm_index1 = {fwhm_index1} {x[fwhm_index1]}') |
388 # print(f'fwhm_index2 = {fwhm_index2} {x[fwhm_index2]}') | 1079 # print(f'fwhm_index2 = {fwhm_index2} {x[fwhm_index2]}') |
389 # print(f'fwhm = {fwhm}') | 1080 # print(f'fwhm = {fwhm}') |
390 | 1081 |
391 # Return height, center and FWHM | 1082 # Return height, center and FWHM |
392 # quickPlot((x,y,'o'), (xx,yy), vlines=(x[fwhm_index1], center, x[fwhm_index2]), block=True) | 1083 # quick_plot((x,y,'o'), (xx,yy), vlines=(x[fwhm_index1], center, x[fwhm_index2]), block=True) |
393 return height, center, fwhm | 1084 return(height, center, fwhm) |
1085 | |
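The half-maximum estimate in guess_init_peak, reduced to a standalone sketch for a single well-sampled peak (function name is illustrative, not the module's):

```python
import numpy as np

def estimate_height_center_fwhm(x, y):
    x = np.asarray(x)
    y = np.asarray(y)
    height = y.max() - y.min()
    center = x[np.argmax(y)]
    half_height = y.min() + 0.5*height
    above = np.where(y >= half_height)[0]  # contiguous indices for a single peak
    fwhm = x[above[-1]] - x[above[0]]
    return height, center, fwhm
```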
1086 def _check_linearity_model(self): | |
1087 """Identify the linearity of all model parameters and check if the model is linear or not | |
1088 """ | |
1089 if not self._try_linear_fit: | |
1090 logging.info('Skip linearity check (not yet supported for callable models)') | |
1091 return(False) | |
1092 free_parameters = [name for name, par in self._parameters.items() if par.vary] | |
1093 for component in self._model.components: | |
1094 if 'tmp_normalization_offset_c' in component.param_names: | |
1095 continue | |
1096 if isinstance(component, ExpressionModel): | |
1097 for name in free_parameters: | |
1098 if diff(component.expr, name, name): | |
1099 # print(f'\t\t{component.expr} is non-linear in {name}') | |
1100 self._nonlinear_parameters.append(name) | |
1101 if name in self._linear_parameters: | |
1102 self._linear_parameters.remove(name) | |
1103 else: | |
1104 model_parameters = component.param_names.copy() | |
1105 for basename, hint in component.param_hints.items(): | |
1106 name = f'{component.prefix}{basename}' | |
1107 if hint.get('expr') is not None: | |
1108 model_parameters.remove(name) | |
1109 for name in model_parameters: | |
1110 expr = self._parameters[name].expr | |
1111 if expr is not None: | |
1112 for nname in free_parameters: | |
1113 if name in self._nonlinear_parameters: | |
1114 if diff(expr, nname): | |
1115 # print(f'\t\t{component} is non-linear in {nname} (through {name} = "{expr}")') | |
1116 self._nonlinear_parameters.append(nname) | |
1117 if nname in self._linear_parameters: | |
1118 self._linear_parameters.remove(nname) | |
1119 else: | |
1120 assert(name in self._linear_parameters) | |
1121 # print(f'\n\nexpr ({type(expr)}) = {expr}\nnname ({type(nname)}) = {nname}\n\n') | |
1122 if diff(expr, nname, nname): | |
1123 # print(f'\t\t{component} is non-linear in {nname} (through {name} = "{expr}")') | |
1124 self._nonlinear_parameters.append(nname) | |
1125 if nname in self._linear_parameters: | |
1126 self._linear_parameters.remove(nname) | |
1127 # print(f'\nfree parameters:\n\t{free_parameters}') | |
1128 # print(f'linear parameters:\n\t{self._linear_parameters}') | |
1129 # print(f'nonlinear parameters:\n\t{self._nonlinear_parameters}\n') | |
1130 if any(True for name in self._nonlinear_parameters if self._parameters[name].vary): | |
1131 return(False) | |
1132 return(True) | |
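The test above rests on a standard criterion: an expression is linear in a parameter exactly when its second derivative with respect to that parameter vanishes, which is what the repeated diff(expr, name, name) computes. A minimal sympy illustration (expression and names made up):

from sympy import diff

expr = 'amplitude*exp(-x/decay) + c'
print(bool(diff(expr, 'amplitude', 'amplitude')))  # False -> linear in amplitude
print(bool(diff(expr, 'decay', 'decay')))          # True -> non-linear in decay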
1133 | |
1134 def _fit_linear_model(self, x, y): | |
1135 """Perform a linear fit by direct matrix solution with numpy | |
1136 """ | |
1137 # Construct the matrix and the free parameter vector | |
1138 # print(f'\nparameters:') | |
1139 # self._parameters.pretty_print() | |
1140 # print(f'\nparameter_norms:\n\t{self._parameter_norms}') | |
1141 # print(f'\nlinear_parameters:\n\t{self._linear_parameters}') | |
1142 # print(f'nonlinear_parameters:\n\t{self._nonlinear_parameters}') | |
1143 free_parameters = [name for name, par in self._parameters.items() if par.vary] | |
1144 # print(f'free parameters:\n\t{free_parameters}\n') | |
1145 expr_parameters = {name:par.expr for name, par in self._parameters.items() | |
1146 if par.expr is not None} | |
1147 model_parameters = [] | |
1148 for component in self._model.components: | |
1149 if 'tmp_normalization_offset_c' in component.param_names: | |
1150 continue | |
1151 model_parameters += component.param_names | |
1152 for basename, hint in component.param_hints.items(): | |
1153 name = f'{component.prefix}{basename}' | |
1154 if hint.get('expr') is not None: | |
1155 expr_parameters.pop(name) | |
1156 model_parameters.remove(name) | |
1157 # print(f'expr parameters:\n{expr_parameters}') | |
1158 # print(f'model parameters:\n\t{model_parameters}\n') | |
1159 norm = 1.0 | |
1160 if self._normalized: | |
1161 norm = self._norm[1] | |
1162 # print(f'\n\nself._normalized = {self._normalized}\nnorm = {norm}\nself._norm = {self._norm}\n') | |
1163 # Add expression parameters to asteval | |
1164 ast = Interpreter() | |
1165 # print(f'Adding to asteval sym table:') | |
1166 for name, expr in expr_parameters.items(): | |
1167 # print(f'\tadding {name} {expr}') | |
1168 ast.symtable[name] = expr | |
1169 # Add constant parameters to asteval | |
1170 # (renormalize to use correctly in evaluation of expression models) | |
1171 for name, par in self._parameters.items(): | |
1172 if par.expr is None and not par.vary: | |
1173 if self._parameter_norms[name]: | |
1174 # print(f'\tadding {name} {par.value*norm}') | |
1175 ast.symtable[name] = par.value*norm | |
1176 else: | |
1177 # print(f'\tadding {name} {par.value}') | |
1178 ast.symtable[name] = par.value | |
1179 A = np.zeros((len(x), len(free_parameters)), dtype='float64') | |
1180 y_const = np.zeros(len(x), dtype='float64') | |
1181 have_expression_model = False | |
1182 for component in self._model.components: | |
1183 if isinstance(component, ConstantModel): | |
1184 name = component.param_names[0] | |
1185 # print(f'\nConstant model: {name} {self._parameters[name]}\n') | |
1186 if name in free_parameters: | |
1187 # print(f'\t\t{name} is a free constant set matrix column {free_parameters.index(name)} to 1.0') | |
1188 A[:,free_parameters.index(name)] = 1.0 | |
1189 else: | |
1190 if self._parameter_norms[name]: | |
1191 delta_y_const = self._parameters[name]*np.ones(len(x)) | |
1192 else: | |
1193 delta_y_const = (self._parameters[name]*norm)*np.ones(len(x)) | |
1194 y_const += delta_y_const | |
1195 # print(f'\ndelta_y_const ({type(delta_y_const)}):\n{delta_y_const}\n') | |
1196 elif isinstance(component, ExpressionModel): | |
1197 have_expression_model = True | |
1198 const_expr = component.expr | |
1199 # print(f'\nExpression model:\nconst_expr: {const_expr}\n') | |
1200 for name in free_parameters: | |
1201 dexpr_dname = diff(component.expr, name) | |
1202 if dexpr_dname: | |
1203 const_expr = f'{const_expr}-({str(dexpr_dname)})*{name}' | |
1204 # print(f'\tconst_expr: {const_expr}') | |
1205 if not self._parameter_norms[name]: | |
1206 dexpr_dname = f'({dexpr_dname})/{norm}' | |
1207 # print(f'\t{component.expr} is linear in {name}\n\t\tadd "{str(dexpr_dname)}" to matrix as column {free_parameters.index(name)}') | |
1208 fx = [(lambda _: ast.eval(str(dexpr_dname)))(ast(f'x={v}')) for v in x] | |
1209 # print(f'\tfx:\n{fx}') | |
1210 if len(ast.error): | |
1211 raise ValueError(f'Unable to evaluate {dexpr_dname}') | |
1212 A[:,free_parameters.index(name)] += fx | |
1213 # if self._parameter_norms[name]: | |
1214 # print(f'\t\t{component.expr} is linear in {name} add "{str(dexpr_dname)}" to matrix as column {free_parameters.index(name)}') | |
1215 # A[:,free_parameters.index(name)] += fx | |
1216 # else: | |
1217 # print(f'\t\t{component.expr} is linear in {name} add "({str(dexpr_dname)})/{norm}" to matrix as column {free_parameters.index(name)}') | |
1218 # A[:,free_parameters.index(name)] += np.asarray(fx)/norm | |
1219 # FIX: find another solution if expr not supported by simplify | |
1220 const_expr = str(simplify(f'({const_expr})/{norm}')) | |
1221 # print(f'\nconst_expr: {const_expr}') | |
1222 delta_y_const = [(lambda _: ast.eval(const_expr))(ast(f'x = {v}')) for v in x] | |
1223 y_const += delta_y_const | |
1224 # print(f'\ndelta_y_const ({type(delta_y_const)}):\n{delta_y_const}\n') | |
1225 if len(ast.error): | |
1226 raise ValueError(f'Unable to evaluate {const_expr}') | |
1227 else: | |
1228 free_model_parameters = [name for name in component.param_names | |
1229 if name in free_parameters or name in expr_parameters] | |
1230 # print(f'\nBuilt-in model ({component}):\nfree_model_parameters: {free_model_parameters}\n') | |
1231 if not len(free_model_parameters): | |
1232 y_const += component.eval(params=self._parameters, x=x) | |
1233 elif isinstance(component, LinearModel): | |
1234 if f'{component.prefix}slope' in free_model_parameters: | |
1235 A[:,free_parameters.index(f'{component.prefix}slope')] = x | |
1236 else: | |
1237 y_const += self._parameters[f'{component.prefix}slope'].value*x | |
1238 if f'{component.prefix}intercept' in free_model_parameters: | |
1239 A[:,free_parameters.index(f'{component.prefix}intercept')] = 1.0 | |
1240 else: | |
1241 y_const += self._parameters[f'{component.prefix}intercept'].value* \ | |
1242 np.ones(len(x)) | |
1243 elif isinstance(component, QuadraticModel): | |
1244 if f'{component.prefix}a' in free_model_parameters: | |
1245 A[:,free_parameters.index(f'{component.prefix}a')] = x**2 | |
1246 else: | |
1247 y_const += self._parameters[f'{component.prefix}a'].value*x**2 | |
1248 if f'{component.prefix}b' in free_model_parameters: | |
1249 A[:,free_parameters.index(f'{component.prefix}b')] = x | |
1250 else: | |
1251 y_const += self._parameters[f'{component.prefix}b'].value*x | |
1252 if f'{component.prefix}c' in free_model_parameters: | |
1253 A[:,free_parameters.index(f'{component.prefix}c')] = 1.0 | |
1254 else: | |
1255 y_const += self._parameters[f'{component.prefix}c'].value*np.ones(len(x)) | |
1256 else: | |
1257 # At this point each built-in model must be strictly proportional to each linear | |
1258 # model parameter. Without this assumption, the model equation itself is needed. | |
1259 # For the current built-in lmfit models, this can only ever be the amplitude | |
1260 assert(len(free_model_parameters) == 1) | |
1261 name = f'{component.prefix}amplitude' | |
1262 assert(free_model_parameters[0] == name) | |
1263 assert(self._parameter_norms[name]) | |
1264 expr = self._parameters[name].expr | |
1265 if expr is None: | |
1266 # print(f'\t{component} is linear in {name} add to matrix as column {free_parameters.index(name)}') | |
1267 parameters = deepcopy(self._parameters) | |
1268 parameters[name].set(value=1.0) | |
1269 index = free_parameters.index(name) | |
1270 A[:,free_parameters.index(name)] += component.eval(params=parameters, x=x) | |
1271 else: | |
1272 const_expr = expr | |
1273 # print(f'\tconst_expr: {const_expr}') | |
1274 parameters = deepcopy(self._parameters) | |
1275 parameters[name].set(value=1.0) | |
1276 dcomp_dname = component.eval(params=parameters, x=x) | |
1277 # print(f'\tdcomp_dname ({type(dcomp_dname)}):\n{dcomp_dname}') | |
1278 for nname in free_parameters: | |
1279 dexpr_dnname = diff(expr, nname) | |
1280 if dexpr_dnname: | |
1281 assert(self._parameter_norms[name]) | |
1282 # print(f'\t\td({expr})/d{nname} = {dexpr_dnname}') | |
1283 # print(f'\t\t{component} is linear in {nname} (through {name} = "{expr}", add to matrix as column {free_parameters.index(nname)})') | |
1284 fx = np.asarray(dexpr_dnname*dcomp_dname, dtype='float64') | |
1285 # print(f'\t\tfx ({type(fx)}): {fx}') | |
1286 # print(f'free_parameters.index({nname}): {free_parameters.index(nname)}') | |
1287 if self._parameter_norms[nname]: | |
1288 A[:,free_parameters.index(nname)] += fx | |
1289 else: | |
1290 A[:,free_parameters.index(nname)] += fx/norm | |
1291 const_expr = f'{const_expr}-({dexpr_dnname})*{nname}' | |
1292 # print(f'\t\tconst_expr: {const_expr}') | |
1293 const_expr = str(simplify(f'({const_expr})/{norm}')) | |
1294 # print(f'\tconst_expr: {const_expr}') | |
1295 fx = [(lambda _: ast.eval(const_expr))(ast(f'x = {v}')) for v in x] | |
1296 # print(f'\tfx: {fx}') | |
1297 delta_y_const = np.multiply(fx, dcomp_dname) | |
1298 y_const += delta_y_const | |
1299 # print(f'\ndelta_y_const ({type(delta_y_const)}):\n{delta_y_const}\n') | |
1300 # print(A) | |
1301 # print(y_const) | |
1302 solution, residual, rank, s = np.linalg.lstsq(A, y-y_const, rcond=None) | |
1303 # print(f'\nsolution ({type(solution)} {solution.shape}):\n\t{solution}') | |
1304 # print(f'\nresidual ({type(residual)} {residual.shape}):\n\t{residual}') | |
1305 # print(f'\nrank ({type(rank)} {rank.shape}):\n\t{rank}') | |
1306 # print(f'\ns ({type(s)} {s.shape}):\n\t{s}\n') | |
1307 | |
1308 # Assemble result (compensate for normalization in expression models) | |
1309 for name, value in zip(free_parameters, solution): | |
1310 self._parameters[name].set(value=value) | |
1311 if self._normalized and (have_expression_model or len(expr_parameters)): | |
1312 for name, norm in self._parameter_norms.items(): | |
1313 par = self._parameters[name] | |
1314 if par.expr is None and norm: | |
1315 self._parameters[name].set(value=par.value*self._norm[1]) | |
1316 # self._parameters.pretty_print() | |
1317 # print(f'\nself._parameter_norms:\n\t{self._parameter_norms}') | |
1318 self._result = ModelResult(self._model, deepcopy(self._parameters)) | |
1319 self._result.best_fit = self._model.eval(params=self._parameters, x=x) | |
1320 if self._normalized and (have_expression_model or len(expr_parameters)): | |
1321 if 'tmp_normalization_offset_c' in self._parameters: | |
1322 offset = self._parameters['tmp_normalization_offset_c'] | |
1323 else: | |
1324 offset = 0.0 | |
1325 self._result.best_fit = (self._result.best_fit-offset-self._norm[0])/self._norm[1] | |
1326 if self._normalized: | |
1327 for name, norm in self._parameter_norms.items(): | |
1328 par = self._parameters[name] | |
1329 if par.expr is None and norm: | |
1330 value = par.value/self._norm[1] | |
1331 self._parameters[name].set(value=value) | |
1332 self._result.params[name].set(value=value) | |
1333 # self._parameters.pretty_print() | |
1334 self._result.residual = self._result.best_fit-y | |
1335 self._result.components = self._model.components | |
1336 self._result.init_params = None | |
1337 # quick_plot((x, y, '.'), (x, y_const, 'g'), (x, self._result.best_fit, 'k'), (x, self._result.residual, 'r'), block=True) | |
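In essence the method reduces the model to y ≈ A·p + y_const, with one design-matrix column holding the derivative of the model with respect to each free linear parameter, and solves for p in a single least-squares step. A stripped-down illustration for a straight line (synthetic data; none of the normalization or expression handling):

import numpy as np

x = np.linspace(0.0, 1.0, 50)
y = 2.5*x + 1.0 + 0.01*np.random.randn(x.size)

# One column per free parameter: d(model)/d(slope) = x, d(model)/d(intercept) = 1
A = np.stack((x, np.ones(x.size)), axis=1)
y_const = np.zeros(x.size)  # contributions of fixed parameters (none here)
solution, residual, rank, s = np.linalg.lstsq(A, y-y_const, rcond=None)
slope, intercept = solution  # close to 2.5 and 1.0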
1338 | |
1339 def _normalize(self): | |
1340 """Normalize the data and initial parameters | |
1341 """ | |
1342 if self._normalized: | |
1343 return | |
1344 if self._norm is None: | |
1345 if self._y is not None and self._y_norm is None: | |
1346 self._y_norm = np.asarray(self._y) | |
1347 else: | |
1348 if self._y is not None and self._y_norm is None: | |
1349 self._y_norm = (np.asarray(self._y)-self._norm[0])/self._norm[1] | |
1350 self._y_range = 1.0 | |
1351 for name, norm in self._parameter_norms.items(): | |
1352 par = self._parameters[name] | |
1353 if par.expr is None and norm: | |
1354 value = par.value/self._norm[1] | |
1355 _min = par.min | |
1356 _max = par.max | |
1357 if not np.isinf(_min) and abs(_min) != float_min: | |
1358 _min /= self._norm[1] | |
1359 if not np.isinf(_max) and abs(_max) != float_min: | |
1360 _max /= self._norm[1] | |
1361 par.set(value=value, min=_min, max=_max) | |
1362 self._normalized = True | |
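A small worked example of the scaling applied above (numbers are illustrative): with norm = (y_min, y_max-y_min), the data map onto [0, 1] and amplitude-like parameters (those flagged in _parameter_norms) divide by the range:

import numpy as np

y = np.array([10.0, 12.0, 20.0])
norm = (y.min(), y.max()-y.min())  # (10.0, 10.0)
y_norm = (y-norm[0])/norm[1]       # [0.0, 0.2, 1.0]
amplitude_norm = 5.0/norm[1]       # an amplitude of 5.0 becomes 0.5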
1363 | |
1364 def _renormalize(self): | |
1365 """Renormalize the data and results | |
1366 """ | |
1367 if self._norm is None or not self._normalized: | |
1368 return | |
1369 self._normalized = False | |
1370 for name, norm in self._parameter_norms.items(): | |
1371 par = self._parameters[name] | |
1372 if par.expr is None and norm: | |
1373 value = par.value*self._norm[1] | |
1374 _min = par.min | |
1375 _max = par.max | |
1376 if not np.isinf(_min) and abs(_min) != float_min: | |
1377 _min *= self._norm[1] | |
1378 if not np.isinf(_max) and abs(_max) != float_min: | |
1379 _max *= self._norm[1] | |
1380 par.set(value=value, min=_min, max=_max) | |
1381 if self._result is None: | |
1382 return | |
1383 self._result.best_fit = self._result.best_fit*self._norm[1]+self._norm[0] | |
1384 for name, par in self._result.params.items(): | |
1385 if self._parameter_norms.get(name, False): | |
1386 if par.stderr is not None: | |
1387 par.stderr *= self._norm[1] | |
1388 if par.expr is None: | |
1389 _min = par.min | |
1390 _max = par.max | |
1391 value = par.value*self._norm[1] | |
1392 if par.init_value is not None: | |
1393 par.init_value *= self._norm[1] | |
1394 if not np.isinf(_min) and abs(_min) != float_min: | |
1395 _min *= self._norm[1] | |
1396 if not np.isinf(_max) and abs(_max) != float_min: | |
1397 _max *= self._norm[1] | |
1398 par.set(value=value, min=_min, max=_max) | |
1399 if hasattr(self._result, 'init_fit'): | |
1400 self._result.init_fit = self._result.init_fit*self._norm[1]+self._norm[0] | |
1401 if hasattr(self._result, 'init_values'): | |
1402 init_values = {} | |
1403 for name, value in self._result.init_values.items(): | |
1404 if name not in self._parameter_norms or self._parameters[name].expr is not None: | |
1405 init_values[name] = value | |
1406 elif self._parameter_norms[name]: | |
1407 init_values[name] = value*self._norm[1] | |
1408 self._result.init_values = init_values | |
1409 for name, par in self._result.init_params.items(): | |
1410 if par.expr is None and self._parameter_norms.get(name, False): | |
1411 value = par.value | |
1412 _min = par.min | |
1413 _max = par.max | |
1414 value *= self._norm[1] | |
1415 if not np.isinf(_min) and abs(_min) != float_min: | |
1416 _min *= self._norm[1] | |
1417 if not np.isinf(_max) and abs(_max) != float_min: | |
1418 _max *= self._norm[1] | |
1419 par.set(value=value, min=_min, max=_max) | |
1420 par.init_value = par.value | |
1421 # Don't renormalize chisqr, it has no useful meaning in physical units | |
1422 #self._result.chisqr *= self._norm[1]*self._norm[1] | |
1423 if self._result.covar is not None: | |
1424 for i, name in enumerate(self._result.var_names): | |
1425 if self._parameter_norms.get(name, False): | |
1426 for j in range(len(self._result.var_names)): | |
1427 if self._result.covar[i,j] is not None: | |
1428 self._result.covar[i,j] *= self._norm[1] | |
1429 if self._result.covar[j,i] is not None: | |
1430 self._result.covar[j,i] *= self._norm[1] | |
1431 # Don't renormalize redchi, it has no useful meaning in physical units | |
1432 #self._result.redchi *= self._norm[1]*self._norm[1] | |
1433 if self._result.residual is not None: | |
1434 self._result.residual *= self._norm[1] | |
1435 | |
1436 def _reset_par_at_boundary(self, par, value): | |
1437 assert(par.vary) | |
1438 name = par.name | |
1439 _min = self._parameter_bounds[name]['min'] | |
1440 _max = self._parameter_bounds[name]['max'] | |
1441 if np.isinf(_min): | |
1442 if not np.isinf(_max): | |
1443 if self._parameter_norms.get(name, False): | |
1444 upp = _max-0.1*self._y_range | |
1445 elif _max == 0.0: | |
1446 upp = _max-0.1 | |
1447 else: | |
1448 upp = _max-0.1*abs(_max) | |
1449 if value >= upp: | |
1450 return(upp) | |
1451 else: | |
1452 if np.isinf(_max): | |
1453 if self._parameter_norms.get(name, False): | |
1454 low = _min+0.1*self._y_range | |
1455 elif _min == 0.0: | |
1456 low = _min+0.1 | |
1457 else: | |
1458 low = _min+0.1*abs(_min) | |
1459 if value <= low: | |
1460 return(low) | |
1461 else: | |
1462 low = 0.9*_min+0.1*_max | |
1463 upp = 0.1*_min+0.9*_max | |
1464 if value <= low: | |
1465 return(low) | |
1466 elif value >= upp: | |
1467 return(upp) | |
1468 return(value) | |
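For finite bounds the reset points sit 10% inside the interval: with min = 0.0 and max = 1.0, low = 0.9*0.0+0.1*1.0 = 0.1 and upp = 0.1*0.0+0.9*1.0 = 0.9, so a starting value of 0.0 is nudged up to 0.1, a value of 0.95 is pulled back to 0.9, and anything strictly between low and upp passes through unchanged.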
394 | 1469 |
395 | 1470 |
396 class FitMultipeak(Fit): | 1471 class FitMultipeak(Fit): |
397 """Fit data with multiple peaks | 1472 """Fit data with multiple peaks |
398 """ | 1473 """ |
399 def __init__(self, x, y, normalize=True): | 1474 def __init__(self, y, x=None, normalize=True): |
400 super().__init__(x, deepcopy(y)) | 1475 super().__init__(y, x=x, normalize=normalize) |
401 self._norm = None | |
402 self._fwhm_max = None | 1476 self._fwhm_max = None |
403 self._sigma_max = None | 1477 self._sigma_max = None |
404 if normalize: | |
405 self._normalize() | |
406 #quickPlot((self._x,self._y), block=True) | |
407 | 1478 |
408 @classmethod | 1479 @classmethod |
409 def fit_multipeak(cls, x, y, centers, peak_models='gaussian', center_exprs=None, fit_type=None, | 1480 def fit_multipeak(cls, y, centers, x=None, normalize=True, peak_models='gaussian', |
410 background_order=None, fwhm_max=None, plot_components=None): | 1481 center_exprs=None, fit_type=None, background_order=None, background_exp=False, |
1482 fwhm_max=None, plot_components=False): | |
411 """Make sure that centers and fwhm_max are in the correct units and consistent with expr | 1483 """Make sure that centers and fwhm_max are in the correct units and consistent with expr |
412 for a uniform fit (fit_type == 'uniform') | 1484 for a uniform fit (fit_type == 'uniform') |
413 """ | 1485 """ |
414 fit = cls(x, y) | 1486 fit = cls(y, x=x, normalize=normalize) |
415 success = fit.fit(centers, fit_type=fit_type, peak_models=peak_models, fwhm_max=fwhm_max, | 1487 success = fit.fit(centers, fit_type=fit_type, peak_models=peak_models, fwhm_max=fwhm_max, |
416 center_exprs=center_exprs, background_order=background_order, | 1488 center_exprs=center_exprs, background_order=background_order, |
417 plot_components=plot_components) | 1489 background_exp=background_exp, plot_components=plot_components) |
418 if success: | 1490 if success: |
419 return fit.best_fit, fit.residual, fit.best_values, fit.best_errors, fit.redchi, \ | 1491 return(fit.best_fit, fit.residual, fit.best_values, fit.best_errors, fit.redchi, \ |
420 fit.success | 1492 fit.success) |
421 else: | 1493 else: |
422 return np.array([]), np.array([]), {}, {}, sys.float_info.max, False | 1494 return(np.array([]), np.array([]), {}, {}, float_max, False) |
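A hypothetical call of this entry point on synthetic data (two Gaussian peaks on a constant background; all values made up):

import numpy as np

x = np.linspace(0.0, 10.0, 500)
y = (100.0*np.exp(-0.5*((x-3.0)/0.2)**2)
     + 50.0*np.exp(-0.5*((x-7.0)/0.3)**2) + 5.0)
best_fit, residual, best_values, best_errors, redchi, success = \
    FitMultipeak.fit_multipeak(y, [3.0, 7.0], x=x, background_order=0, fwhm_max=2.0)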
423 | 1495 |
424 def fit(self, centers, fit_type=None, peak_models=None, center_exprs=None, fwhm_max=None, | 1496 def fit(self, centers, fit_type=None, peak_models=None, center_exprs=None, fwhm_max=None, |
425 background_order=None, plot_components=None, param_constraint=False): | 1497 background_order=None, background_exp=False, plot_components=False, |
1498 param_constraint=False): | |
426 self._fwhm_max = fwhm_max | 1499 self._fwhm_max = fwhm_max |
427 # Create the multipeak model | 1500 # Create the multipeak model |
428 self._create_model(centers, fit_type, peak_models, center_exprs, background_order, | 1501 self._create_model(centers, fit_type, peak_models, center_exprs, background_order, |
429 param_constraint) | 1502 background_exp, param_constraint) |
1503 | |
1504 # RV: Obsolete Normalize the data and results | |
1505 # print('\nBefore fit before normalization in FitMultipeak:') | |
1506 # self._parameters.pretty_print() | |
1507 # self._normalize() | |
1508 # print('\nBefore fit after normalization in FitMultipeak:') | |
1509 # self._parameters.pretty_print() | |
430 | 1510 |
431 # Perform the fit | 1511 # Perform the fit |
432 try: | 1512 try: |
433 if param_constraint: | 1513 if param_constraint: |
434 super().fit(fit_kws={'xtol' : 1.e-5, 'ftol' : 1.e-5, 'gtol' : 1.e-5}) | 1514 super().fit(fit_kws={'xtol': 1.e-5, 'ftol': 1.e-5, 'gtol': 1.e-5}) |
435 else: | 1515 else: |
436 super().fit() | 1516 super().fit() |
437 except: | 1517 except: |
438 return False | 1518 return(False) |
439 | 1519 |
440 # Check for valid fit parameter results | 1520 # Check for valid fit parameter results |
441 fit_failure = self._check_validity() | 1521 fit_failure = self._check_validity() |
442 success = True | 1522 success = True |
443 if fit_failure: | 1523 if fit_failure: |
445 logging.warning(' -> Should not happen with param_constraint set, failing the fit') | 1525 logging.warning(' -> Should not happen with param_constraint set, failing the fit') |
446 success = False | 1526 success = False |
447 else: | 1527 else: |
448 logging.info(' -> Retry fitting with constraints') | 1528 logging.info(' -> Retry fitting with constraints') |
449 self.fit(centers, fit_type, peak_models, center_exprs, fwhm_max=fwhm_max, | 1529 self.fit(centers, fit_type, peak_models, center_exprs, fwhm_max=fwhm_max, |
450 background_order=background_order, plot_components=plot_components, | 1530 background_order=background_order, background_exp=background_exp, |
451 param_constraint=True) | 1531 plot_components=plot_components, param_constraint=True) |
452 else: | 1532 else: |
453 # Renormalize the data and results | 1533 # RV: Obsolete Renormalize the data and results |
454 self._renormalize() | 1534 # print('\nAfter fit before renormalization in FitMultipeak:') |
1535 # self._parameters.pretty_print() | |
1536 # self.print_fit_report() | |
1537 # self._renormalize() | |
1538 # print('\nAfter fit after renormalization in FitMultipeak:') | |
1539 # self._parameters.pretty_print() | |
1540 # self.print_fit_report() | |
455 | 1541 |
456 # Print report and plot components if requested | 1542 # Print report and plot components if requested |
457 if plot_components is not None: | 1543 if plot_components: |
458 self.print_fit_report() | 1544 self.print_fit_report() |
459 self.plot() | 1545 self.plot() |
460 | 1546 |
461 return success | 1547 return(success) |
462 | 1548 |
463 def _create_model(self, centers, fit_type=None, peak_models=None, center_exprs=None, | 1549 def _create_model(self, centers, fit_type=None, peak_models=None, center_exprs=None, |
464 background_order=None, param_constraint=False): | 1550 background_order=None, background_exp=False, param_constraint=False): |
465 """Create the multipeak model | 1551 """Create the multipeak model |
466 """ | 1552 """ |
467 if isinstance(centers, (int, float)): | 1553 if isinstance(centers, (int, float)): |
468 centers = [centers] | 1554 centers = [centers] |
469 num_peaks = len(centers) | 1555 num_peaks = len(centers) |
474 if len(peak_models) != num_peaks: | 1560 if len(peak_models) != num_peaks: |
475 raise ValueError(f'Inconsistent number of peaks in peak_models ({len(peak_models)} vs '+ | 1561 raise ValueError(f'Inconsistent number of peaks in peak_models ({len(peak_models)} vs '+ |
476 f'{num_peaks})') | 1562 f'{num_peaks})') |
477 if num_peaks == 1: | 1563 if num_peaks == 1: |
478 if fit_type is not None: | 1564 if fit_type is not None: |
479 logging.warning('Ignoring fit_type input for fitting one peak') | 1565 logging.debug('Ignoring fit_type input for fitting one peak') |
480 fit_type = None | 1566 fit_type = None |
481 if center_exprs is not None: | 1567 if center_exprs is not None: |
482 logging.warning('Ignoring center_exprs input for fitting one peak') | 1568 logging.debug('Ignoring center_exprs input for fitting one peak') |
483 center_exprs = None | 1569 center_exprs = None |
484 else: | 1570 else: |
485 if fit_type == 'uniform': | 1571 if fit_type == 'uniform': |
486 if center_exprs is None: | 1572 if center_exprs is None: |
487 center_exprs = [f'scale_factor*{cen}' for cen in centers] | 1573 center_exprs = [f'scale_factor*{cen}' for cen in centers] |
491 elif fit_type == 'unconstrained' or fit_type is None: | 1577 elif fit_type == 'unconstrained' or fit_type is None: |
492 if center_exprs is not None: | 1578 if center_exprs is not None: |
493 logging.warning('Ignoring center_exprs input for unconstrained fit') | 1579 logging.warning('Ignoring center_exprs input for unconstrained fit') |
494 center_exprs = None | 1580 center_exprs = None |
495 else: | 1581 else: |
496 raise ValueError(f'Illegal fit_type in fit_multigaussian {fit_type}') | 1582 raise ValueError(f'Invalid fit_type ({fit_type})') |
497 self._sigma_max = None | 1583 self._sigma_max = None |
498 if param_constraint: | 1584 if param_constraint: |
499 min_value = sys.float_info.min | 1585 min_value = float_min |
500 if self._fwhm_max is not None: | 1586 if self._fwhm_max is not None: |
501 self._sigma_max = np.zeros(num_peaks) | 1587 self._sigma_max = np.zeros(num_peaks) |
502 else: | 1588 else: |
503 min_value = None | 1589 min_value = None |
504 | 1590 |
508 self._result = None | 1594 self._result = None |
509 | 1595 |
510 # Add background model | 1596 # Add background model |
511 if background_order is not None: | 1597 if background_order is not None: |
512 if background_order == 0: | 1598 if background_order == 0: |
513 self.add_model('constant', prefix='background', c=0.0) | 1599 self.add_model('constant', prefix='background', parameters= |
1600 {'name': 'c', 'value': float_min, 'min': min_value}) | |
514 elif background_order == 1: | 1601 elif background_order == 1: |
515 self.add_model('linear', prefix='background', slope=0.0, intercept=0.0) | 1602 self.add_model('linear', prefix='background', slope=0.0, intercept=0.0) |
516 elif background_order == 2: | 1603 elif background_order == 2: |
517 self.add_model('quadratic', prefix='background', a=0.0, b=0.0, c=0.0) | 1604 self.add_model('quadratic', prefix='background', a=0.0, b=0.0, c=0.0) |
518 else: | 1605 else: |
519 raise ValueError(f'background_order = {background_order}') | 1606 raise ValueError(f'Invalid parameter background_order ({background_order})') |
1607 if background_exp: | |
1608 self.add_model('exponential', prefix='background', parameters=( | |
1609 {'name': 'amplitude', 'value': float_min, 'min': min_value}, | |
1610 {'name': 'decay', 'value': float_min, 'min': min_value})) | |
520 | 1611 |
521 # Add peaks and guess initial fit parameters | 1612 # Add peaks and guess initial fit parameters |
522 ast = Interpreter() | 1613 ast = Interpreter() |
523 if num_peaks == 1: | 1614 if num_peaks == 1: |
524 height_init, cen_init, fwhm_init = self.guess_init_peak(self._x, self._y) | 1615 height_init, cen_init, fwhm_init = self.guess_init_peak(self._x, self._y) |
532 if self._sigma_max is not None: | 1623 if self._sigma_max is not None: |
533 ast(f'fwhm = {self._fwhm_max}') | 1624 ast(f'fwhm = {self._fwhm_max}') |
534 sig_max = ast(fwhm_factor[peak_models[0]]) | 1625 sig_max = ast(fwhm_factor[peak_models[0]]) |
535 self._sigma_max[0] = sig_max | 1626 self._sigma_max[0] = sig_max |
536 self.add_model(peak_models[0], parameters=( | 1627 self.add_model(peak_models[0], parameters=( |
537 {'name' : 'amplitude', 'value' : amp_init, 'min' : min_value}, | 1628 {'name': 'amplitude', 'value': amp_init, 'min': min_value}, |
538 {'name' : 'center', 'value' : cen_init, 'min' : min_value}, | 1629 {'name': 'center', 'value': cen_init, 'min': min_value}, |
539 {'name' : 'sigma', 'value' : sig_init, 'min' : min_value, 'max' : sig_max})) | 1630 {'name': 'sigma', 'value': sig_init, 'min': min_value, 'max': sig_max})) |
540 else: | 1631 else: |
541 if fit_type == 'uniform': | 1632 if fit_type == 'uniform': |
542 self.add_parameter(name='scale_factor', value=1.0) | 1633 self.add_parameter(name='scale_factor', value=1.0) |
543 for i in range(num_peaks): | 1634 for i in range(num_peaks): |
544 height_init, cen_init, fwhm_init = self.guess_init_peak(self._x, self._y, i, | 1635 height_init, cen_init, fwhm_init = self.guess_init_peak(self._x, self._y, i, |
554 ast(f'fwhm = {self._fwhm_max}') | 1645 ast(f'fwhm = {self._fwhm_max}') |
555 sig_max = ast(fwhm_factor[peak_models[i]]) | 1646 sig_max = ast(fwhm_factor[peak_models[i]]) |
556 self._sigma_max[i] = sig_max | 1647 self._sigma_max[i] = sig_max |
557 if fit_type == 'uniform': | 1648 if fit_type == 'uniform': |
558 self.add_model(peak_models[i], prefix=f'peak{i+1}_', parameters=( | 1649 self.add_model(peak_models[i], prefix=f'peak{i+1}_', parameters=( |
559 {'name' : 'amplitude', 'value' : amp_init, 'min' : min_value}, | 1650 {'name': 'amplitude', 'value': amp_init, 'min': min_value}, |
560 {'name' : 'center', 'expr' : center_exprs[i], 'min' : min_value}, | 1651 {'name': 'center', 'expr': center_exprs[i]}, |
561 {'name' : 'sigma', 'value' : sig_init, 'min' : min_value, | 1652 {'name': 'sigma', 'value': sig_init, 'min': min_value, |
562 'max' : sig_max})) | 1653 'max': sig_max})) |
563 else: | 1654 else: |
564 self.add_model('gaussian', prefix=f'peak{i+1}_', parameters=( | 1655 self.add_model('gaussian', prefix=f'peak{i+1}_', parameters=( |
565 {'name' : 'amplitude', 'value' : amp_init, 'min' : min_value}, | 1656 {'name': 'amplitude', 'value': amp_init, 'min': min_value}, |
566 {'name' : 'center', 'value' : cen_init, 'min' : min_value}, | 1657 {'name': 'center', 'value': cen_init, 'min': min_value}, |
567 {'name' : 'sigma', 'value' : sig_init, 'min' : min_value, | 1658 {'name': 'sigma', 'value': sig_init, 'min': min_value, |
568 'max' : sig_max})) | 1659 'max': sig_max})) |
569 | 1660 |
570 def _check_validity(self): | 1661 def _check_validity(self): |
571 """Check for valid fit parameter results | 1662 """Check for valid fit parameter results |
572 """ | 1663 """ |
573 fit_failure = False | 1664 fit_failure = False |
574 index = re.compile(r'\d+') | 1665 index = compile(r'\d+') |
575 for parameter in self.best_parameters: | 1666 for name, par in self.best_parameters.items(): |
576 name = parameter['name'] | 1667 if 'background' in name: |
577 if ((('amplitude' in name or 'height' in name) and parameter['value'] <= 0.0) or | 1668 # if ((name == 'backgroundc' and par['value'] <= 0.0) or |
578 (('sigma' in name or 'fwhm' in name) and parameter['value'] <= 0.0) or | 1669 # (name.endswith('amplitude') and par['value'] <= 0.0) or |
579 ('center' in name and parameter['value'] <= 0.0) or | 1670 if ((name.endswith('amplitude') and par['value'] <= 0.0) or |
580 (name == 'scale_factor' and parameter['value'] <= 0.0)): | 1671 (name.endswith('decay') and par['value'] <= 0.0)): |
581 logging.info(f'Invalid fit result for {name} ({parameter["value"]})') | 1672 logging.info(f'Invalid fit result for {name} ({par["value"]})') |
1673 fit_failure = True | |
1674 elif (((name.endswith('amplitude') or name.endswith('height')) and | |
1675 par['value'] <= 0.0) or | |
1676 ((name.endswith('sigma') or name.endswith('fwhm')) and par['value'] <= 0.0) or | |
1677 (name.endswith('center') and par['value'] <= 0.0) or | |
1678 (name == 'scale_factor' and par['value'] <= 0.0)): | |
1679 logging.info(f'Invalid fit result for {name} ({par["value"]})') | |
582 fit_failure = True | 1680 fit_failure = True |
583 if 'sigma' in name and self._sigma_max is not None: | 1681 if name.endswith('sigma') and self._sigma_max is not None: |
584 if name == 'sigma': | 1682 if name == 'sigma': |
585 sigma_max = self._sigma_max[0] | 1683 sigma_max = self._sigma_max[0] |
586 else: | 1684 else: |
587 sigma_max = self._sigma_max[int(index.search(name).group())-1] | 1685 sigma_max = self._sigma_max[int(index.search(name).group())-1] |
588 i = int(index.search(name).group())-1 | 1686 if par['value'] > sigma_max: |
589 if parameter['value'] > sigma_max: | 1687 logging.info(f'Invalid fit result for {name} ({par["value"]})') |
590 logging.info(f'Invalid fit result for {name} ({parameter["value"]})') | |
591 fit_failure = True | 1688 fit_failure = True |
592 elif parameter['value'] == sigma_max: | 1689 elif par['value'] == sigma_max: |
593 logging.warning(f'Edge result for {name} ({parameter["value"]})') | 1690 logging.warning(f'Edge result for {name} ({par["value"]})') |
594 if 'fwhm' in name and self._fwhm_max is not None: | 1691 if name.endswith('fwhm') and self._fwhm_max is not None: |
595 if parameter['value'] > self._fwhm_max: | 1692 if par['value'] > self._fwhm_max: |
596 logging.info(f'Invalid fit result for {name} ({parameter["value"]})') | 1693 logging.info(f'Invalid fit result for {name} ({par["value"]})') |
597 fit_failure = True | 1694 fit_failure = True |
598 elif parameter['value'] == self._fwhm_max: | 1695 elif par['value'] == self._fwhm_max: |
599 logging.warning(f'Edge result for {name} ({parameter["value"]})') | 1696 logging.warning(f'Edge result for {name} ({par["value"]})') |
600 return fit_failure | 1697 return(fit_failure) |
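The compiled regex pulls the peak number out of a prefixed parameter name so the matching per-peak bound can be looked up; for example:

from re import compile

index = compile(r'\d+')
int(index.search('peak2_sigma').group())-1  # -> 1, i.e. self._sigma_max[1]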
601 | 1698 |
602 def _normalize(self): | 1699 |
603 """Normalize the data | 1700 class FitMap(Fit): |
1701 """Fit a map of data | |
1702 """ | |
1703 def __init__(self, ymap, x=None, models=None, normalize=True, transpose=None, **kwargs): | |
1704 super().__init__(None) | |
1705 self._best_errors = None | |
1706 self._best_fit = None | |
1707 self._best_parameters = None | |
1708 self._best_values = None | |
1709 self._inv_transpose = None | |
1710 self._max_nfev = None | |
1711 self._memfolder = None | |
1712 self._new_parameters = None | |
1713 self._out_of_bounds = None | |
1714 self._plot = False | |
1715 self._print_report = False | |
1716 self._redchi = None | |
1717 self._redchi_cutoff = 0.1 | |
1718 self._skip_init = True | |
1719 self._success = None | |
1720 self._transpose = None | |
1721 self._try_no_bounds = True | |
1722 | |
1723 # At this point the fastest index should always be the signal dimension so that the slowest | |
1724 # ndim-1 dimensions are the map dimensions | |
1725 if isinstance(ymap, (tuple, list, np.ndarray)): | |
1726 self._x = np.asarray(x) | |
1727 elif have_xarray and isinstance(ymap, xr.DataArray): | |
1728 if x is not None: | |
1729 logging.warning(f'Ignoring superfluous input x ({x}) in FitMap.__init__') | |
1730 self._x = np.asarray(ymap[ymap.dims[-1]]) | |
1731 else: | |
1732 illegal_value(ymap, 'ymap', 'FitMap.__init__', raise_error=True) | |
1733 self._ymap = ymap | |
1734 | |
1735 # Verify the input parameters | |
1736 if self._x.ndim != 1: | |
1737 raise ValueError(f'Invalid dimension for input x {self._x.ndim}') | |
1738 if self._ymap.ndim < 2: | |
1739 raise ValueError('Invalid number of dimensions of the input dataset '+ | |
1740 f'{self._ymap.ndim}') | |
1741 if self._x.size != self._ymap.shape[-1]: | |
1742 raise ValueError(f'Inconsistent x and y dimensions ({self._x.size} vs '+ | |
1743 f'{self._ymap.shape[-1]})') | |
1744 if not isinstance(normalize, bool): | |
1745 logging.warning(f'Invalid value for normalize ({normalize}) in FitMap.__init__: '+ | |
1746 'setting normalize to True') | |
1747 normalize = True | |
1748 if isinstance(transpose, bool) and not transpose: | |
1749 transpose = None | |
1750 if transpose is not None and self._ymap.ndim < 3: | |
1751 logging.warning(f'Transpose meaningless for {self._ymap.ndim-1}D data maps: ignoring '+ | |
1752 'transpose') | |
1753 if transpose is not None: | |
1754 if self._ymap.ndim == 3 and isinstance(transpose, bool) and transpose: | |
1755 self._transpose = (1, 0) | |
1756 elif not isinstance(transpose, (tuple, list)): | |
1757 logging.warning(f'Invalid data type for transpose ({transpose}, '+ | |
1758 f'{type(transpose)}) in FitMap.__init__: setting transpose to False') | |
1759 elif len(transpose) != self._ymap.ndim-1: | |
1760 logging.warning(f'Invalid dimension for transpose ({transpose}, must be equal to '+ | |
1761 f'{self._ymap.ndim-1}) in FitMap.__init__: setting transpose to False') | |
1762 elif any(i not in transpose for i in range(len(transpose))): | |
1763 logging.warning(f'Invalid index in transpose ({transpose}) '+ | |
1764 f'in FitMap.__init__: setting transpose to False') | |
1765 elif not all(i==transpose[i] for i in range(self._ymap.ndim-1)): | |
1766 self._transpose = transpose | |
1767 if self._transpose is not None: | |
1768 self._inv_transpose = tuple(self._transpose.index(i) | |
1769 for i in range(len(self._transpose))) | |
1770 | |
1771 # Flatten the map (transpose if requested) | |
1772 # Store the flattened map in self._ymap_norm, whether normalized or not | |
1773 if self._transpose is not None: | |
1774 self._ymap_norm = np.transpose(np.asarray(self._ymap), list(self._transpose)+ | |
1775 [len(self._transpose)]) | |
1776 else: | |
1777 self._ymap_norm = np.asarray(self._ymap) | |
1778 self._map_dim = int(self._ymap_norm.size/self._x.size) | |
1779 self._map_shape = self._ymap_norm.shape[:-1] | |
1780 self._ymap_norm = np.reshape(self._ymap_norm, (self._map_dim, self._x.size)) | |
1781 | |
1782 # Check if a mask is provided | |
1783 if 'mask' in kwargs: | |
1784 self._mask = kwargs.pop('mask') | |
1785 if self._mask is None: | |
1786 ymap_min = float(self._ymap_norm.min()) | |
1787 ymap_max = float(self._ymap_norm.max()) | |
1788 else: | |
1789 self._mask = np.asarray(self._mask).astype(bool) | |
1790 if self._x.size != self._mask.size: | |
1791 raise ValueError(f'Inconsistent mask dimension ({self._x.size} vs '+ | |
1792 f'{self._mask.size})') | |
1793 ymap_masked = np.asarray(self._ymap_norm)[:,~self._mask] | |
1794 ymap_min = float(ymap_masked.min()) | |
1795 ymap_max = float(ymap_masked.max()) | |
1796 | |
1797 # Normalize the data | |
1798 self._y_range = ymap_max-ymap_min | |
1799 if normalize and self._y_range > 0.0: | |
1800 self._norm = (ymap_min, self._y_range) | |
1801 self._ymap_norm = (self._ymap_norm-self._norm[0])/self._norm[1] | |
1802 else: | |
1803 self._redchi_cutoff *= self._y_range**2 | |
1804 if models is not None: | |
1805 if callable(models) or isinstance(models, str): | |
1806 kwargs = self.add_model(models, **kwargs) | |
1807 elif isinstance(models, (tuple, list)): | |
1808 for model in models: | |
1809 kwargs = self.add_model(model, **kwargs) | |
1810 self.fit(**kwargs) | |
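The transpose-and-reshape above turns any N-dimensional map with a trailing signal axis into a 2D stack of spectra, one row per map point; a minimal shape walk-through (sizes made up):

import numpy as np

ymap = np.empty((5, 7, 100))  # a 5x7 map with a 100-point signal axis
transpose = (1, 0)            # swap the two map dimensions
ymap_t = np.transpose(ymap, list(transpose)+[len(transpose)])  # (7, 5, 100)
map_dim = ymap_t.size//100    # 35 spectra to fit
ymap_flat = np.reshape(ymap_t, (map_dim, 100))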
1811 | |
1812 @classmethod | |
1813 def fit_map(cls, ymap, models, x=None, normalize=True, **kwargs): | |
1814 return(cls(ymap, x=x, models=models, normalize=normalize, **kwargs)) | |
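A hypothetical use of this wrapper (array contents made up; remaining keyword arguments pass through to the model setup and fit):

import numpy as np

x = np.linspace(0.0, 10.0, 100)
ymap = np.random.rand(4, 6, x.size)  # a 4x6 map of 100-point spectra
fitmap = FitMap.fit_map(ymap, 'gaussian', x=x, num_proc=1)
print(fitmap.best_values)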
1815 | |
1816 @property | |
1817 def best_errors(self): | |
1818 return(self._best_errors) | |
1819 | |
1820 @property | |
1821 def best_fit(self): | |
1822 return(self._best_fit) | |
1823 | |
1824 @property | |
1825 def best_results(self): | |
1826 """Convert the input data array to a data set and add the fit results. | |
604 """ | 1827 """ |
605 y_min = self._y.min() | 1828 if self.best_values is None or self.best_errors is None or self.best_fit is None: |
606 self._norm = (y_min, self._y.max()-y_min) | 1829 return(None) |
607 if self._norm[1] == 0.0: | 1830 if not have_xarray: |
608 self._norm = None | 1831 logging.warning('Unable to load xarray module') |
609 else: | 1832 return(None) |
610 self._y = (self._y-self._norm[0])/self._norm[1] | 1833 best_values = self.best_values |
611 | 1834 best_errors = self.best_errors |
612 def _renormalize(self): | 1835 if isinstance(self._ymap, xr.DataArray): |
613 """Renormalize the data and results | 1836 best_results = self._ymap.to_dataset() |
614 """ | 1837 dims = self._ymap.dims |
615 if self._norm is None: | 1838 fit_name = f'{self._ymap.name}_fit' |
1839 else: | |
1840 coords = {f'dim{n}_index':([f'dim{n}_index'], range(self._ymap.shape[n])) | |
1841 for n in range(self._ymap.ndim-1)} | |
1842 coords['x'] = (['x'], self._x) | |
1843 dims = list(coords.keys()) | |
1844 best_results = xr.Dataset(coords=coords) | |
1845 best_results['y'] = (dims, self._ymap) | |
1846 fit_name = 'y_fit' | |
1847 best_results[fit_name] = (dims, self.best_fit) | |
1848 if self._mask is not None: | |
1849 best_results['mask'] = self._mask | |
1850 for n in range(best_values.shape[0]): | |
1851 best_results[f'{self._best_parameters[n]}_values'] = (dims[:-1], best_values[n]) | |
1852 best_results[f'{self._best_parameters[n]}_errors'] = (dims[:-1], best_errors[n]) | |
1853 best_results.attrs['components'] = self.components | |
1854 return(best_results) | |
1855 | |
1856 @property | |
1857 def best_values(self): | |
1858 return(self._best_values) | |
1859 | |
1860 @property | |
1861 def chisqr(self): | |
1862 logging.warning('property chisqr not defined for fit.FitMap') | |
1863 return(None) | |
1864 | |
1865 @property | |
1866 def components(self): | |
1867 components = {} | |
1868 if self._result is None: | |
1869 logging.warning('Unable to collect components in FitMap.components') | |
1870 return(components) | |
1871 for component in self._result.components: | |
1872 if 'tmp_normalization_offset_c' in component.param_names: | |
1873 continue | |
1874 parameters = {} | |
1875 for name in component.param_names: | |
1876 if self._parameters[name].vary: | |
1877 parameters[name] = {'free': True} | |
1878 elif self._parameters[name].expr is not None: | |
1879 parameters[name] = {'free': False, 'expr': self._parameters[name].expr} | |
1880 else: | |
1881 parameters[name] = {'free': False, 'value': self.init_parameters[name]['value']} | |
1882 expr = None | |
1883 if isinstance(component, ExpressionModel): | |
1884 name = component._name | |
1885 if name[-1] == '_': | |
1886 name = name[:-1] | |
1887 expr = component.expr | |
1888 else: | |
1889 prefix = component.prefix | |
1890 if len(prefix): | |
1891 if prefix[-1] == '_': | |
1892 prefix = prefix[:-1] | |
1893 name = f'{prefix} ({component._name})' | |
1894 else: | |
1895 name = f'{component._name}' | |
1896 if expr is None: | |
1897 components[name] = {'parameters': parameters} | |
1898 else: | |
1899 components[name] = {'expr': expr, 'parameters': parameters} | |
1900 return(components) | |
1901 | |
1902 @property | |
1903 def covar(self): | |
1904 logging.warning('property covar not defined for fit.FitMap') | |
1905 return(None) | |
1906 | |
1907 @property | |
1908 def max_nfev(self): | |
1909 return(self._max_nfev) | |
1910 | |
1911 @property | |
1912 def num_func_eval(self): | |
1913 logging.warning('property num_func_eval not defined for fit.FitMap') | |
1914 return(None) | |
1915 | |
1916 @property | |
1917 def out_of_bounds(self): | |
1918 return(self._out_of_bounds) | |
1919 | |
1920 @property | |
1921 def redchi(self): | |
1922 return(self._redchi) | |
1923 | |
1924 @property | |
1925 def residual(self): | |
1926 if self.best_fit is None: | |
1927 return(None) | |
1928 if self._mask is None: | |
1929 return(np.asarray(self._ymap)-self.best_fit) | |
1930 else: | |
1931 ymap_flat = np.reshape(np.asarray(self._ymap), (self._map_dim, self._x.size)) | |
1932 ymap_flat_masked = ymap_flat[:,~self._mask] | |
1933 ymap_masked = np.reshape(ymap_flat_masked, | |
1934 list(self._map_shape)+[ymap_flat_masked.shape[-1]]) | |
1935 return(ymap_masked-self.best_fit) | |
1936 | |
1937 @property | |
1938 def success(self): | |
1939 return(self._success) | |
1940 | |
1941 @property | |
1942 def var_names(self): | |
1943 logging.warning('property var_names not defined for fit.FitMap') | |
1944 return(None) | |
1945 | |
1946 @property | |
1947 def y(self): | |
1948 logging.warning('property y not defined for fit.FitMap') | |
1949 return(None) | |
1950 | |
1951 @property | |
1952 def ymap(self): | |
1953 return(self._ymap) | |
1954 | |
1955 def best_parameters(self, dims=None): | |
1956 if dims is None: | |
1957 return(self._best_parameters) | |
1958 if not isinstance(dims, (list, tuple)) or len(dims) != len(self._map_shape): | |
1959 illegal_value(dims, 'dims', 'FitMap.best_parameters', raise_error=True) | |
1960 if self.best_values is None or self.best_errors is None: | |
1961 logging.warning(f'Unable to obtain best parameter values for dims = {dims} in '+ | |
1962 'FitMap.best_parameters') | |
1963 return({}) | |
1964 # Create current parameters | |
1965 parameters = deepcopy(self._parameters) | |
1966 for n, name in enumerate(self._best_parameters): | |
1967 if self._parameters[name].vary: | |
1968 parameters[name].set(value=self.best_values[n][dims]) | |
1969 parameters[name].stderr = self.best_errors[n][dims] | |
1970 parameters_dict = {} | |
1971 for name in sorted(parameters): | |
1972 if name != 'tmp_normalization_offset_c': | |
1973 par = parameters[name] | |
1974 parameters_dict[name] = {'value': par.value, 'error': par.stderr, | |
1975 'init_value': self.init_parameters[name]['value'], 'min': par.min, | |
1976 'max': par.max, 'vary': par.vary, 'expr': par.expr} | |
1977 return(parameters_dict) | |
1978 | |
1979 def freemem(self): | |
1980 if self._memfolder is None: | |
616 return | 1981 return |
617 self._y = self._norm[0]+self._norm[1]*self._y | 1982 try: |
618 self._result.best_fit = self._norm[0]+self._norm[1]*self._result.best_fit | 1983 rmtree(self._memfolder) |
619 for name in self._result.params: | 1984 self._memfolder = None |
620 par = self._result.params[name] | 1985 except: |
621 if 'amplitude' in name or 'height' in name or 'background' in name: | 1986 logging.warning('Could not clean up automatically') |
622 par.value *= self._norm[1] | 1987 |
623 if par.stderr is not None: | 1988 def plot(self, dims, y_title=None, plot_residual=False, plot_comp_legends=False, |
624 par.stderr *= self._norm[1] | 1989 plot_masked_data=True): |
625 if par.init_value is not None: | 1990 if not isinstance(dims, (list, tuple)) or len(dims) != len(self._map_shape): |
626 par.init_value *= self._norm[1] | 1991 illegal_value(dims, 'dims', 'FitMap.plot', raise_error=True) |
627 if par.min is not None and not np.isinf(par.min): | 1992 if self._result is None or self.best_fit is None or self.best_values is None: |
628 par.min *= self._norm[1] | 1993 logging.warning(f'Unable to plot fit for dims = {dims} in FitMap.plot') |
629 if par.max is not None and not np.isinf(par.max): | 1994 return |
630 par.max *= self._norm[1] | 1995 if y_title is None or not isinstance(y_title, str): |
631 if 'intercept' in name or 'backgroundc' in name: | 1996 y_title = 'data' |
632 par.value += self._norm[0] | 1997 if self._mask is None: |
633 if par.init_value is not None: | 1998 mask = np.zeros(self._x.size).astype(bool) |
634 par.init_value += self._norm[0] | 1999 plot_masked_data = False |
635 if par.min is not None and not np.isinf(par.min): | 2000 else: |
636 par.min += self._norm[0] | 2001 mask = self._mask |
637 if par.max is not None and not np.isinf(par.max): | 2002 plots = [(self._x, np.asarray(self._ymap[dims]), 'b.')] |
638 par.max += self._norm[0] | 2003 legend = [y_title] |
639 self._result.init_fit = self._norm[0]+self._norm[1]*self._result.init_fit | 2004 if plot_masked_data: |
640 init_values = {} | 2005 plots += [(self._x[mask], np.asarray(self._ymap)[(*dims,mask)], 'bx')] |
641 for name in self._result.init_values: | 2006 legend += ['masked data'] |
642 init_values[name] = self._result.init_values[name] | 2007 plots += [(self._x[~mask], self.best_fit[dims], 'k-')] |
643 if init_values[name] is None: | 2008 legend += ['best fit'] |
2009 if plot_residual: | |
2010 plots += [(self._x[~mask], self.residual[dims], 'k--')] | |
2011 legend += ['residual'] | |
2012 # Create current parameters | |
2013 parameters = deepcopy(self._parameters) | |
2014 for name in self._best_parameters: | |
2015 if self._parameters[name].vary: | |
2016 parameters[name].set(value= | |
2017 self.best_values[self._best_parameters.index(name)][dims]) | |
2018 for component in self._result.components: | |
2019 if 'tmp_normalization_offset_c' in component.param_names: | |
644 continue | 2020 continue |
645 if 'amplitude' in name or 'height' in name or 'background' in name: | 2021 if isinstance(component, ExpressionModel): |
646 init_values[name] *= self._norm[1] | 2022 prefix = component._name |
647 if 'intercept' in name or 'backgroundc' in name: | 2023 if prefix[-1] == '_': |
648 init_values[name] += self._norm[0] | 2024 prefix = prefix[:-1] |
649 self._result.init_values = init_values | 2025 modelname = f'{prefix}: {component.expr}' |
650 # Don't renormalize chisqr, it has no useful meaning in physical units | 2026 else: |
651 #self._result.chisqr *= self._norm[1]*self._norm[1] | 2027 prefix = component.prefix |
652 if self._result.covar is not None: | 2028 if len(prefix): |
653 for i, name in enumerate(self._result.var_names): | 2029 if prefix[-1] == '_': |
654 if 'amplitude' in name or 'height' in name or 'background' in name: | 2030 prefix = prefix[:-1] |
655 for j in range(len(self._result.var_names)): | 2031 modelname = f'{prefix} ({component._name})' |
656 if self._result.covar[i,j] is not None: | 2032 else: |
657 self._result.covar[i,j] *= self._norm[1] | 2033 modelname = f'{component._name}' |
658 if self._result.covar[j,i] is not None: | 2034 if len(modelname) > 20: |
659 self._result.covar[j,i] *= self._norm[1] | 2035 modelname = f'{modelname[0:16]} ...' |
660 # Don't renormalize redchi, it has no useful meaning in physical units | 2036 y = component.eval(params=parameters, x=self._x[~mask]) |
661 #self._result.redchi *= self._norm[1]*self._norm[1] | 2037 if isinstance(y, (int, float)): |
662 self._result.residual *= self._norm[1] | 2038 y *= np.ones(self._x[~mask].size) |
2039 plots += [(self._x[~mask], y, '--')] | |
2040 if plot_comp_legends: | |
2041 legend.append(modelname) | |
2042 quick_plot(tuple(plots), legend=legend, title=str(dims), block=True) | |
2043 | |
2044 def fit(self, **kwargs): | |
2045 # t0 = time() | |
2046 # Check input parameters | |
2047 if self._model is None: | |
2048 logging.error('Undefined fit model') | |
2049 if 'num_proc' in kwargs: | |
2050 num_proc = kwargs.pop('num_proc') | |
2051 if not is_int(num_proc, ge=1): | |
2052 illegal_value(num_proc, 'num_proc', 'FitMap.fit', raise_error=True) | |
2053 else: | |
2054 num_proc = cpu_count() | |
2055 if num_proc > 1 and not have_joblib: | |
2056 logging.warning('Missing joblib in the conda environment, running FitMap serially') | |
2057 num_proc = 1 | |
2058 if num_proc > cpu_count(): | |
2059 logging.warning(f'The requested number of processors ({num_proc}) exceeds the maximum '+ | |
2060 f'number of processors, num_proc reduced to ({cpu_count()})') | |
2061 num_proc = cpu_count() | |
2062 if 'try_no_bounds' in kwargs: | |
2063 self._try_no_bounds = kwargs.pop('try_no_bounds') | |
2064 if not isinstance(self._try_no_bounds, bool): | |
2065 illegal_value(self._try_no_bounds, 'try_no_bounds', 'FitMap.fit', raise_error=True) | |
2066 if 'redchi_cutoff' in kwargs: | |
2067 self._redchi_cutoff = kwargs.pop('redchi_cutoff') | |
2068 if not (isinstance(self._redchi_cutoff, (int, float)) and self._redchi_cutoff > 0): | |
2069 illegal_value(self._redchi_cutoff, 'redchi_cutoff', 'FitMap.fit', raise_error=True) | |
2070 if 'print_report' in kwargs: | |
2071 self._print_report = kwargs.pop('print_report') | |
2072 if not isinstance(self._print_report, bool): | |
2073 illegal_value(self._print_report, 'print_report', 'FitMap.fit', raise_error=True) | |
2074 if 'plot' in kwargs: | |
2075 self._plot = kwargs.pop('plot') | |
2076 if not isinstance(self._plot, bool): | |
2077 illegal_value(self._plot, 'plot', 'FitMap.fit', raise_error=True) | |
2078 if 'skip_init' in kwargs: | |
2079 self._skip_init = kwargs.pop('skip_init') | |
2080 if not isinstance(self._skip_init, bool): | |
2081 illegal_value(self._skip_init, 'skip_init', 'FitMap.fit', raise_error=True) | |
2082 | |
2083 # Apply mask if supplied: | |
2084 if 'mask' in kwargs: | |
2085 self._mask = kwargs.pop('mask') | |
2086 if self._mask is not None: | |
2087 self._mask = np.asarray(self._mask).astype(bool) | |
2088 if self._x.size != self._mask.size: | |
2089 raise ValueError(f'Inconsistent x and mask dimensions ({self._x.size} vs '+ | |
2090 f'{self._mask.size})') | |
2091 | |
2092 # Add constant offset for a normalized single component model | |
2093 if self._result is None and self._norm is not None and self._norm[0]: | |
2094 self.add_model('constant', prefix='tmp_normalization_offset_', parameters={'name': 'c', | |
2095 'value': -self._norm[0], 'vary': False, 'norm': True}) | |
2096 #'value': -self._norm[0]/self._norm[1], 'vary': False, 'norm': False}) | |
2097 | |
2098 # Adjust existing parameters for refit: | |
2099 if 'parameters' in kwargs: | |
2100 # print('\nIn FitMap before adjusting existing parameters for refit:') | |
2101 # self._parameters.pretty_print() | |
2102 # if self._result is None: | |
2103 # raise ValueError('Invalid parameter parameters ({parameters})') | |
2104 # if self._best_values is None: | |
2105 # raise ValueError('Valid self._best_values required for refitting in FitMap.fit') | |
2106 parameters = kwargs.pop('parameters') | |
2107 # print(f'\nparameters:\n{parameters}') | |
2108 if isinstance(parameters, dict): | |
2109 parameters = (parameters, ) | |
2110 elif not is_dict_series(parameters): | |
2111 illegal_value(parameters, 'parameters', 'Fit.fit', raise_error=True) | |
2112 for par in parameters: | |
2113 name = par['name'] | |
2114 if name not in self._parameters: | |
2115 raise ValueError(f'Unable to match {name} parameter {par} to an existing one') | |
2116 if self._parameters[name].expr is not None: | |
2117 raise ValueError(f'Unable to modify {name} parameter {par} (currently an '+ | |
2118 'expression)') | |
2119 value = par.get('value') | |
2120 vary = par.get('vary') | |
2121 if par.get('expr') is not None: | |
2122 raise KeyError(f'Illegal "expr" key in {name} parameter {par}') | |
2123 self._parameters[name].set(value=value, vary=vary, min=par.get('min'), | |
2124 max=par.get('max')) | |
2125 # Overwrite existing best values for fixed parameters when a value is specified | |
2126 # print(f'best values befored resetting:\n{self._best_values}') | |
2127 if isinstance(value, (int, float)) and vary is False: | |
2128 for i, nname in enumerate(self._best_parameters): | |
2129 if nname == name: | |
2130 self._best_values[i] = value | |
2131 # print(f'best values after resetting (value={value}, vary={vary}):\n{self._best_values}') | |
2132 #RV print('\nIn FitMap after adjusting existing parameters for refit:') | |
2133 #RV self._parameters.pretty_print() | |
2134 | |
2135 # Check for uninitialized parameters | |
2136 for name, par in self._parameters.items(): | |
2137 if par.expr is None: | |
2138 value = par.value | |
2139 if value is None or np.isinf(value) or np.isnan(value): | |
2140 value = 1.0 | |
2141 if self._norm is None or name not in self._parameter_norms: | |
2142 self._parameters[name].set(value=value) | |
2143 elif self._parameter_norms[name]: | |
2144 self._parameters[name].set(value=value*self._norm[1]) | |

        # Create the best parameter list, consisting of all varying parameters plus the
        # expression parameters, in order to collect their errors
        if self._result is None:
            # Initial fit
            assert(self._best_parameters is None)
            self._best_parameters = [name for name, par in self._parameters.items()
                    if par.vary or par.expr is not None]
            num_new_parameters = 0
        else:
            # Refit
            assert(len(self._best_parameters))
            self._new_parameters = [name for name, par in self._parameters.items()
                    if name != 'tmp_normalization_offset_c'
                    and name not in self._best_parameters
                    and (par.vary or par.expr is not None)]
            num_new_parameters = len(self._new_parameters)
        num_best_parameters = len(self._best_parameters)
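        # On a refit, parameters added since the previous fit are tracked separately in
        # self._new_parameters so that their results can be appended behind the arrays
        # of existing best values allocated below.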

        # Flatten and normalize the best values of the previous fit and remove the
        # remaining results of the previous fit
        if self._result is not None:
            # print('\nBefore flatten and normalize:')
            # print(f'self._best_values:\n{self._best_values}')
            self._out_of_bounds = None
            self._max_nfev = None
            self._redchi = None
            self._success = None
            self._best_fit = None
            self._best_errors = None
            assert(self._best_values is not None)
            assert(self._best_values.shape[0] == num_best_parameters)
            assert(self._best_values.shape[1:] == self._map_shape)
            if self._transpose is not None:
                self._best_values = np.transpose(self._best_values,
                        [0]+[i+1 for i in self._transpose])
            self._best_values = [np.reshape(self._best_values[i], self._map_dim)
                    for i in range(num_best_parameters)]
            if self._norm is not None:
                for i, name in enumerate(self._best_parameters):
                    if self._parameter_norms.get(name, False):
                        self._best_values[i] /= self._norm[1]
            #RV print('\nAfter flatten and normalize:')
            #RV print(f'self._best_values:\n{self._best_values}')
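        # The previous best values are now flat 1D arrays over the map points,
        # expressed in the solver's internal normalized units.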

        # Normalize the initial parameters (and the best values for a refit)
        # print('\nIn FitMap before normalize:')
        # self._parameters.pretty_print()
        # print(f'\nparameter_norms:\n{self._parameter_norms}\n')
        self._normalize()
        # print('\nIn FitMap after normalize:')
        # self._parameters.pretty_print()
        # print(f'\nparameter_norms:\n{self._parameter_norms}\n')

        # Prevent initial values from sitting at boundaries
        self._parameter_bounds = {name: {'min': par.min, 'max': par.max}
                for name, par in self._parameters.items() if par.vary}
        for name, par in self._parameters.items():
            if par.vary:
                par.set(value=self._reset_par_at_boundary(par, par.value))
        # print('\nAfter checking boundaries:')
        # self._parameters.pretty_print()

        # Set parameter bounds to unbound (bounds are only used when a fit fails)
        if self._try_no_bounds:
            for name in self._parameter_bounds.keys():
                self._parameters[name].set(min=-np.inf, max=np.inf)
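        # Fitting unbounded is typically faster; _fit() falls back to the bounds saved
        # in self._parameter_bounds for any point whose unbounded fit lands out of range.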

        # Allocate memory to store the fit results
        if self._mask is None:
            x_size = self._x.size
        else:
            x_size = self._x[~self._mask].size
        if num_proc == 1:
            self._out_of_bounds_flat = np.zeros(self._map_dim, dtype=bool)
            self._max_nfev_flat = np.zeros(self._map_dim, dtype=bool)
            self._redchi_flat = np.zeros(self._map_dim, dtype=np.float64)
            self._success_flat = np.zeros(self._map_dim, dtype=bool)
            self._best_fit_flat = np.zeros((self._map_dim, x_size),
                    dtype=self._ymap_norm.dtype)
            self._best_errors_flat = [np.zeros(self._map_dim, dtype=np.float64)
                    for _ in range(num_best_parameters+num_new_parameters)]
            if self._result is None:
                self._best_values_flat = [np.zeros(self._map_dim, dtype=np.float64)
                        for _ in range(num_best_parameters)]
            else:
                self._best_values_flat = self._best_values
                self._best_values_flat += [np.zeros(self._map_dim, dtype=np.float64)
                        for _ in range(num_new_parameters)]
        else:
            self._memfolder = './joblib_memmap'
            try:
                mkdir(self._memfolder)
            except FileExistsError:
                pass
            filename_memmap = path.join(self._memfolder, 'out_of_bounds_memmap')
            self._out_of_bounds_flat = np.memmap(filename_memmap, dtype=bool,
                    shape=self._map_dim, mode='w+')
            filename_memmap = path.join(self._memfolder, 'max_nfev_memmap')
            self._max_nfev_flat = np.memmap(filename_memmap, dtype=bool,
                    shape=self._map_dim, mode='w+')
            filename_memmap = path.join(self._memfolder, 'redchi_memmap')
            self._redchi_flat = np.memmap(filename_memmap, dtype=np.float64,
                    shape=self._map_dim, mode='w+')
            filename_memmap = path.join(self._memfolder, 'success_memmap')
            self._success_flat = np.memmap(filename_memmap, dtype=bool,
                    shape=self._map_dim, mode='w+')
            filename_memmap = path.join(self._memfolder, 'best_fit_memmap')
            self._best_fit_flat = np.memmap(filename_memmap, dtype=self._ymap_norm.dtype,
                    shape=(self._map_dim, x_size), mode='w+')
            self._best_errors_flat = []
            for i in range(num_best_parameters+num_new_parameters):
                filename_memmap = path.join(self._memfolder, f'best_errors_memmap_{i}')
                self._best_errors_flat.append(np.memmap(filename_memmap, dtype=np.float64,
                        shape=self._map_dim, mode='w+'))
            self._best_values_flat = []
            for i in range(num_best_parameters):
                filename_memmap = path.join(self._memfolder, f'best_values_memmap_{i}')
                self._best_values_flat.append(np.memmap(filename_memmap, dtype=np.float64,
                        shape=self._map_dim, mode='w+'))
                if self._result is not None:
                    self._best_values_flat[i][:] = self._best_values[i][:]
            for i in range(num_new_parameters):
                filename_memmap = path.join(self._memfolder,
                        f'best_values_memmap_{i+num_best_parameters}')
                self._best_values_flat.append(np.memmap(filename_memmap, dtype=np.float64,
                        shape=self._map_dim, mode='w+'))
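        # The memory-mapped arrays let the joblib workers write their results straight
        # into shared on-disk buffers instead of pickling them back to the parent
        # process; the scratch folder is presumably removed again in freemem().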

        # Update the best parameter list
        if num_new_parameters:
            self._best_parameters += self._new_parameters

        # Perform the first fit to get model component info and initial parameters
        current_best_values = {}
        # print(f'0 before:\n{current_best_values}')
        # t1 = time()
        self._result = self._fit(0, current_best_values, return_result=True, **kwargs)
        # t2 = time()
        # print(f'0 after:\n{current_best_values}')
        # print('\nAfter the first fit:')
        # self._parameters.pretty_print()
        # print(self._result.fit_report(show_correl=False))

        # Remove all irrelevant content from self._result
        for attr in ('_abort', 'aborted', 'aic', 'best_fit', 'best_values', 'bic',
                'calc_covar', 'call_kws', 'chisqr', 'ci_out', 'col_deriv', 'covar',
                'data', 'errorbars', 'flatchain', 'ier', 'init_vals', 'init_fit',
                'iter_cb', 'jacfcn', 'kws', 'last_internal_values', 'lmdif_message',
                'message', 'method', 'nan_policy', 'ndata', 'nfev', 'nfree', 'params',
                'redchi', 'reduce_fcn', 'residual', 'result', 'scale_covar',
                'show_candidates', 'success', 'userargs', 'userfcn', 'userkws',
                'values', 'var_names', 'weights', 'user_options'):
            try:
                delattr(self._result, attr)
            except AttributeError:
                # logging.warning(f'Unknown attribute {attr} in fit.FitMap._cleanup_result')
                pass

        # t3 = time()
        if num_proc == 1:
            # Perform the remaining fits serially
            for n in range(1, self._map_dim):
                # print(f'{n} before:\n{current_best_values}')
                self._fit(n, current_best_values, **kwargs)
                # print(f'{n} after:\n{current_best_values}')
        else:
            # Perform the remaining fits in parallel
            num_fit = self._map_dim-1
            # print(f'num_fit = {num_fit}')
            if num_proc > num_fit:
                logging.warning(f'The requested number of processors ({num_proc}) exceeds '+
                        f'the number of fits; num_proc reduced to {num_fit}')
                num_proc = num_fit
                num_fit_per_proc = 1
            else:
                num_fit_per_proc = round(num_fit/num_proc)
                if num_proc*num_fit_per_proc < num_fit:
                    num_fit_per_proc += 1
            # print(f'num_fit_per_proc = {num_fit_per_proc}')
            num_fit_batch = min(num_fit_per_proc, 40)
            # print(f'num_fit_batch = {num_fit_batch}')
            with Parallel(n_jobs=num_proc) as parallel:
                parallel(delayed(self._fit_parallel)(current_best_values, num_fit_batch,
                        n_start, **kwargs) for n_start in range(1, self._map_dim, num_fit_batch))
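        # Fits are dispatched in batches (at most 40 per task) to amortize joblib's
        # per-task overhead; each task receives its own copy of current_best_values,
        # so seeded starting values only propagate within a batch.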
        # t4 = time()

        # Renormalize the initial parameters for external use
        if self._norm is not None and self._normalized:
            init_values = {}
            for name, value in self._result.init_values.items():
                if name not in self._parameter_norms or self._parameters[name].expr is not None:
                    init_values[name] = value
                elif self._parameter_norms[name]:
                    init_values[name] = value*self._norm[1]
            self._result.init_values = init_values
            for name, par in self._result.init_params.items():
                if par.expr is None and self._parameter_norms.get(name, False):
                    _min = par.min
                    _max = par.max
                    value = par.value*self._norm[1]
                    if not np.isinf(_min) and abs(_min) != float_min:
                        _min *= self._norm[1]
                    if not np.isinf(_max) and abs(_max) != float_min:
                        _max *= self._norm[1]
                    par.set(value=value, min=_min, max=_max)
                    par.init_value = par.value
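        # The reported initial values and parameters now carry physical (unnormalized)
        # magnitudes, consistent with the renormalized best-fit results.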

        # Remap the best results
        # t5 = time()
        self._out_of_bounds = np.copy(np.reshape(self._out_of_bounds_flat, self._map_shape))
        self._max_nfev = np.copy(np.reshape(self._max_nfev_flat, self._map_shape))
        self._redchi = np.copy(np.reshape(self._redchi_flat, self._map_shape))
        self._success = np.copy(np.reshape(self._success_flat, self._map_shape))
        self._best_fit = np.copy(np.reshape(self._best_fit_flat,
                list(self._map_shape)+[x_size]))
        self._best_values = np.asarray([np.reshape(par, list(self._map_shape))
                for par in self._best_values_flat])
        self._best_errors = np.asarray([np.reshape(par, list(self._map_shape))
                for par in self._best_errors_flat])
        if self._inv_transpose is not None:
            self._out_of_bounds = np.transpose(self._out_of_bounds, self._inv_transpose)
            self._max_nfev = np.transpose(self._max_nfev, self._inv_transpose)
            self._redchi = np.transpose(self._redchi, self._inv_transpose)
            self._success = np.transpose(self._success, self._inv_transpose)
            self._best_fit = np.transpose(self._best_fit,
                    list(self._inv_transpose)+[len(self._inv_transpose)])
            self._best_values = np.transpose(self._best_values,
                    [0]+[i+1 for i in self._inv_transpose])
            self._best_errors = np.transpose(self._best_errors,
                    [0]+[i+1 for i in self._inv_transpose])
        del self._out_of_bounds_flat
        del self._max_nfev_flat
        del self._redchi_flat
        del self._success_flat
        del self._best_fit_flat
        del self._best_values_flat
        del self._best_errors_flat
        # t6 = time()

        # Restore the parameter bounds and renormalize the parameters
        for name, par in self._parameter_bounds.items():
            self._parameters[name].set(min=par['min'], max=par['max'])
        self._normalized = False
        if self._norm is not None:
            for name, norm in self._parameter_norms.items():
                par = self._parameters[name]
                if par.expr is None and norm:
                    value = par.value*self._norm[1]
                    _min = par.min
                    _max = par.max
                    if not np.isinf(_min) and abs(_min) != float_min:
                        _min *= self._norm[1]
                    if not np.isinf(_max) and abs(_max) != float_min:
                        _max *= self._norm[1]
                    par.set(value=value, min=_min, max=_max)
        # t7 = time()
        # print(f'total run time in fit: {t7-t0:.2f} seconds')
        # print(f'run time first fit: {t2-t1:.2f} seconds')
        # print(f'run time remaining fits: {t4-t3:.2f} seconds')
        # print(f'run time remapping results: {t6-t5:.2f} seconds')

        # print('\n\nAt end fit:')
        # self._parameters.pretty_print()
        # print(f'self._best_values:\n{self._best_values}\n\n')

        # Free the shared memory
        self.freemem()

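    # Worker entry point for the parallel fits: runs up to num consecutive fits
    # starting at map point n_start, clipped at the end of the map.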
    def _fit_parallel(self, current_best_values, num, n_start, **kwargs):
        num = min(num, self._map_dim-n_start)
        for n in range(num):
            # print(f'{n_start+n} before:\n{current_best_values}')
            self._fit(n_start+n, current_best_values, **kwargs)
            # print(f'{n_start+n} after:\n{current_best_values}')

    def _fit(self, n, current_best_values, return_result=False, **kwargs):
        #RV print(f'\n\nstart FitMap._fit {n}\n')
        #RV print(f'current_best_values = {current_best_values}')
        #RV print(f'self._best_parameters = {self._best_parameters}')
        #RV print(f'self._new_parameters = {self._new_parameters}\n\n')
        # self._parameters.pretty_print()
        # Set the parameters to the current best values, but prevent them from sitting
        # at boundaries
        if self._new_parameters is None:
            # Initial fit
            for name, value in current_best_values.items():
                par = self._parameters[name]
                par.set(value=self._reset_par_at_boundary(par, value))
        else:
            # Refit
            for i, name in enumerate(self._best_parameters):
                par = self._parameters[name]
                if name in self._new_parameters:
                    if name in current_best_values:
                        par.set(value=self._reset_par_at_boundary(par,
                                current_best_values[name]))
                elif par.expr is None:
                    par.set(value=self._best_values[i][n])
        #RV print(f'\nbefore fit {n}')
        #RV self._parameters.pretty_print()
        if self._mask is None:
            result = self._model.fit(self._ymap_norm[n], self._parameters, x=self._x, **kwargs)
        else:
            result = self._model.fit(self._ymap_norm[n][~self._mask], self._parameters,
                    x=self._x[~self._mask], **kwargs)
        # print(f'\nafter fit {n}')
        # self._parameters.pretty_print()
        # print(result.fit_report(show_correl=False))
        out_of_bounds = False
        for name, par in self._parameter_bounds.items():
            value = result.params[name].value
            if not np.isinf(par['min']) and value < par['min']:
                out_of_bounds = True
                break
            if not np.isinf(par['max']) and value > par['max']:
                out_of_bounds = True
                break
        self._out_of_bounds_flat[n] = out_of_bounds
        if self._try_no_bounds and out_of_bounds:
            # Rerun the fit with the parameter bounds in place
            for name, par in self._parameter_bounds.items():
                self._parameters[name].set(min=par['min'], max=par['max'])
            # Set the parameters to the current best values, but prevent them from
            # sitting at boundaries
            if self._new_parameters is None:
                # Initial fit
                for name, value in current_best_values.items():
                    par = self._parameters[name]
                    par.set(value=self._reset_par_at_boundary(par, value))
            else:
                # Refit
                for i, name in enumerate(self._best_parameters):
                    par = self._parameters[name]
                    if name in self._new_parameters:
                        if name in current_best_values:
                            par.set(value=self._reset_par_at_boundary(par,
                                    current_best_values[name]))
                    elif par.expr is None:
                        par.set(value=self._best_values[i][n])
            # print('\nbefore fit')
            # self._parameters.pretty_print()
            # print(result.fit_report(show_correl=False))
            if self._mask is None:
                result = self._model.fit(self._ymap_norm[n], self._parameters, x=self._x,
                        **kwargs)
            else:
                result = self._model.fit(self._ymap_norm[n][~self._mask], self._parameters,
                        x=self._x[~self._mask], **kwargs)
            # print(f'\nafter fit {n}')
            # self._parameters.pretty_print()
            # print(result.fit_report(show_correl=False))
            out_of_bounds = False
            for name, par in self._parameter_bounds.items():
                value = result.params[name].value
                if not np.isinf(par['min']) and value < par['min']:
                    out_of_bounds = True
                    break
                if not np.isinf(par['max']) and value > par['max']:
                    out_of_bounds = True
                    break
            # print(f'{n} redchi < redchi_cutoff = {result.redchi < self._redchi_cutoff} '+
            #         f'success = {result.success} out_of_bounds = {out_of_bounds}')
            # Reset the parameters back to unbound
            for name in self._parameter_bounds.keys():
                self._parameters[name].set(min=-np.inf, max=np.inf)
            assert(not out_of_bounds)
        if result.redchi >= self._redchi_cutoff:
            result.success = False
        if result.nfev == result.max_nfev:
            # print(f'Maximum number of function evaluations reached for n = {n}')
            # logging.warning(f'Maximum number of function evaluations reached for n = {n}')
            if result.redchi < self._redchi_cutoff:
                result.success = True
            self._max_nfev_flat[n] = True
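        # A fit that exhausted max_nfev is still accepted when its reduced chi-square
        # beats the cutoff; successful fits then seed current_best_values below, which
        # provides the starting point for later fits in this process.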
        if result.success:
            # Every seeded best value must correspond to a parameter in the fit result
            assert(all(name in result.params for name in current_best_values))
            for par in result.params.values():
                if par.vary:
                    current_best_values[par.name] = par.value
        else:
            logging.warning(f'Fit for n = {n} failed: {result.lmdif_message}')
        # Renormalize the data and results
        self._renormalize(n, result)
        if self._print_report:
            print(result.fit_report(show_correl=False))
        if self._plot:
            dims = np.unravel_index(n, self._map_shape)
            if self._inv_transpose is not None:
                dims = tuple(dims[self._inv_transpose[i]] for i in range(len(dims)))
            super().plot(result=result, y=np.asarray(self._ymap[dims]),
                    plot_comp_legends=True, skip_init=self._skip_init, title=str(dims))
        #RV print(f'\n\nend FitMap._fit {n}\n')
        #RV print(f'current_best_values = {current_best_values}')
        # self._parameters.pretty_print()
        # print(result.fit_report(show_correl=False))
        #RV print(f'\nself._best_values_flat:\n{self._best_values_flat}\n\n')
        if return_result:
            return result

    def _renormalize(self, n, result):
        self._redchi_flat[n] = np.float64(result.redchi)
        self._success_flat[n] = result.success
        if self._norm is None or not self._normalized:
            self._best_fit_flat[n] = result.best_fit
            for i, name in enumerate(self._best_parameters):
                self._best_values_flat[i][n] = np.float64(result.params[name].value)
                self._best_errors_flat[i][n] = np.float64(result.params[name].stderr)
        else:
            pars = set(self._parameter_norms) & set(self._best_parameters)
            for name, par in result.params.items():
                if name in pars and self._parameter_norms[name]:
                    if par.stderr is not None:
                        par.stderr *= self._norm[1]
                    if par.expr is None:
                        par.value *= self._norm[1]
                        if self._print_report:
                            if par.init_value is not None:
                                par.init_value *= self._norm[1]
                            if not np.isinf(par.min) and abs(par.min) != float_min:
                                par.min *= self._norm[1]
                            if not np.isinf(par.max) and abs(par.max) != float_min:
                                par.max *= self._norm[1]
            self._best_fit_flat[n] = result.best_fit*self._norm[1]+self._norm[0]
            for i, name in enumerate(self._best_parameters):
                self._best_values_flat[i][n] = np.float64(result.params[name].value)
                self._best_errors_flat[i][n] = np.float64(result.params[name].stderr)
            if self._plot:
                if not self._skip_init:
                    result.init_fit = result.init_fit*self._norm[1]+self._norm[0]
                result.best_fit = np.copy(self._best_fit_flat[n])