def load_json(file_name, file_path):
    """Load json file.

    Parameters
    ----------
    file_name : str or FileObject
        File from which to load data.
    file_path : str
        Path to directory from which to load. If not provided, loads from current directory.

    Returns
    -------
    dat : dict
        Dictionary of data loaded from file.

    Raises
    ------
    ValueError
        If the file name input is not understood.
    """

    # Load data from file
    if isinstance(file_name, str):
        with open(os.path.join(file_path, _check_fname(file_name)), 'r') as infile:
            dat = json.load(infile)
    elif isinstance(file_name, io.IOBase):
        # An open file object is treated as JSONlines: read a single line
        dat = json.loads(file_name.readline())
    else:
        # Previously an unsupported input fell through to an opaque
        #   UnboundLocalError on `dat` - raise an explicit error instead
        raise ValueError('Load file name not understood.')

    # Get dictionary of available attributes, and convert specified lists back into arrays
    dat = dict_lst_to_array(dat, get_obj_desc()['arrays'])

    return dat
def save_fm(fm, file_name, file_path='', append=False,
            save_results=False, save_settings=False, save_data=False):
    """Save out data, results and/or settings from FOOOF object. Saves out to a JSON file.

    Parameters
    ----------
    fm : FOOOF() object
        FOOOF object from which to save data.
    file_name : str or FileObject
        File to which to save data.
    file_path : str, optional
        Path to directory to which the save. If not provided, saves to current directory.
    append : bool, optional
        Whether to append to an existing file, if available. default: False
        This option is only valid (and only used) if file_name is a str.
    save_results : bool, optional
        Whether to save out FOOOF model fit results.
    save_settings : bool, optional
        Whether to save out FOOOF settings.
    save_data : bool, optional
        Whether to save out input data.

    Raises
    ------
    ValueError
        If the file name input is not understood.
    """

    # Convert object to dictionary & convert all arrays to lists - for JSON serializing
    obj_dict = dict_array_to_lst(fm.__dict__)

    # Set and select which variables to keep. Use a set to drop any potential overlap
    #   Note that results also saves frequency information to be able to recreate freq vector
    attributes = get_obj_desc()
    keep = set((attributes['results'] + attributes['freq_info'] if save_results else []) + \
               (attributes['settings'] if save_settings else []) + \
               (attributes['data'] if save_data else []))
    obj_dict = dict_select_keys(obj_dict, keep)

    # Save out - create new file, (creates a JSON file)
    if isinstance(file_name, str) and not append:
        with open(os.path.join(file_path, _check_fname(file_name)), 'w') as outfile:
            json.dump(obj_dict, outfile)

    # Save out - append to file_name (appends to a JSONlines file)
    elif isinstance(file_name, str) and append:
        with open(os.path.join(file_path, _check_fname(file_name)), 'a') as outfile:
            json.dump(obj_dict, outfile)
            outfile.write('\n')

    # Save out - append to given file object (appends to a JSONlines file)
    elif isinstance(file_name, io.IOBase):
        json.dump(obj_dict, file_name)
        file_name.write('\n')

    else:
        # NOTE: the original source had this string literal broken across a
        #   physical newline (a syntax error) - restored to a single line
        raise ValueError('Save file name not understood.')
def _check_loaded_settings(self, data):
    """Check if settings added, and update the object as needed.

    Parameters
    ----------
    data : dict
        The dictionary of data that has been added to the object.
    """

    settings = get_obj_desc()['settings']

    # When the loaded data does not cover the full set of settings, wipe
    #   them all from the object - keeping the defaults would be misleading,
    #   since they may not match the settings used for the loaded data
    if not set(settings).issubset(set(data.keys())):
        for setting in settings:
            setattr(self, setting, None)

    # If background parameters were loaded, infer whether knee fitting was used
    if np.all(self.background_params_):
        self.background_mode = infer_bg_func(self.background_params_)

    # Keep internal settings consistent with what was loaded
    #   Note: this sets internal settings to None, if public settings are unavailable
    self._reset_internal_settings()
def test_fooof_fit_failure():
    """Test that fit handles a failure."""

    # Monkey-patch a fresh FOOOF object so that peak fitting raises,
    #   mimicking the main fit-failure path without needing bad data
    tfm = FOOOF()

    def _raise_error(*args, **kwargs):
        raise RuntimeError('Test-MonkeyPatch')
    tfm._fit_peaks = _raise_error

    # Fitting should hit the error, but continue through the internal try/except
    tfm.fit(*gen_power_spectrum([3, 50], [50, 2], [10, 0.5, 2, 20, 0.3, 4]))

    # Having failed out of fit, every result attribute should be reset
    for result in get_obj_desc()['results']:
        value = getattr(tfm, result)
        assert value is None or np.all(np.isnan(value))
def get_data_info(f_obj):
    """Get a dictionary of current data information from a FOOOF or FOOOFGroup object.

    Parameters
    ----------
    f_obj : FOOOF or FOOOFGroup
        FOOOF derived object to get data information from.

    Returns
    -------
    dictionary
        Data information for the input FOOOF derived object.
    """

    # Collect each frequency-information attribute from the object
    data_info = {}
    for label in get_obj_desc()['freq_info']:
        data_info[label] = getattr(f_obj, label)

    return data_info
def get_settings(f_obj):
    """Get a dictionary of current settings from a FOOOF or FOOOFGroup object.

    Parameters
    ----------
    f_obj : FOOOF or FOOOFGroup
        FOOOF derived object to get settings from.

    Returns
    -------
    dictionary
        Settings for the input FOOOF derived object.
    """

    # Pull each settings attribute from the object into a dictionary
    settings = {}
    for name in get_obj_desc()['settings']:
        settings[name] = getattr(f_obj, name)

    return settings
def test_fg_get_fooof(tfg):
    """Check return of an individual model fit to a FOOOF object from FOOOFGroup."""

    desc = get_obj_desc()

    # Extract a model fit, without regenerating the model
    tfm0 = tfg.get_fooof(0, False)
    assert tfm0

    # Settings should have been copied over from the group
    for setting in desc['settings']:
        assert getattr(tfm0, setting) == getattr(tfg, setting)

    # Extract a model fit, regenerating the model
    tfm1 = tfg.get_fooof(1, True)
    assert tfm1

    # The regenerated model results should all be populated
    for result in desc['results']:
        assert np.all(getattr(tfm1, result))
def combine_fooofs(fooofs):
    """Combine a group of FOOOF and/or FOOOFGroup objects into a single FOOOFGroup object.

    Parameters
    ----------
    fooofs : list of FOOOF objects
        FOOOF objects to be concatenated into a FOOOFGroup.

    Returns
    -------
    fg : FOOOFGroup object
        Resultant FOOOFGroup object created from input FOOOFs.

    Raises
    ------
    ValueError
        If the input objects have incompatible settings or data information.
    """

    # Compare settings - all inputs must be consistent to be combined
    #   Note: fixed a missing space between the two string fragments of this message
    if not compare_settings(fooofs) or not compare_data_info(fooofs):
        raise ValueError("These objects have incompatible settings or data, "
                         "and so cannot be combined.")

    # Initialize FOOOFGroup object, with settings derived from input objects
    #   Note: FOOOFGroup imported here to avoid an import circularity if imported at the top
    from fooof import FOOOFGroup
    fg = FOOOFGroup(**get_settings(fooofs[0]), verbose=fooofs[0].verbose)
    fg.power_spectra = np.empty([0, len(fooofs[0].freqs)])

    # Add FOOOF results from each FOOOF object to group
    for f_obj in fooofs:

        # Add FOOOFGroup object
        if isinstance(f_obj, FOOOFGroup):
            fg.group_results.extend(f_obj.group_results)
            fg.power_spectra = np.vstack([fg.power_spectra, f_obj.power_spectra])

        # Add FOOOF object
        else:
            fg.group_results.append(f_obj.get_results())
            fg.power_spectra = np.vstack([fg.power_spectra, f_obj.power_spectrum])

    # Add data information, copied from the first input object
    for data_info in get_obj_desc()['freq_info']:
        setattr(fg, data_info, getattr(fooofs[0], data_info))
    fg.freqs = gen_freqs(fg.freq_range, fg.freq_res)

    return fg
def _check_loaded_results(self, data, regenerate=True):
    """Check if results added, check data, and regenerate model, if requested.

    Parameters
    ----------
    data : dict
        The dictionary of data that has been added to the object.
    regenerate : bool, optional
        Whether to regenerate the power_spectrum model. default : True
    """

    # If results were loaded, fix up the dimensions of the peak / gaussian parameters
    #   This addresses an issue where they end up the wrong shape if empty (no oscs)
    if set(get_obj_desc()['results']).issubset(set(data.keys())):
        self.peak_params_ = check_array_dim(self.peak_params_)
        self._gaussian_params = check_array_dim(self._gaussian_params)

    # Regenerate the model & components, when requested and the needed data is present
    if regenerate and np.all(self.freqs) and np.all(self.background_params_):
        self._regenerate_model()
def test_load_file_contents():
    """Check that loaded files contain the contents they should.

    Note: if this test fails, it likely stems from an issue with saving.
    """

    file_name = 'test_fooof_str_all'
    file_path = pkg.resource_filename(__name__, 'test_files')

    loaded_data = load_json(file_name, file_path)
    desc = get_obj_desc()

    # Check settings
    for setting in desc['settings']:
        assert setting in loaded_data.keys()

    # Check results
    for result in desc['results']:
        assert result in loaded_data.keys()

    # Check data
    for datum in desc['data']:
        assert datum in loaded_data.keys()