def load_json(file_name, file_path):
    """Load json file.

    Parameters
    ----------
    file_name : str or FileObject
        File from which to load data.
    file_path : str
        Path to directory from which to load.

    Returns
    -------
    dat : dict
        Dictionary of data loaded from file.
    """

    # Load data from file
    if isinstance(file_name, str):
        with open(fpath(file_path, fname(file_name, 'json')), 'r') as infile:
            dat = json.load(infile)
    elif isinstance(file_name, io.IOBase):
        dat = json.loads(file_name.readline())

    # Get dictionary of available attributes, and convert specified lists back into arrays
    dat = dict_lst_to_array(dat, get_obj_desc()['arrays'])

    return dat

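# Usage sketch (illustrative, not part of the module): loading a previously saved FOOOF
# file back into a dictionary with load_json. The file name 'fm_results' and the directory
# 'data' are hypothetical placeholders, assuming a 'fm_results.json' file was saved there;
# json, io and the helper functions used above are assumed to be imported at module level.
#
#   dat = load_json('fm_results', 'data')
#   print(dat.keys())
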
def save_fm(fm, file_name, file_path=None, append=False,
            save_results=False, save_settings=False, save_data=False):
    """Save out data, results and/or settings from a FOOOF object.

    Saves out to a JSON file.

    Parameters
    ----------
    fm : FOOOF object
        FOOOF object from which to save data.
    file_name : str or FileObject
        File to which to save data.
    file_path : str, optional
        Path to directory in which to save. If not provided, saves to current directory.
    append : bool, optional
        Whether to append to an existing file, if available. default: False
        This option is only valid (and only used) if file_name is a str.
    save_results : bool, optional
        Whether to save out FOOOF model fit results.
    save_settings : bool, optional
        Whether to save out FOOOF settings.
    save_data : bool, optional
        Whether to save out input data.
    """

    # Convert object to dictionary & convert all arrays to lists - for JSON serializing
    obj_dict = dict_array_to_lst(fm.__dict__)

    # Set and select which variables to keep. Use a set to drop any potential overlap
    #  Note that results also saves frequency information to be able to recreate freq vector
    attributes = get_obj_desc()
    keep = set((attributes['results'] + attributes['freq_info'] if save_results else []) + \
               (attributes['settings'] if save_settings else []) + \
               (attributes['data'] if save_data else []))
    obj_dict = dict_select_keys(obj_dict, keep)

    # Save out - create a new file (a JSON file)
    if isinstance(file_name, str) and not append:
        with open(fpath(file_path, fname(file_name, 'json')), 'w') as outfile:
            json.dump(obj_dict, outfile)

    # Save out - append to file_name (appends to a JSONlines file)
    elif isinstance(file_name, str) and append:
        with open(fpath(file_path, fname(file_name, 'json')), 'a') as outfile:
            json.dump(obj_dict, outfile)
            outfile.write('\n')

    # Save out - append to given file object (appends to a JSONlines file)
    elif isinstance(file_name, io.IOBase):
        json.dump(obj_dict, file_name)
        file_name.write('\n')

    else:
        raise ValueError('Save file not understood.')

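# Usage sketch (illustrative, not part of the module): saving out the results and settings
# of a fitted FOOOF object with save_fm. The object 'fm' and file name 'fm_results' are
# hypothetical; 'fm' is assumed to have already been fit to a power spectrum.
#
#   save_fm(fm, 'fm_results', save_results=True, save_settings=True)
#
# Calling save_fm repeatedly with append=True on the same str file name builds up a
# JSONlines file, with one saved fit per line.
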
def _check_loaded_settings(self, data):
    """Check if settings added, and update the object as needed.

    Parameters
    ----------
    data : dict
        The dictionary of data that has been added to the object.
    """

    # If settings not loaded from file, clear from object, so that default
    #  settings, which are potentially wrong for loaded data, aren't kept
    if not set(get_obj_desc()['settings']).issubset(set(data.keys())):

        # Reset all public settings to None
        for setting in get_obj_desc()['settings']:
            setattr(self, setting, None)

        # Infer whether knee fitting was used, if aperiodic params have been loaded
        if np.all(self.aperiodic_params_):
            self.aperiodic_mode = infer_ap_func(self.aperiodic_params_)

    # Reset internal settings so that they are consistent with what was loaded
    #  Note that this will set internal settings to None, if public settings unavailable
    self._reset_internal_settings()

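# Illustrative note (an assumption about behavior, not confirmed by this module): the
# inference step above relies on the number of loaded aperiodic parameters - two values
# (offset, exponent) typically indicate 'fixed' mode, while three (offset, knee, exponent)
# indicate 'knee' mode. A hypothetical check of that behavior could look like:
#
#   assumed_mode = infer_ap_func([1.5, 25.0, 2.0])   # expected: 'knee', for 3 parameters
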
def test_fooof_fit_failure():
    """Test that fit handles a failure."""

    # Use a new FOOOF, that is monkey-patched to raise an error
    #  This mimics the main fit-failure, without requiring bad data / waiting for it to fail
    tfm = FOOOF()
    def raise_runtime_error(*args, **kwargs):
        raise RuntimeError('Test-MonkeyPatch')
    tfm._fit_peaks = raise_runtime_error

    # Run a FOOOF fit - the patched error should be raised, and caught in fit's try/except
    tfm.fit(*gen_power_spectrum([3, 50], [50, 2], [10, 0.5, 2, 20, 0.3, 4]))

    # Check that after failing out of fit, all results are reset
    for result in get_obj_desc()['results']:
        cur_res = getattr(tfm, result)
        assert cur_res is None or np.all(np.isnan(cur_res))

def get_data_info(f_obj):
    """Get a dictionary of current data information from a FOOOF or FOOOFGroup object.

    Parameters
    ----------
    f_obj : FOOOF or FOOOFGroup
        FOOOF derived object to get data information from.

    Returns
    -------
    dict
        Data information for the input FOOOF derived object.
    """

    return {dat_info: getattr(f_obj, dat_info)
            for dat_info in get_obj_desc()['freq_info']}

def get_settings(f_obj):
    """Get a dictionary of current settings from a FOOOF or FOOOFGroup object.

    Parameters
    ----------
    f_obj : FOOOF or FOOOFGroup
        FOOOF derived object to get settings from.

    Returns
    -------
    dict
        Settings for the input FOOOF derived object.
    """

    return {setting: getattr(f_obj, setting)
            for setting in get_obj_desc()['settings']}

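# Usage sketch (illustrative, not part of the module): get_settings and get_data_info can
# be used together to check whether two FOOOF derived objects are comparable. The objects
# 'fm1' and 'fm2' are hypothetical, assumed to already contain data and/or fit results.
#
#   same_settings = get_settings(fm1) == get_settings(fm2)
#   same_data_info = get_data_info(fm1) == get_data_info(fm2)
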
def _check_loaded_results(self, data, regenerate=True):
    """Check if results added, check data, and regenerate model, if requested.

    Parameters
    ----------
    data : dict
        The dictionary of data that has been added to the object.
    regenerate : bool, optional
        Whether to regenerate the power_spectrum model. default: True
    """

    # If results loaded, check dimensions of peak parameters
    #  This fixes an issue where they end up the wrong shape if they are empty (no peaks)
    if set(get_obj_desc()['results']).issubset(set(data.keys())):
        self.peak_params_ = check_array_dim(self.peak_params_)
        self._gaussian_params = check_array_dim(self._gaussian_params)

    # Regenerate power_spectrum model & components
    if regenerate:
        if np.all(self.freqs) and np.all(self.aperiodic_params_):
            self._regenerate_model()

def test_load_file_contents():
    """Check that loaded files contain the contents they should.

    Note that if this test fails, it likely stems from an issue with saving.
    """

    file_name = 'test_fooof_str_all'
    file_path = pkg.resource_filename(__name__, 'test_files')

    loaded_data = load_json(file_name, file_path)

    desc = get_obj_desc()

    # Check settings
    for setting in desc['settings']:
        assert setting in loaded_data.keys()

    # Check results
    for result in desc['results']:
        assert result in loaded_data.keys()

    # Check data
    for datum in desc['data']:
        assert datum in loaded_data.keys()