def run(self):
    """Truncate the file at self.path to its header line plus at most
    self.readings data lines, working through a temporary .bak copy.

    Side effects: rewrites self.path in place; creates and then removes
    a backup file named after the last path component.
    """
    # Last path component without its extension names the backup file.
    # (was: path_split[len(path_split) - 1] — use the [-1] idiom)
    file_name = self.path.split(os.get_path_seperator())[-1]
    backup_name = file_name.split('.')[0]
    backup = os.copy_file(self.path, f"{backup_name}{self.backup_name_index}.bak")
    with open(self.path, 'w') as data_file, open(backup) as backup_file:
        # First line is the header; always carried over.
        data_file.write(backup_file.readline())
        data_reading = backup_file.readline()
        index = 0
        # Copy readings until the quota is reached or the backup is exhausted.
        while index < self.readings and data_reading != "":
            data_file.write(data_reading)
            data_reading = backup_file.readline()
            index += 1
    os.remove_file(backup)
def _merge_training(self, training, backup):
    """Append every line of the backup file to the already-open training
    file, then delete the backup from disk."""
    with open(backup) as backup_training:
        for record in backup_training:
            training.write(record)
    os.remove_file(backup)
def append_training_set(self, file, path, ressources):
    """Load each ECG resource found in `ressources`, append its data set
    to the open `file`, and delete every consumed resource from disk.

    Resources failing the ECG check are skipped; resources with missing
    inputs are deleted without being appended.
    """
    for res_elem in ressources:
        # Non-ECG entries are ignored entirely.
        if not self._is_ecg(res_elem):
            continue
        resource_path = os.path_join(path, res_elem)
        loaded = loader.load_data(resource_path)
        if self._is_missing_inputs(len(loaded.values)):
            # Unusable recording: drop the file and move on.
            os.remove_file(resource_path)
            continue
        file.write(self._append_data_set("", loaded))
        os.remove_file(resource_path)
def _save_training_samples(self, rec_loc, indexes):
    """Rewrite rec_loc keeping its header line and dropping the data lines
    at the positions listed in `indexes`.

    `indexes` is consumed in ascending order of position — presumably it
    is sorted; TODO confirm with callers. Note that data lines AFTER the
    last index in `indexes` are never copied, so everything past the last
    skipped position is discarded as well.
    """
    # Work from a temporary copy so rec_loc can be reopened for writing.
    copied_file = os.copy_file(rec_loc, "temp.bak")
    # `index` tracks the position (header excluded) of the next unread line.
    index = 0
    with open(copied_file) as training_copy:
        with open(rec_loc, 'w') as training:
            # First line is the header; always carried over.
            training.write(training_copy.readline())
            for elem in indexes:
                # Copy every line up to (but excluding) position `elem`.
                for _ in range(index, elem):
                    index += 1
                    training.write(training_copy.readline())
                # Read and discard the line at position `elem`.
                training_copy.readline()
                index += 1
    os.remove_file(copied_file)
def run(self):
    """Normalize every reading of self.resource in place: the header line
    is copied through unchanged, each subsequent line passes through
    self.data_reading_normalizer. A temporary .bak copy serves as the
    read source and is removed afterwards."""
    file_to_backup = os.path_join(self.path, self.resource)
    backup_name = f"{self.resource.split('.')[0]}.bak"
    backup_path = os.copy_file(file_to_backup, backup_name)
    with open(os.path_join(self.path, self.resource), 'w') as datafile:
        with open(backup_path) as data:
            # Header line is carried over untouched.
            datafile.write(data.readline())
            for raw_reading in data:
                datafile.write(self.data_reading_normalizer(raw_reading))
    os.remove_file(backup_path)
def run(self):
    """Rewrite self.res_path as a one-column 'readings' file, extracting
    a single value per input line via self.get_reading_value. Lines that
    fail to parse or yield None are skipped (best-effort). A backup copy
    serves as the read source and is removed afterwards."""
    backup_path = os.copy_file(self.res_path, self.backup_name)
    with open(self.res_path, 'w') as data_file, open(backup_path) as backup:
        splitted_data = backup.read().split("\n")
        data_file.write("readings\n")
        for elem in splitted_data:
            try:
                reading = self.get_reading_value(elem)
                if reading is not None:  # was `!= None`
                    data_file.write(f"{reading}\n")
            # Was a bare `except:` — now leaves SystemExit/KeyboardInterrupt
            # alone while keeping the deliberate best-effort skip.
            except Exception:
                pass
    os.remove_file(backup_path)
def run(self):
    """Interleave the fantasia, nsr and ltaf reading files into self.path,
    then delete all three sources.

    The header line is taken from the fantasia file. Interleaving runs in
    three phases: all three sources together, then whichever of
    fantasia/nsr remains paired with ltaf, each driven by self._write_data.
    """
    fantasia_counter = self._count(self.fantasia)
    nsr_counter = self._count(self.nsr)
    ltaf_counter = self._count(self.ltaf)
    # All four handles live in the `with` so they are closed even when
    # _write_data raises (originally the three inputs were opened bare and
    # closed manually, leaking on any exception).
    with open(self.path, 'w') as res_file, \
            open(self.fantasia) as fantasia, \
            open(self.nsr) as nsr, \
            open(self.ltaf) as ltaf:
        fantasia_temp = fantasia.readline()
        nsr_temp = nsr.readline()
        ltaf_temp = ltaf.readline()
        # Header comes from the fantasia file.
        res_file.write(fantasia_temp)
        # NOTE(review): integer-divides by fantasia_counter + nsr_counter —
        # raises ZeroDivisionError when both sources are empty; confirm
        # callers guarantee non-empty inputs.
        ltaf_read_rate = (ltaf_counter // (fantasia_counter + nsr_counter)) - 1
        ltaf_counter = fantasia_counter + nsr_counter
        # Phase 1: all three sources still have data.
        while fantasia_temp != "" and nsr_temp != "" and ltaf_temp != "":
            fantasia_temp = self._write_data(res_file, fantasia, fantasia_counter, self.concat_rate, False)
            nsr_temp = self._write_data(res_file, nsr, nsr_counter, self.concat_rate, False)
            ltaf_temp = self._write_data(res_file, ltaf, ltaf_counter, self.concat_rate, True, ltaf_read_rate)
        # Phase 2a: nsr exhausted first.
        while fantasia_temp != "" and ltaf_temp != "":
            fantasia_temp = self._write_data(res_file, fantasia, fantasia_counter, self.concat_rate, False)
            ltaf_temp = self._write_data(res_file, ltaf, ltaf_counter, self.concat_rate, True, ltaf_read_rate)
        # Phase 2b: fantasia exhausted first.
        while nsr_temp != "" and ltaf_temp != "":
            nsr_temp = self._write_data(res_file, nsr, nsr_counter, self.concat_rate, False)
            ltaf_temp = self._write_data(res_file, ltaf, ltaf_counter, self.concat_rate, True, ltaf_read_rate)
    os.remove_file(self.fantasia)
    os.remove_file(self.nsr)
    os.remove_file(self.ltaf)
def exec(self, task_input, task_output):
    """Upsample every reading file under task_output['res_loc'] from
    `sampled_frequency` toward `target_frequency` by periodically
    duplicating samples, renumbering the sample column as it goes.

    NOTE(review): this block was reconstructed from whitespace-mangled
    source — the nesting of the trailing duplicate-write statements below
    is the most plausible reading; confirm against version history.
    """
    sampling_frequency = task_input["sampled_frequency"]
    target_frequency = task_input["target_frequency"]
    # Number of source samples to pass through between duplications.
    # NOTE(review): divides by (target/sampled - 1) — raises
    # ZeroDivisionError when target == sampled; confirm inputs differ.
    doubling_rate = math.ceil(sampling_frequency / (((target_frequency / sampling_frequency) - 1) * sampling_frequency))
    res_elems = os.dir_res_list(task_output["res_loc"])
    for res_elem in res_elems:
        origin_path = os.path_join(task_output["res_loc"], res_elem)
        if os.is_path_file(origin_path):
            # Rewrite in place, reading from a temporary backup copy.
            backup_path = os.copy_file(origin_path, f"{task_input['name']}_bakup.bak")
            with open(origin_path, 'w') as file:
                with open(backup_path) as backup_file:
                    temp_reading = backup_file.readline()
                    # Running sample number written as the first CSV column.
                    sampling_nr = 1
                    while temp_reading != "":
                        temp_reading = backup_file.readline()
                        if temp_reading != "":
                            # Copy one sample, renumbered; columns 2 and 3 kept.
                            splitted = temp_reading.split(',')
                            file.write(f"{sampling_nr},{splitted[1]},{splitted[2]}")
                            # "f2" files presumably lack trailing newlines — TODO confirm.
                            if "f2" in res_elem:
                                file.write("\n")
                            sampling_nr += 1
                        # Pass through doubling_rate - 1 further samples.
                        for _ in range(1, doubling_rate):
                            temp_reading = backup_file.readline()
                            if temp_reading != "":
                                splitted = temp_reading.split(',')
                                file.write(f"{sampling_nr},{splitted[1]},{splitted[2]}")
                                if "f2" in res_elem:
                                    file.write("\n")
                                sampling_nr += 1
                        if temp_reading != "":
                            # Duplicate the last sample of the group — this
                            # repetition implements the upsampling.
                            splitted = temp_reading.split(',')
                            file.write(f"{sampling_nr},{splitted[1]},{splitted[2]}")
                            if "f2" in res_elem:
                                file.write("\n")
                            sampling_nr += 1
                            # NOTE(review): the same line is written a second
                            # time here (three writes of this sample in total
                            # counting the loop above) — looks suspicious;
                            # verify this repetition is intended.
                            splitted = temp_reading.split(',')
                            file.write(f"{sampling_nr},{splitted[1]},{splitted[2]}")
                            if "f2" in res_elem:
                                file.write("\n")
                            sampling_nr += 1
                            temp_reading = backup_file.readline()
            os.remove_file(backup_path)