def train(self, input_data, labels): """ Train a Convolutional neural network :param input_data: Data to be trained with neural network :type input_data: ndarray :param labels: Labels to be trained with neural network :type labels: ndarray :return: None :rtype: NoneType :History: 2017-Dec-06 - Written - Henry Leung (University of Toronto) """ # Call the checklist to create astroNN folder and save parameters self.pre_training_checklist_child(input_data, labels) reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, min_delta=self.reduce_lr_epsilon, patience=self.reduce_lr_patience, min_lr=self.reduce_lr_min, mode='min', verbose=2) early_stopping = EarlyStopping(monitor='val_loss', min_delta=self.early_stopping_min_delta, patience=self.early_stopping_patience, verbose=2, mode='min') self.virtual_cvslogger = VirutalCSVLogger() self.__callbacks = [reduce_lr, self.virtual_cvslogger ] # default must have unchangeable callbacks if self.callbacks is not None: if isinstance(self.callbacks, list): self.__callbacks.extend(self.callbacks) else: self.__callbacks.append(self.callbacks) start_time = time.time() self.history = self.keras_model.fit( x=self.training_generator, validation_data=self.validation_generator, epochs=self.max_epochs, verbose=self.verbose, workers=os.cpu_count(), callbacks=self.__callbacks, use_multiprocessing=MULTIPROCESS_FLAG) print( f'Completed Training, {(time.time() - start_time):.{2}f}s in total' ) if self.autosave is True: # Call the post training checklist to save parameters self.save() return None
def train(self, input_data, input_recon_target):
    """
    Train a Convolutional Autoencoder

    :param input_data: Data to be trained with neural network
    :type input_data: ndarray
    :param input_recon_target: Reconstruction target for the autoencoder
    :type input_recon_target: ndarray
    :return: None
    :rtype: NoneType
    """
    # Call the checklist to create astroNN folder and save parameters
    self.pre_training_checklist_child(input_data, input_recon_target)

    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, min_delta=self.reduce_lr_epsilon,
                                  patience=self.reduce_lr_patience, min_lr=self.reduce_lr_min,
                                  mode='min', verbose=2)

    self.virtual_cvslogger = VirutalCSVLogger()

    self.keras_model.fit_generator(generator=self.training_generator,
                                   steps_per_epoch=self.num_train // self.batch_size,
                                   validation_data=self.validation_generator,
                                   validation_steps=self.val_num // self.batch_size,
                                   epochs=self.max_epochs,
                                   verbose=self.verbose,
                                   workers=os.cpu_count(),
                                   callbacks=[reduce_lr, self.virtual_cvslogger],
                                   use_multiprocessing=MULTIPROCESS_FLAG)

    if self.autosave:
        # Call the post training checklist to save parameters
        self.save()

    return None
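# Usage sketch (assumption, not from the original source): an autoencoder-style
# call where the reconstruction target is the input itself. ApogeeCVAE is
# assumed to be astroNN's convolutional variational autoencoder; shapes are
# placeholders.
def _example_train_vae():
    import numpy as np
    from astroNN.models import ApogeeCVAE

    vae = ApogeeCVAE()
    vae.max_epochs = 5
    x = np.random.normal(0., 1., (200, 7514))    # mock spectra
    vae.train(x, x)                              # reconstruct the input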
def train(self, input_data, labels, inputs_err=None, labels_err=None):
    """
    Train a Bayesian neural network

    :param input_data: Data to be trained with neural network
    :type input_data: ndarray
    :param labels: Labels to be trained with neural network
    :type labels: ndarray
    :param inputs_err: Error for input_data (if any), same shape with input_data.
    :type inputs_err: Union([NoneType, ndarray])
    :param labels_err: Labels error (if any)
    :type labels_err: Union([NoneType, ndarray])
    :return: None
    :rtype: NoneType
    :History:
        | 2018-Jan-06 - Written - Henry Leung (University of Toronto)
        | 2018-Apr-12 - Updated - Henry Leung (University of Toronto)
    """
    if inputs_err is None:
        inputs_err = np.zeros_like(input_data)

    if labels_err is None:
        labels_err = np.zeros_like(labels)

    # Call the checklist to create astroNN folder and save parameters
    self.pre_training_checklist_child(input_data, labels, inputs_err, labels_err)

    reduce_lr = ReduceLROnPlateau(monitor='val_output_loss', factor=0.5, min_delta=self.reduce_lr_epsilon,
                                  patience=self.reduce_lr_patience, min_lr=self.reduce_lr_min,
                                  mode='min', verbose=2)

    self.virtual_cvslogger = VirutalCSVLogger()

    self.__callbacks = [reduce_lr, self.virtual_cvslogger]  # default must have unchangeable callbacks

    if self.callbacks is not None:
        if isinstance(self.callbacks, list):
            self.__callbacks.extend(self.callbacks)
        else:
            self.__callbacks.append(self.callbacks)

    start_time = time.time()

    self.history = self.keras_model.fit_generator(generator=self.training_generator,
                                                  steps_per_epoch=self.num_train // self.batch_size,
                                                  validation_data=self.validation_generator,
                                                  validation_steps=max(self.val_num // self.batch_size, 1),
                                                  epochs=self.max_epochs,
                                                  verbose=self.verbose,
                                                  workers=os.cpu_count(),
                                                  callbacks=self.__callbacks,
                                                  use_multiprocessing=MULTIPROCESS_FLAG)

    print(f'Completed Training, {(time.time() - start_time):.2f}s in total')

    if self.autosave:
        # Call the post training checklist to save parameters
        self.save()

    return None
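# Usage sketch (assumption): training a Bayesian model with measurement errors.
# ApogeeBCNN is assumed to be astroNN's Bayesian CNN; omitting inputs_err and
# labels_err falls back to the np.zeros_like defaults in the method above.
def _example_train_bayesian():
    import numpy as np
    from astroNN.models import ApogeeBCNN

    bcnn = ApogeeBCNN()
    bcnn.max_epochs = 5
    x = np.random.normal(0., 1., (200, 7514))            # mock spectra
    y = np.random.normal(0., 1., (200, 3))               # mock labels
    x_err = np.abs(np.random.normal(0., 0.1, x.shape))   # per-pixel errors
    y_err = np.abs(np.random.normal(0., 0.1, y.shape))   # per-label errors
    bcnn.train(x, y, inputs_err=x_err, labels_err=y_err)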
def train(self, input_data, labels):
    """
    Train a neural network

    :param input_data: Data to be trained with neural network
    :type input_data: ndarray
    :param labels: Labels to be trained with neural network
    :type labels: ndarray
    :return: None
    :rtype: NoneType
    """
    # Call the checklist to create astroNN folder and save parameters
    self.pre_training_checklist_child(input_data, labels)

    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, min_delta=self.reduce_lr_epsilon,
                                  patience=self.reduce_lr_patience, min_lr=self.reduce_lr_min,
                                  mode='min', verbose=2)

    # NOTE: early_stopping is constructed but never added to the default
    # callbacks below; pass an EarlyStopping via self.callbacks to enable it
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=self.early_stopping_min_delta,
                                   patience=self.early_stopping_patience, verbose=2, mode='min')

    self.virtual_cvslogger = VirutalCSVLogger()

    self.__callbacks = [reduce_lr, self.virtual_cvslogger]  # default must have unchangeable callbacks

    if self.callbacks is not None:
        if isinstance(self.callbacks, list):
            self.__callbacks.extend(self.callbacks)
        else:
            self.__callbacks.append(self.callbacks)

    start_time = time.time()

    # validation_steps must be derived from the validation set size, not the
    # training set size (the original used self.num_train here by mistake)
    self.history = self.keras_model.fit_generator(generator=self.training_generator,
                                                  steps_per_epoch=self.num_train // self.batch_size,
                                                  validation_data=self.validation_generator,
                                                  validation_steps=self.val_num // self.batch_size,
                                                  epochs=self.max_epochs,
                                                  verbose=self.verbose,
                                                  workers=os.cpu_count(),
                                                  callbacks=self.__callbacks,
                                                  use_multiprocessing=MULTIPROCESS_FLAG)

    print(f'Completed Training, {(time.time() - start_time):.2f}s in total')

    if self.autosave:
        # Call the post training checklist to save parameters
        self.save()

    return None
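# Usage sketch (assumption): attaching a user callback. The method above merges
# self.callbacks (a single callback or a list) into its default
# ReduceLROnPlateau + VirutalCSVLogger set, so either form works. The model
# class is illustrative; any astroNN model exposing this train() behaves the
# same way.
def _example_train_with_callback():
    import numpy as np
    from tensorflow.keras.callbacks import TerminateOnNaN
    from astroNN.models import ApogeeCNN

    cnn = ApogeeCNN()
    cnn.callbacks = TerminateOnNaN()       # single callback, appended
    # cnn.callbacks = [TerminateOnNaN()]   # or a list, extended
    x = np.random.normal(0., 1., (200, 7514))
    y = np.random.normal(0., 1., (200, 3))
    cnn.train(x, y)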
def fit(self, input_data, labels, inputs_err=None, labels_err=None, sample_weights=None, experimental=False):
    """
    Train a Bayesian neural network

    :param input_data: Data to be trained with neural network
    :type input_data: ndarray
    :param labels: Labels to be trained with neural network
    :type labels: ndarray
    :param inputs_err: Error for input_data (if any), same shape with input_data.
    :type inputs_err: Union([NoneType, ndarray])
    :param labels_err: Labels error (if any)
    :type labels_err: Union([NoneType, ndarray])
    :param sample_weights: Sample weights (if any)
    :type sample_weights: Union([NoneType, ndarray])
    :return: None
    :rtype: NoneType
    :History:
        | 2018-Jan-06 - Written - Henry Leung (University of Toronto)
        | 2018-Apr-12 - Updated - Henry Leung (University of Toronto)
    """
    if inputs_err is None:
        inputs_err = np.zeros_like(input_data)

    if labels_err is None:
        labels_err = np.zeros_like(labels)

    # TODO: allow named inputs too??
    input_data = {"input": input_data, "input_err": inputs_err, "labels_err": labels_err}
    labels = {"output": labels, "variance_output": labels}

    # Call the checklist to create astroNN folder and save parameters
    (norm_data_training, norm_data_val, norm_labels_training, norm_labels_val,
     sample_weights_training, sample_weights_val) = self.pre_training_checklist_child(input_data, labels,
                                                                                      sample_weights)
    # norm_data_training['labels_err'] = norm_data_training['labels_err'].filled(MAGIC_NUMBER).astype(np.float32)

    # TODO: fix the monitor name
    reduce_lr = ReduceLROnPlateau(monitor='val_output_mean_absolute_error', factor=0.5,
                                  min_delta=self.reduce_lr_epsilon, patience=self.reduce_lr_patience,
                                  min_lr=self.reduce_lr_min, mode='min', verbose=2)

    self.virtual_cvslogger = VirutalCSVLogger()

    self.__callbacks = [reduce_lr, self.virtual_cvslogger]  # default must have unchangeable callbacks

    if self.callbacks is not None:
        if isinstance(self.callbacks, list):
            self.__callbacks.extend(self.callbacks)
        else:
            self.__callbacks.append(self.callbacks)

    start_time = time.time()

    if experimental:
        dataset = tf.data.Dataset.from_tensor_slices(
            (norm_data_training, norm_labels_training, sample_weights_training)).batch(
            self.batch_size).shuffle(5000, reshuffle_each_iteration=True).prefetch(tf.data.AUTOTUNE)
        val_dataset = tf.data.Dataset.from_tensor_slices(
            (norm_data_val, norm_labels_val, sample_weights_val)).batch(
            self.batch_size).prefetch(tf.data.AUTOTUNE)

        self.history = self.keras_model.fit(dataset,
                                            validation_data=val_dataset,
                                            epochs=self.max_epochs,
                                            verbose=self.verbose,
                                            workers=os.cpu_count() // 2,
                                            callbacks=self.__callbacks,
                                            use_multiprocessing=MULTIPROCESS_FLAG)
    else:
        self.history = self.keras_model.fit(self.training_generator,
                                            validation_data=self.validation_generator,
                                            epochs=self.max_epochs,
                                            verbose=self.verbose,
                                            workers=os.cpu_count() // 2,
                                            callbacks=self.__callbacks,
                                            use_multiprocessing=MULTIPROCESS_FLAG)

    print(f'Completed Training, {(time.time() - start_time):.2f}s in total')

    if self.autosave:
        # Call the post training checklist to save parameters
        self.save()

    return None
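# Standalone sketch (assumption) of the experimental tf.data path above:
# dict-valued tensor slices batched, shuffled and prefetched the same way.
# Note that the method batches before shuffling, so whole batches are shuffled
# rather than individual samples; the names and shapes here are placeholders.
def _example_tf_dataset():
    import numpy as np
    import tensorflow as tf

    x = {"input": np.random.normal(0., 1., (256, 16)).astype(np.float32)}
    y = {"output": np.random.normal(0., 1., (256, 2)).astype(np.float32)}
    w = np.ones(256, dtype=np.float32)   # uniform sample weights

    dataset = (tf.data.Dataset.from_tensor_slices((x, y, w))
               .batch(32)
               .shuffle(5000, reshuffle_each_iteration=True)
               .prefetch(tf.data.AUTOTUNE))

    for batch_x, batch_y, batch_w in dataset.take(1):
        print(batch_x["input"].shape, batch_y["output"].shape, batch_w.shape)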