def fit(self, train_x, train_y, validation_data_fit, train_loop_num, **kwargs):
    """Train the wrapped Keras model for a short, fixed budget.

    Args:
        train_x: training features in the shape expected by ``self._model``.
        train_y: one-hot training labels; converted with ``ohe2cat`` before fitting.
        validation_data_fit: ``(val_x, val_y)`` tuple; drives early stopping.
        train_loop_num: outer training-loop counter. Unused here — a per-loop
            epoch/patience schedule existed once but was removed in favor of a
            constant budget.
        **kwargs: accepted for interface compatibility; ignored.
    """
    val_x, val_y = validation_data_fit

    # Constant small budget: 3 epochs, stop early after 2 non-improving
    # validation-loss epochs.
    epochs = 3
    patience = 2
    callbacks = [
        tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience)
    ]
    self._model.fit(train_x, ohe2cat(train_y),
                    epochs=epochs,
                    callbacks=callbacks,
                    validation_data=(val_x, ohe2cat(val_y)),
                    verbose=1,  # Logs once per epoch.
                    batch_size=16,
                    shuffle=True)
def find_lr(data_index, data_root=r"/home/chengfeng/autospeech/data"):
    """Run a learning-rate range test on one AutoSpeech dataset and plot it.

    Args:
        data_index: integer index of the dataset; selects the
            ``data0{index}/data0{index}.data`` layout under ``data_root``.
        data_root: directory containing the ``data0{index}`` folders. Defaults
            to the original hard-coded location for backward compatibility.
    """
    D = AutoSpeechDataset(
        os.path.join(data_root,
                     "data0{}".format(data_index),
                     'data0{}.data'.format(data_index)))
    D.read_dataset()
    metadata = D.get_metadata()
    x_train, y_train = D.get_train()

    my_model = CrnnModel()
    x_train = my_model.preprocess_data(x_train)
    log(f'x_train shape: {x_train.shape}; y_train shape: {y_train.shape}')
    y_train = ohe2cat(y_train)
    my_model.init_model(input_shape=x_train.shape[1:],
                        num_classes=metadata[CLASS_NUM])

    # Sweep the LR from 1e-4 to 1 while recording the loss per batch.
    lr_finder = LRFinder(my_model._model)
    lr_finder.find(x_train, y_train, start_lr=0.0001, end_lr=1,
                   batch_size=64, epochs=200)
    # Plot the loss, ignore 20 batches in the beginning and 5 in the end
    lr_finder.plot_loss(n_skip_beginning=20, n_skip_end=5)
    lr_finder.plot_loss_change(sma=20, n_skip_beginning=20, n_skip_end=5,
                               y_lim=(-0.01, 0.01))
def fit(self, train_x, train_y, validation_data_fit, train_loop_num, **kwargs):
    """Fit the wrapped Keras model with early stopping on validation loss.

    The very first outer loop gets a short 10-epoch budget; every later
    loop trains for up to 30 epochs. Labels are one-hot and are converted
    with ``ohe2cat`` before being handed to Keras.
    """
    validation_x, validation_y = validation_data_fit

    if train_loop_num == 1:
        n_epochs = 10
    else:
        n_epochs = 30

    early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                  patience=3)

    self._model.fit(
        train_x,
        ohe2cat(train_y),
        epochs=n_epochs,
        callbacks=[early_stop],
        validation_data=(validation_x, ohe2cat(validation_y)),
        verbose=1,  # Logs once per epoch.
        batch_size=32,
        shuffle=True)
def fit(self, train_x, train_y, validation_data_fit, train_loop_num, **kwargs):
    """Train for a fixed 3-epoch budget and track cumulative epochs trained.

    Args:
        train_x: training features in the shape expected by ``self._model``.
        train_y: one-hot training labels; converted with ``ohe2cat``.
        validation_data_fit: ``(val_x, val_y)`` tuple; drives early stopping.
        train_loop_num: outer training-loop counter. Unused here — the old
            per-loop epoch/patience schedule was removed.
        **kwargs: accepted for interface compatibility; ignored.

    Side effects:
        Increments ``self.epoch_cnt`` by the number of epochs requested.
    """
    val_x, val_y = validation_data_fit

    patience = 2
    epochs = 3
    callbacks = [
        tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience)
    ]
    self._model.fit(
        train_x, ohe2cat(train_y),
        epochs=epochs,
        callbacks=callbacks,
        validation_data=(val_x, ohe2cat(val_y)),
        verbose=1,  # Logs once per epoch.
        batch_size=32,
        shuffle=True)
    # Use `epochs` rather than a second literal 3 so budget and counter
    # cannot drift apart.
    self.epoch_cnt += epochs
def fit(self, train_x, train_y, validation_data_fit, train_loop_num, **kwargs):
    """Train the wrapped Keras model for a short, fixed 5-epoch budget.

    Args:
        train_x: training features in the shape expected by ``self._model``.
        train_y: one-hot training labels; converted with ``ohe2cat``.
        validation_data_fit: ``(val_x, val_y)`` tuple; drives early stopping.
        train_loop_num: outer training-loop counter; unused here.
        **kwargs: accepted for interface compatibility; ignored.
    """
    val_x, val_y = validation_data_fit

    epochs = 5
    patience = 2
    batch_size = 32

    callbacks = [tf.keras.callbacks.EarlyStopping(
        monitor='val_loss', patience=patience)]
    self._model.fit(train_x, ohe2cat(train_y),
                    epochs=epochs,
                    callbacks=callbacks,
                    validation_data=(val_x, ohe2cat(val_y)),
                    verbose=1,  # Logs once per epoch.
                    batch_size=batch_size,
                    shuffle=True)
def fit(self, x_train, y_train, *args, **kwargs):
    """Fit the underlying model; one-hot labels are converted via ``ohe2cat``.

    Extra positional/keyword arguments are accepted for interface
    compatibility but not forwarded.
    """
    labels = ohe2cat(y_train)
    self._model.fit(x_train, labels)