def fit(self, train_x, train_y, validation_data_fit, train_loop_num, **kwargs):
    """Train the wrapped Keras model for up to 10 epochs.

    Labels arrive one-hot encoded and are converted to categorical ids
    via ``ohe2cat`` for both the training and validation splits.
    Training halts early once validation loss stops improving.
    """
    val_x, val_y = validation_data_fit
    # Stop after 2 consecutive epochs without val_loss improvement.
    early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                  patience=2)
    self._model.fit(
        train_x,
        ohe2cat(train_y),
        epochs=10,
        callbacks=[early_stop],
        validation_data=(val_x, ohe2cat(val_y)),
        verbose=1,  # one log line per epoch
        batch_size=32,
        shuffle=True,
    )
def fit(self, train_x, train_y, validation_data_fit, round_num, **kwargs):
    """Train the wrapped Keras model for up to 10 epochs.

    Labels are converted from one-hot to categorical ids with ``ohe2cat``.
    Early stopping watches validation loss.

    NOTE(review): ``self.epoch_cnt`` is still advanced by 3 per call — a
    leftover from an earlier incremental-epoch scheme (the counter no
    longer matches the 10 epochs actually requested). Kept because other
    code may read it; confirm before removing.
    """
    val_x, val_y = validation_data_fit
    stop_on_plateau = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                       patience=2)
    self._model.fit(
        train_x,
        ohe2cat(train_y),
        epochs=10,
        callbacks=[stop_on_plateau],
        validation_data=(val_x, ohe2cat(val_y)),
        verbose=1,  # one log line per epoch
        batch_size=32,
        shuffle=True,
    )
    self.epoch_cnt += 3
def fit(self, train_x, train_y, validation_data_fit, train_loop_num, **kwargs):
    """Run a short (5-epoch) training round with early stopping.

    One-hot labels are mapped to categorical ids with ``ohe2cat`` before
    being handed to Keras.
    """
    val_x, val_y = validation_data_fit
    batch_size = 32
    # Halt once val_loss has not improved for 2 epochs in a row.
    halt = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=2)
    fit_options = dict(
        epochs=5,
        callbacks=[halt],
        validation_data=(val_x, ohe2cat(val_y)),
        verbose=1,  # one log line per epoch
        batch_size=batch_size,
        shuffle=True,
    )
    self._model.fit(train_x, ohe2cat(train_y), **fit_options)
def fit(self, train_x, train_y, validation_data_fit, train_loop_num, **kwargs):
    """Run a brief (3-epoch) training pass with early stopping.

    ``ohe2cat`` converts the one-hot train/validation labels to the
    categorical ids the model expects.
    """
    val_x, val_y = validation_data_fit
    # patience=2: tolerate two stagnant epochs before stopping.
    monitor_cb = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                  patience=2)
    self._model.fit(
        train_x,
        ohe2cat(train_y),
        epochs=3,
        callbacks=[monitor_cb],
        validation_data=(val_x, ohe2cat(val_y)),
        verbose=1,  # one log line per epoch
        batch_size=32,
        shuffle=True,
    )
def fit(self, train_loop_num=1, **kwargs):
    """Select the model for this loop, fetch training data, and train.

    Steps:
      1. Pick (or keep) the current model via ``_pre_select_model`` so
         the matching preprocessing is applied below.
      2. Decide the feature type for this round (mel vs. mfcc) and pull
         the train/validation split from the data manager.
      3. Lazily build the model now that the input shape is known, then
         delegate actual training to ``self._model.fit``.

    Fix: the original duplicated the identical ``get_train_data(...)``
    call in both branches of ``if self._round_num == 0``; the branches
    only differed in state set before/after the call, so the call is now
    made once with that state handling split around it.
    """
    # Select model first, in order to use its preprocess-data method.
    self._pre_select_model(train_loop_num)
    log('fit {} for {} times'.format(self._model_name,
                                     self._cur_model_run_loop))
    self._cur_model_run_loop += 1

    # After round 0, features alternate: mel on the designated mel round,
    # mfcc otherwise. (Round 0 keeps whatever _use_mfcc already holds.)
    if self._round_num != 0:
        self._use_mfcc = self._round_num != self._use_mel_round

    train_x, train_y, val_x, val_y = self._data_manager.get_train_data(
        train_loop_num=train_loop_num,
        model_num=self._model_num,
        round_num=self._round_num,
        use_new_train=self._use_new_train,
        use_mfcc=self._use_mfcc)

    if self._round_num == 0:
        # need_30s is only known after the data manager has seen the data.
        # NOTE(review): '_is_nedd_30s' keeps its original (misspelled)
        # attribute name because other code in this file may read it.
        self._is_nedd_30s = self._data_manager.need_30s
        self._use_mel_round = 3 if self._is_nedd_30s else 2

    self._val_set = (val_x, val_y)
    self._input_shape = train_x.shape
    log('train_x: {}; train_y: {};'.format(train_x.shape, train_y.shape) +
        ' val_x: {}; val_y: {};'.format(val_x.shape, val_y.shape))

    # Init model really (lazy: requires _input_shape set above).
    self._get_or_create_model()
    self._classes = np.unique(ohe2cat(train_y))
    self._model.fit(train_x, train_y, (val_x, val_y), self._round_num,
                    **kwargs)
def fit(self, x_train, y_train, *args, **kwargs):
    """Delegate training to the wrapped estimator.

    One-hot labels are first converted to categorical ids via ``ohe2cat``.
    Extra positional/keyword arguments are accepted for interface
    compatibility but intentionally ignored.
    """
    labels = ohe2cat(y_train)
    self._model.fit(x_train, labels)