def fit(self, X_train, y_train, X_val, y_val):
    """Train the model on 4-D EEG data and validate each epoch.

    Derives ``Chans``/``Samples`` from the training-data shape according to
    ``self.data_format``, builds the model, and fits it with checkpointing,
    CSV logging, LR reduction, early stopping, and per-epoch timing.

    Args:
        X_train: training data; must have ndim == 4
            (trials, 1, chans, samples) for ``channels_first``, or
            (trials, chans, samples, 1) for ``channels_last`` —
            TODO confirm exact layout against the data loader.
        y_train: training labels.
        X_val: validation data; must have ndim == 4.
        y_val: validation labels.

    Returns:
        The Keras ``History`` object returned by ``model.fit``.

    Raises:
        ValueError: if ``X_train`` or ``X_val`` is not 4-dimensional.
    """
    # Fail fast on shape mismatches before any expensive setup.
    if X_train.ndim != 4:
        raise ValueError(
            '`X_train` is incompatible: expected ndim=4, found ndim='
            + str(X_train.ndim))
    if X_val.ndim != 4:
        raise ValueError(
            '`X_val` is incompatible: expected ndim=4, found ndim='
            + str(X_val.ndim))

    # Per-trial shape (everything after the batch axis).
    self.input_shape = X_train.shape[1:]
    if self.data_format == 'channels_first':
        # input_shape is (depth, chans, samples).
        self.Chans = self.input_shape[1]
        self.Samples = self.input_shape[2]
    else:
        # channels_last: input_shape is (chans, samples, depth).
        self.Chans = self.input_shape[0]
        self.Samples = self.input_shape[1]

    # Training callbacks: CSV metrics log, per-epoch wall-clock timing,
    # best-weights checkpointing, LR schedule, and early stopping.
    csv_logger = CSVLogger(self.csv_dir)
    time_callback = TimeHistory(self.time_log)
    # NOTE(review): stock Keras spells this kwarg `save_weights_only`;
    # `save_weight_only` only works if ModelCheckpoint is a project
    # wrapper — confirm against its definition.
    checkpointer = ModelCheckpoint(monitor=self.monitor,
                                   filepath=self.weights_dir,
                                   verbose=self.verbose,
                                   save_best_only=self.save_best_only,
                                   save_weight_only=self.save_weight_only)
    reduce_lr = ReduceLROnPlateau(monitor=self.monitor,
                                  patience=self.patience,
                                  factor=self.factor,
                                  mode=self.mode,
                                  verbose=self.verbose,
                                  min_lr=self.min_lr)
    es = EarlyStopping(monitor=self.monitor,
                       mode=self.mode,
                       verbose=self.verbose,
                       patience=self.es_patience)

    model = self.build()
    model.compile(optimizer=self.optimizer,
                  loss=self.loss,
                  metrics=self.metrics)
    model.summary()
    print("The first kernel size is (1, {})".format(self.kernLength))

    if self.class_balancing:
        # compute_class_weight if class_balancing is True
        self.class_weight = compute_class_weight(y_train)
    else:
        self.class_weight = None

    history = model.fit(
        X_train,
        y_train,
        batch_size=self.batch_size,
        shuffle=self.shuffle,
        epochs=self.epochs,
        validation_data=(X_val, y_val),
        class_weight=self.class_weight,
        callbacks=[checkpointer, csv_logger, reduce_lr, es, time_callback])
    return history
def fit(self, X_train, y_train, X_val, y_val):
    """Train the multi-sub-band model on 5-D data and validate each epoch.

    The second axis of the input indexes frequency sub-bands; each sub-band
    is split out and fed to the model as a separate input tensor.

    Args:
        X_train: training data; must have ndim == 5
            (trials, n_subbands, d1, d2, d3) — per-sub-band layout
            presumably matches the single-band models; TODO confirm.
        y_train: training labels.
        X_val: validation data; must have ndim == 5.
        y_val: validation labels.

    Returns:
        The Keras ``History`` object returned by ``model.fit``.

    Raises:
        ValueError: if ``X_train`` or ``X_val`` is not 5-dimensional.
    """
    # Fail fast on shape mismatches before any expensive setup.
    if X_train.ndim != 5:
        raise ValueError(
            '`X_train` is incompatible: expected ndim=5, found ndim='
            + str(X_train.ndim))
    if X_val.ndim != 5:
        raise ValueError(
            '`X_val` is incompatible: expected ndim=5, found ndim='
            + str(X_val.ndim))

    # Per-sub-band trial shape; the sub-band axis (axis 1) sets the number
    # of parallel model inputs.
    self.input_shape = (X_train.shape[2], X_train.shape[3], X_train.shape[4])
    self.n_subbands = X_train.shape[1]

    # Training callbacks: CSV metrics log, per-epoch wall-clock timing,
    # best-weights checkpointing, LR schedule, and early stopping.
    csv_logger = CSVLogger(self.csv_dir)
    time_callback = TimeHistory(self.time_log)
    # NOTE(review): stock Keras spells this kwarg `save_weights_only`;
    # `save_weight_only` only works if ModelCheckpoint is a project
    # wrapper — confirm against its definition.
    checkpointer = ModelCheckpoint(monitor=self.monitor,
                                   filepath=self.weights_dir,
                                   verbose=self.verbose,
                                   save_best_only=self.save_best_only,
                                   save_weight_only=self.save_weight_only)
    reduce_lr = ReduceLROnPlateau(monitor=self.monitor,
                                  patience=self.patience,
                                  factor=self.factor,
                                  mode=self.mode,
                                  verbose=self.verbose,
                                  min_lr=self.min_lr)
    es = EarlyStopping(monitor=self.monitor,
                       mode=self.mode,
                       verbose=self.verbose,
                       patience=self.es_patience)

    model = self.build()
    model.compile(optimizer=self.optimizer,
                  loss=self.loss,
                  metrics=self.metrics)
    model.summary()

    if self.class_balancing:
        # compute_class_weight if class_balancing is True
        self.class_weight = compute_class_weight(y_train)
    else:
        self.class_weight = None

    # Split the sub-band axis into one input array per sub-band
    # (done once each for train and validation, instead of inline).
    train_inputs = [X_train[:, i, :, :, :] for i in range(self.n_subbands)]
    val_inputs = [X_val[:, i, :, :, :] for i in range(self.n_subbands)]

    history = model.fit(
        train_inputs,
        y_train,
        batch_size=self.batch_size,
        shuffle=self.shuffle,
        class_weight=self.class_weight,
        epochs=self.epochs,
        validation_data=(val_inputs, y_val),
        callbacks=[checkpointer, csv_logger, reduce_lr, es, time_callback])
    return history
def fit(self, X_train, y_train, X_val, y_val):
    """Train the dual-output model on 4-D data and validate each epoch.

    The model has two outputs supervised by the same labels (``y_train`` is
    passed twice), weighted by ``self.loss_weights``. When class balancing
    is enabled, the last loss is replaced with a class-weighted sparse
    categorical cross-entropy before compiling.

    Args:
        X_train: training data; must have ndim == 4.
        y_train: training labels (used for both model outputs).
        X_val: validation data; must have ndim == 4.
        y_val: validation labels (used for both model outputs).

    Returns:
        The Keras ``History`` object returned by ``model.fit``.

    Raises:
        ValueError: if ``X_train`` or ``X_val`` is not 4-dimensional.
    """
    # Fail fast on shape mismatches before any expensive setup.
    if X_train.ndim != 4:
        raise ValueError(
            '`X_train` is incompatible: expected ndim=4, found ndim={}'
            .format(X_train.ndim))
    if X_val.ndim != 4:
        raise ValueError(
            '`X_val` is incompatible: expected ndim=4, found ndim={}'
            .format(X_val.ndim))

    # Training callbacks: CSV metrics log, per-epoch wall-clock timing,
    # best-weights checkpointing, LR schedule, and early stopping.
    csv_logger = CSVLogger(self.csv_dir)
    time_callback = TimeHistory(self.time_log)
    # NOTE(review): stock Keras spells this kwarg `save_weights_only`;
    # `save_weight_only` only works if ModelCheckpoint is a project
    # wrapper — confirm against its definition.
    checkpointer = ModelCheckpoint(monitor=self.monitor,
                                   filepath=self.weights_dir,
                                   verbose=self.verbose,
                                   save_best_only=self.save_best_only,
                                   save_weight_only=self.save_weight_only)
    reduce_lr = ReduceLROnPlateau(monitor=self.monitor,
                                  patience=self.patience,
                                  factor=self.factor,
                                  mode=self.mode,
                                  verbose=self.verbose,
                                  min_lr=self.min_lr)
    es = EarlyStopping(monitor=self.monitor,
                       mode=self.mode,
                       verbose=self.verbose,
                       patience=self.es_patience)

    model = self.build()
    model.summary()

    if self.class_balancing:
        # compute_class_weight if class_balancing is True.
        # NOTE(review): stock Keras SparseCategoricalCrossentropy has no
        # `class_weight` kwarg — this is presumably a project-custom loss;
        # verify against its definition.
        class_weight = compute_class_weight(y_train)
        self.loss[-1] = SparseCategoricalCrossentropy(
            class_weight=class_weight)

    model.compile(optimizer=self.optimizer,
                  loss=self.loss,
                  metrics=self.metrics,
                  loss_weights=self.loss_weights)

    # Both outputs are supervised with the same labels.
    history = model.fit(
        x=X_train,
        y=[y_train, y_train],
        batch_size=self.batch_size,
        shuffle=self.shuffle,
        epochs=self.epochs,
        validation_data=(X_val, [y_val, y_val]),
        callbacks=[checkpointer, csv_logger, reduce_lr, es, time_callback])
    return history