def get_callbacks(use_early_stopping=True, use_reduce_lr=True):
    # Assumes `callbacks` is keras.callbacks (or tf.keras.callbacks) and that a
    # module-level `keras_verbosity` setting is defined elsewhere in the file.
    callback_list = []

    if use_early_stopping:
        callback_list.append(
            callbacks.EarlyStopping(monitor='val_loss',
                                    min_delta=0,
                                    patience=10,
                                    verbose=keras_verbosity,
                                    mode='auto'))

    if use_reduce_lr:
        callback_list.append(
            callbacks.ReduceLROnPlateau(monitor='val_loss',
                                        factor=0.1,
                                        patience=5,
                                        verbose=keras_verbosity,
                                        mode='auto',
                                        min_delta=0.0001,  # named `epsilon` in very old Keras releases
                                        cooldown=0,
                                        min_lr=0))

    return callback_list
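A minimal usage sketch (model, x_train and y_train are placeholders, not names from the original project); the returned list plugs straight into model.fit:

history = model.fit(x_train, y_train,
                    epochs=100,
                    validation_split=0.2,
                    callbacks=get_callbacks(use_early_stopping=True,
                                            use_reduce_lr=True))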
Example #2
File: main.py  Project: KristenT9/RN-UNet
def train_model(model_name, weights_save_path):
    # h5py removed Dataset.value in h5py >= 3.0; slice the dataset directly instead.
    with h5py.File(hdf5_path + mode_list[0] + "_2D_data_" + str(index) + ".h5", "r") as f:
        train_img = f["batch_patches"][..., 0:6]  # (num,patch_h,patch_w,6)
        train_gt = f["batch_patches"][..., 6]  # (num,patch_h,patch_w)
    with h5py.File(hdf5_path + mode_list[1] + "_2D_data_" + str(index) + ".h5", "r") as f:
        val_img = f["batch_patches"][..., 0:6]  # (num,patch_h,patch_w,6)
        val_gt = f["batch_patches"][..., 6]  # (num,patch_h,patch_w)
    print(train_img.shape)
    if model_name == "Unet":
        model = unet.unet(input_height=patch_h, input_width=patch_w)
    elif model_name == "Nonlocal dsv Unet":
        model = unet_nonlocal.unet_nonlocal(input_height=patch_h, input_width=patch_w)
    else:
        model = unet_nonlocal.res_unet_nonlocal(input_height=patch_h, input_width=patch_w)
    cp = [callbacks.EarlyStopping(monitor='val_dice',
                                  patience=10,
                                  mode='max'),
          callbacks.ModelCheckpoint(filepath=weights_save_path,
                                    monitor='val_dice',
                                    save_best_only=True,
                                    save_weights_only=True,
                                    mode='max',
                                    verbose=1)]
    print("Training " + model_name + " Model")
    if load_weights:
        print("Loading " + model_name + " Model Weights")
        model.load_weights(weights_save_path)
    history = model.fit(train_img, train_gt[..., None], batch_size=batch_size, epochs=epochs,
                            validation_data=(val_img, val_gt[..., None]),
                            shuffle=True, callbacks=cp)
    eval_metrics = model.evaluate(val_img, val_gt[..., None])
    visualize_loss(history)
    return eval_metrics
Example #3
def dense_train(space):
    ''' train a dense (MLP) model on the training / validation set -> give predictions of Y '''

    params = space.copy()

    input_shape = (X_train.shape[-1], )  # input shape depends on x_fields used
    input_img = Input(shape=input_shape)

    init_nodes = params['init_nodes']  # first dense layer - number of nodes
    nodes_mult = params['nodes_mult']  # nodes growth rate
    mult_freq = params['mult_freq']  # grow every X layer
    mult_start = params['mult_start']  # grow from X layer
    end_nodes = params['end_nodes']  # maximum number of nodes

    if params['num_Dense_layer'] < 4:
        params['init_nodes'] = init_nodes = 16

    # remove kernel_regularizer=regularizers.l1(params['l1'])
    d_1 = Dense(init_nodes, activation=params['activation'])(input_img)
    d_1 = Dropout(params['dropout'])(d_1)

    for i in range(1, params['num_Dense_layer']):
        temp_nodes = int(
            min(
                init_nodes * (2**(nodes_mult * max(
                    (i - mult_start + 3) // mult_freq, 0))), end_nodes))
        d_1 = Dense(temp_nodes, activation=params['activation'])(d_1)

        if i != params['num_Dense_layer'] - 1:  # last dense layer has no dropout
            d_1 = Dropout(params['dropout'])(d_1)

    f_x = Dense(1)(d_1)

    callbacks_list = [
        callbacks.ReduceLROnPlateau(monitor='val_loss',
                                    factor=0.1,
                                    patience=10),
        callbacks.EarlyStopping(monitor='val_loss', patience=10, mode='auto')
    ]  # add callbacks
    lr_val = 10**-int(params['learning_rate'])

    adam = optimizers.Adam(lr=lr_val)
    model = Model(input_img, f_x)
    model.compile(adam, loss='mae')
    model.summary()

    history = model.fit(X_train,
                        Y_train,
                        epochs=50,
                        batch_size=params['batch_size'],
                        validation_data=(X_valid, Y_valid),
                        callbacks=callbacks_list,
                        verbose=1)

    Y_test_pred = model.predict(X_test)
    Y_train_pred = model.predict(X_train)
    Y_valid_pred = model.predict(X_valid)

    return Y_test_pred, Y_train_pred, Y_valid_pred, history
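A minimal sketch of the hyperparameter `space` this function expects, inferred from the keys it reads above (the values shown are placeholders, not tuned settings):

space = {
    'num_Dense_layer': 4,    # number of Dense layers
    'init_nodes': 32,        # nodes in the first Dense layer
    'nodes_mult': 1,         # nodes growth rate
    'mult_freq': 2,          # grow every X layers
    'mult_start': 1,         # start growing from layer X
    'end_nodes': 128,        # cap on nodes per layer
    'activation': 'relu',
    'dropout': 0.2,
    'learning_rate': 3,      # used as 10**-learning_rate
    'batch_size': 128,
}
Y_test_pred, Y_train_pred, Y_valid_pred, history = dense_train(space)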
Example #4
  def test_validate_callbacks_predefined_callbacks(self):
    supported_predefined_callbacks = [
        callbacks.TensorBoard(),
        callbacks.CSVLogger(filename='./log.csv'),
        callbacks.EarlyStopping(),
        callbacks.ModelCheckpoint(filepath='./checkpoint'),
        callbacks.TerminateOnNaN(),
        callbacks.ProgbarLogger(),
        callbacks.History(),
        callbacks.RemoteMonitor()
    ]

    distributed_training_utils.validate_callbacks(
        supported_predefined_callbacks, adam.Adam())

    unsupported_predefined_callbacks = [
        callbacks.ReduceLROnPlateau(),
        callbacks.LearningRateScheduler(schedule=lambda epoch: 0.001)
    ]

    for callback in unsupported_predefined_callbacks:
      with self.assertRaisesRegex(ValueError,
                                  'You must specify a Keras Optimizer V2'):
        distributed_training_utils.validate_callbacks([callback],
                                                      v1_adam.AdamOptimizer())
Example #5
def define_callbacks(output, batch_size):
    csv_logger = callbacks.CSVLogger(join(output, 'training.log'))
    earlystop = callbacks.EarlyStopping(monitor='val_loss', patience=2)
    tensorboard = callbacks.TensorBoard(batch_size=batch_size)
    fpath = join(
        output,
        'weights.{epoch:02d}-{loss:.2f}-{acc:.2f}-{val_loss:.2f}-{val_acc:.2f}.hdf5'
    )
    cp_cb = callbacks.ModelCheckpoint(filepath=fpath,
                                      monitor='val_loss',
                                      save_best_only=True)
    return [csv_logger, earlystop, tensorboard, cp_cb]
Example #6
def _create_validation_early_stopping(
        cfg_services: dict) -> callbacks.EarlyStopping:
    """Create an early stopping callback that monitors a validation 'metric'.

    Args:
        cfg_services: dict, services subsection of config.

    Returns:
        EarlyStopping, EarlyStopping callback that monitors a validation 'metric'.
    """
    early_stopping_params = cfg_services["validation_early_stopping"]
    early_stopping_params["monitor"] = _force_monitor_to_mode(
        early_stopping_params["monitor"], True, "validation_early_stopping")
    return callbacks.EarlyStopping(**early_stopping_params)
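A minimal sketch of the services config this helper consumes; the keys inside the entry are forwarded verbatim to EarlyStopping, so anything EarlyStopping accepts works (the values below are placeholders):

cfg_services = {
    "validation_early_stopping": {
        "monitor": "loss",      # rewritten by _force_monitor_to_mode before use
        "patience": 10,
        "min_delta": 0.001,
    }
}
early_stopping = _create_validation_early_stopping(cfg_services)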
Example #7
def _create_train_early_stopping(
        cfg_services: dict,
        metrics_names: List[str]) -> callbacks.EarlyStopping:
    """Create an early stopping callback that monitors a training 'metric'.

    Args:
        cfg_services: dict, services subsection of config.
        metrics_names: list[str], 'metrics' names.

    Returns:
        EarlyStopping, EarlyStopping callback that monitors a training 'metric'.
    """
    early_stopping_params = cfg_services["train_early_stopping"]
    early_stopping_params["monitor"] = _force_monitor_to_mode(
        early_stopping_params["monitor"], metrics_names, False,
        "train_early_stopping")
    return callbacks.EarlyStopping(**early_stopping_params)
Example #8
def get_callbacks(args):
    """Define callbacks for distributed training."""
    callbacks = [
        # This is necessary to ensure consistent initialization of all workers
        hvd.callbacks.BroadcastGlobalVariablesCallback(0),
        # Note: must be in the list before the ReduceLROnPlateau or other metrics-based callbacks.
        hvd.callbacks.MetricAverageCallback(),
        # Adjust Learning Rate
        hvd.callbacks.LearningRateWarmupCallback(
            warmup_epochs=args.warmup_epochs)
    ]
    if args.train_only:
        # Reduce learning rate on a schedule
        onethirds_point = int(math.floor(args.epochs / 3))
        twothirds_point = int(math.floor(args.epochs / 3 * 2))
        callbacks.append(
            hvd.callbacks.LearningRateScheduleCallback(
                start_epoch=args.warmup_epochs,
                end_epoch=onethirds_point,
                multiplier=1.))
        callbacks.append(
            hvd.callbacks.LearningRateScheduleCallback(
                start_epoch=onethirds_point,
                end_epoch=twothirds_point,
                multiplier=1e-1))
        callbacks.append(
            hvd.callbacks.LearningRateScheduleCallback(
                start_epoch=twothirds_point,
                end_epoch=args.epochs + 1,
                multiplier=1e-2))
    else:
        # Reduce learning rate on validation loss plateau
        callbacks.append(
            cb.ReduceLROnPlateau(monitor='val_loss',
                                 factor=0.1,
                                 patience=5,
                                 min_lr=0.001,
                                 verbose=1 if hvd.rank() == 0 else 0))
    if args.early_stopping:
        callbacks.append(
            cb.EarlyStopping(monitor='loss',
                             patience=7,
                             restore_best_weights=True))
    print('Callbacks created on rank ' + str(hvd.rank()))
    return callbacks
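For reference, a sketch of the argument namespace this function reads (attribute names are taken from the code above; the defaults are placeholders, and hvd.init() is assumed to have been called already):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=90)
parser.add_argument('--warmup-epochs', type=int, default=5)
parser.add_argument('--train-only', action='store_true')
parser.add_argument('--early-stopping', action='store_true')
args = parser.parse_args()
callback_list = get_callbacks(args)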
Example #9
def callb(path_checkpoint):
    callback_checkpoint = tf_cb.ModelCheckpoint(
        filepath=path_checkpoint, monitor = 'loss', verbose=1,
        save_weights_only=True, save_best_only=True)

    callback_earlystopping = tf_cb.EarlyStopping(monitor='loss',
                                                 patience=20, verbose=1)
    callback_reduce_lr = tf_cb.ReduceLROnPlateau(monitor='loss',
                                                 factor=0.98,
                                                 min_lr=0.3e-4,
                                                 patience=0,
                                                 verbose=1)
    callBacks = [
        callback_checkpoint,
        callback_earlystopping,
        callback_reduce_lr
    ]
    return callBacks
Example #10
  def callableForTestEarlyStopping(model, test_obj, train_ds, num_epoch, steps,
                                   strategy, saving_filepath, **kwargs):

    class EpochCounterCallback(callbacks.Callback):

      def on_epoch_begin(self, epoch, logs):
        self.last_epoch = epoch

    epoch_counter_cbk = EpochCounterCallback()
    cbks = [
        callbacks.EarlyStopping(
            monitor='loss', min_delta=0.05, patience=1, verbose=1),
        epoch_counter_cbk
    ]

    # Empirically, `model.fit()` is expected to terminate around the 22nd
    # epoch. Asserting that it stops before the 50th epoch avoids flakiness
    # and keeps the test predictable.
    model.fit(x=train_ds, epochs=100, steps_per_epoch=steps, callbacks=cbks)
    test_obj.assertLess(epoch_counter_cbk.last_epoch, 50)
Example #11
    def proc_early_stopping(test_obj):

      class EpochCounterCallback(callbacks.Callback):

        def on_epoch_begin(self, epoch, logs):
          self.last_epoch = epoch

      model, _, train_ds, steps = _model_setup(test_obj, file_format='')
      epoch_counter_cbk = EpochCounterCallback()
      cbks = [
          callbacks.EarlyStopping(
              monitor='loss', min_delta=0.05, patience=1, verbose=1),
          epoch_counter_cbk
      ]

      # Empirically, `model.fit()` is expected to terminate around the 22nd
      # epoch. Asserting that it stops before the 50th epoch avoids flakiness
      # and keeps the test predictable.
      model.fit(x=train_ds, epochs=100, steps_per_epoch=steps, callbacks=cbks)
      test_obj.assertLess(epoch_counter_cbk.last_epoch, 50)
Example #12
    def makeCb(self):
        #images_validation, labels_validation = self.loadData(self.validation_dataset_filepath)
        # Print the batch number at the beginning of every batch.
        trainedbatch = []
        batch_print_callback = callbacks.LambdaCallback(
            on_batch_begin=lambda batch, logs: trainedbatch.append(batch))

        # Stream the epoch loss to a file in JSON format. The file content
        # is not well-formed JSON but rather has a JSON object per line.

        json_log = open('loss_log.json', mode='wt', buffering=1)
        json_logging_callback = callbacks.LambdaCallback(
            on_epoch_end=lambda epoch, logs: json_log.write(
                json.dumps({
                    'epoch': epoch,
                    'loss': logs['loss'],
                    'mae': logs['mean_absolute_error'],
                    'val_loss': logs['val_loss'],
                    'val_mae': logs['val_mean_absolute_error']
                }) + '\n'),
            on_train_end=lambda logs: json_log.close())

        # Terminate some processes after having finished model training.
        processes = []
        cleanup_callback = callbacks.LambdaCallback(
            on_train_end=lambda logs:
            [p.terminate() for p in processes if p.is_alive()])

        tensorboard = callbacks.TensorBoard(log_dir="logs/{}".format(time()))

        callback = [
            callbacks.EarlyStopping(monitor='val_loss',
                                    patience=4,
                                    mode='auto'), batch_print_callback,
            json_logging_callback, cleanup_callback
        ]

        return callback
Example #13
def run_model(train_batches, validation_batches, num_classes):
    feature_extractor = hub.KerasLayer(MODULE_HANDLE,
                                       trainable=False,
                                       input_shape=IMAGE_SIZE + (3, ),
                                       output_shape=[FV_SIZE])

    model = Sequential(
        [feature_extractor,
         Dense(num_classes, activation='softmax')])

    optimizer = keras.optimizers.Adam(0.02)
    # the final Dense layer already applies softmax, so the outputs are probabilities, not logits
    loss = keras.losses.SparseCategoricalCrossentropy(from_logits=False)
    model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
    epochs = 50
    callback = callbacks.EarlyStopping(monitor='val_loss',
                                       patience=3,
                                       restore_best_weights=True)
    history = model.fit(train_batches,
                        epochs=epochs,
                        validation_data=validation_batches,
                        callbacks=[callback],
                        verbose=True)
    model.save('cat_vs_dog_model')
    return model
Example #14
    def build_callbacks(self, conf, callbacks_list):
        '''
        The purpose of the method is to set up logging and history. It is based
        on Keras Callbacks
        https://github.com/fchollet/keras/blob/fbc9a18f0abc5784607cd4a2a3886558efa3f794/keras/callbacks.py

        Currently used callbacks include: BaseLogger, CSVLogger, EarlyStopping.
        Other possible callbacks to add in future: RemoteMonitor,
        LearningRateScheduler

        Argument list:
        - conf: configuration dictionary; the relevant settings live in the
          "callbacks" section of the conf.yaml file.

        Relevant parameters are:
        - list: additional callbacks to enable, read in the driver script and
          passed in as callbacks_list (see next arg).

        - metrics: list of quantities monitored during training and validation.

        - mode: one of {auto, min, max}. The decision to overwrite the current
          save file is made based on either the maximization or the
          minimization of the monitored quantity. For val_acc this should be
          max, for val_loss it should be min, etc. In auto mode the direction
          is inferred automatically from the name of the monitored quantity.

        - monitor: quantity used for early stopping; it must be one of the
          monitored metrics.

        - patience: number of epochs without improvement to wait before
          stopping training early.

        - callbacks_list: the callbacks.list configuration parameter,
          specifying the list of additional callbacks.

        Returns: modified list of callbacks (as a CallbackList).
        '''

        mode = conf['callbacks']['mode']
        monitor = conf['callbacks']['monitor']
        patience = conf['callbacks']['patience']
        csvlog_save_path = conf['paths']['csvlog_save_path']
        # CSV callback is on by default
        if not os.path.exists(csvlog_save_path):
            os.makedirs(csvlog_save_path)

        callbacks_list = conf['callbacks']['list']
        callbacks = [cbks.BaseLogger()]
        callbacks += [self.history]
        callbacks += [
            cbks.CSVLogger("{}callbacks-{}.log".format(
                csvlog_save_path,
                datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")))
        ]

        if "earlystop" in callbacks_list:
            callbacks += [
                cbks.EarlyStopping(patience=patience,
                                   monitor=monitor,
                                   mode=mode)
            ]
        if "lr_scheduler" in callbacks_list:
            pass

        return cbks.CallbackList(callbacks)
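A sketch of the conf entries this method actually reads (the values below are placeholders, and the nesting is assumed from the lookups above):

conf = {
    'callbacks': {
        'mode': 'min',
        'monitor': 'val_loss',
        'patience': 5,
        'list': ['earlystop'],  # 'lr_scheduler' is accepted but currently a no-op
    },
    'paths': {
        'csvlog_save_path': 'csv_logs/',
    },
}
# callback_list = builder.build_callbacks(conf, [])  # `builder` is whatever object owns this method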
Example #15
def rnn_train(space):  #functional
    ''' train a GRU-based RNN model on the training / validation set -> give predictions of Y '''
    params = space.copy()

    lookback = 20  # lookback = 5Y * 4Q = 20Q
    x_fields = 10  # lgbm top15 features -> 10 features in rnn

    inputs_loss_weight = 0.1  # loss weights for individual outputs from each rnn model
    dense_loss_weight = 2  # loss weights for final output
    loss_weights = [inputs_loss_weight] * x_fields + [
        dense_loss_weight
    ]  # loss weights for training

    loss = [args.objective] * (
        x_fields + 1)  # use MAE loss function for all inputs and final
    metrics = [args.objective] * (x_fields + 1)

    input_img = Input(shape=(lookback, x_fields))
    outputs = []
    states = []

    for col in range(10):  # build model for each feature

        g_1 = K.expand_dims(
            input_img[:, :, col], axis=2
        )  # add dimension to certain feature: shape = (samples, 20, 1)

        for i in range(params['num_gru_layer']):
            temp_nodes = int(
                min(params['gru_nodes'] * (2**(params['gru_nodes_mult'] * i)),
                    8))
            extra = dict(return_sequences=True)

            if not args.bi:
                if i == params['num_gru_layer'] - 1:
                    extra = dict(return_sequences=False)
                    g_state = GRU(temp_nodes, **extra)(g_1)  # forecast state
                elif i == 0:
                    g_1 = GRU(temp_nodes, **extra)(g_1)
                else:
                    g_1 = GRU(temp_nodes,
                              dropout=params['gru_dropout'],
                              **extra)(g_1)

            else:  # try bidirectional one
                if i == params['num_gru_layer'] - 1:
                    extra = dict(return_sequences=False)
                    g_state = GRU(temp_nodes, **extra)(g_1)  # forecast state
                elif i == 0:
                    g_1 = Bidirectional(GRU(temp_nodes, **extra))(g_1)
                else:
                    g_1 = Bidirectional(
                        GRU(temp_nodes, dropout=params['gru_dropout'],
                            **extra))(g_1)

        g_output = Dense(1)(g_state)

        states.append(g_state)
        outputs.append(g_output)

    f_x = Concatenate(axis=1)(states)
    for i in range(params['num_dense_layer']):  # for second or third dense layers
        f_x = Dense(10)(f_x)

    f_x = Dense(1, name='final_dense')(f_x)

    outputs.append(f_x)
    model = Model(
        inputs=input_img,
        outputs=outputs)  # outputs = 10 forecast states + final forecast

    callbacks_list = [
        callbacks.ReduceLROnPlateau(monitor='val_loss',
                                    factor=0.1,
                                    patience=10),
        callbacks.EarlyStopping(monitor='val_loss', patience=10, mode='auto')
    ]  # add callbacks
    lr_val = 10**-int(params['learning_rate'])
    adam = optimizers.Adam(lr=lr_val)
    model.compile(adam, loss=loss, metrics=metrics, loss_weights=loss_weights)
    model.summary()

    history = model.fit(X_train, [Y_train] * (x_fields + 1),
                        epochs=50,
                        batch_size=params['batch_size'],
                        validation_data=(X_valid, [Y_valid] * (x_fields + 1)),
                        verbose=1,
                        callbacks=callbacks_list)

    Y_test_pred = model.predict(X_test)[-1]  # final dense predictions
    Y_train_pred = model.predict(X_train)[-1]
    Y_valid_pred = model.predict(X_valid)[-1]

    return Y_test_pred, Y_train_pred, Y_valid_pred, history
Example #16
def train(x_train, y_train, x_test, y_test, epochs):

    #  calculate classes
    if np.unique(y_train).shape[0] == np.unique(y_test).shape[0]:
        #
        num_classes = np.unique(y_train).shape[0]
    else:
        print('Error in class data...')
        return -2

    # set validation data
    '''val_size = int(0.1 * x_train.shape[0])
    r = np.random.randint(0, x_train.shape[0], size=val_size)
    x_val = x_train[r, :, :]
    y_val = y_train[r]
    x_train = np.delete(x_train, r, axis=0)
    y_train = np.delete(y_train, r, axis=0)'''
    step = int(x_train.shape[0] * 0.005)
    length = int(x_train.shape[0] * 0.1 * 0.005)
    r = []
    for i in range(0, x_train.shape[0] - length, step):
        r.extend(range(i, i + length))
    x_val = x_train[r, :, :]
    y_val = y_train[r]
    x_train = np.delete(x_train, r, axis=0)
    y_train = np.delete(y_train, r, axis=0)

    print('\nInitializing CNN2D...')
    print('\nclasses:', num_classes)
    print('x train shape:', x_train.shape)
    print('x val shape:', x_val.shape)
    print('x test shape:', x_test.shape)
    print('y train shape:', y_train.shape)
    print('y val shape:', y_val.shape)
    print('y test shape:', y_test.shape)
    print("\nTrain split with mean|std {:.2f}|{:.2f}".format(
        np.mean(x_train), np.std(x_train)))
    print("Test split with mean|std {:.2f}|{:.2f}".format(
        np.mean(x_test), np.std(x_test)))

    # shape data
    x_train = x_train.reshape(x_train.shape[0], x_train.shape[1],
                              x_train.shape[2], 1)
    x_val = x_val.reshape(x_val.shape[0], x_val.shape[1], x_val.shape[2], 1)
    x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2],
                            1)
    y_train = tf.keras.utils.to_categorical(y_train, num_classes)
    y_val = tf.keras.utils.to_categorical(y_val, num_classes)
    y_test = tf.keras.utils.to_categorical(y_test, num_classes)

    # define the model
    activation = 'elu'
    regularizer = 0.0000
    dropout = 0.25

    # preprocessing
    '''
    offset = 1.0 * np.std(x_train)
    dc0 = (x)
    dc1 = GaussianNoise(offset*0.1)(x)
    dc2 = GaussianDropout(dropout)(x)
    dc3 = Lambda(lambda r: r + __import__('keras').backend.random_uniform((1,), -offset, offset))(x)
    dc4 = Lambda(lambda r: r + __import__('keras').backend.random_uniform((1,), -offset, offset))(x)
    m = Concatenate()([dc0, dc1, dc2, dc3, dc4])
    m = Lambda(lambda r: r - __import__('keras').backend.mean(r))(x)
    '''

    # sequential

    model = Sequential()
    model.add(
        Conv2D(16,
               kernel_size=(3, 3),
               strides=(2, 1),
               activation='elu',
               kernel_regularizer=regularizers.l2(regularizer),
               input_shape=(x_train.shape[1], x_train.shape[2], 1)))
    model.add(EntropyPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               strides=(1, 1),
               activation='elu',
               kernel_regularizer=regularizers.l2(regularizer)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))
    model.add(
        Conv2D(64,
               kernel_size=(3, 3),
               strides=(1, 1),
               activation='elu',
               kernel_regularizer=regularizers.l2(regularizer)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))
    # model.add(Conv2D(128, kernel_size=(3, 3), strides=(1, 1), activation='elu', kernel_regularizer=regularizers.l2(regularizer)))
    # model.add(MaxPooling2D(pool_size=(1, 2)))
    # model.add(Dropout(dropout))
    model.add(Flatten())
    model.add(
        Dense(64,
              activation='elu',
              kernel_regularizer=regularizers.l2(regularizer)))
    model.add(Dropout(dropout))
    model.add(Dense(num_classes, activation='softmax'))

    # functional
    '''
    x = Input((x_train.shape[1], x_train.shape[2], x_train.shape[3]))
    m = Conv2D(16, 3, activation=activation , kernel_regularizer=regularizers.l2(regularizer))(x)
    m = EntropyPooling2D((2, 2))(m)
    m = Dropout(dropout)(m)
    m = Conv2D(32, 3, activation=activation, kernel_regularizer=regularizers.l2(regularizer))(m)
    m = EntropyPooling2D((2, 2))(m)
    m = Dropout(dropout)(m)
    m = Conv2D(64, 3, activation=activation, kernel_regularizer=regularizers.l2(regularizer))(m)
    m = EntropyPooling2D((2, 2))(m)
    m = Dropout(dropout)(m)
    if x_train.shape[1] < 50:
        #
        m = Flatten()(m)
    else:
        m = Conv2D(128, 3, activation=activation, kernel_regularizer=regularizers.l2(regularizer))(m)
        m = GlobalAveragePooling2D()(m)
        m = Dropout(dropout)(m)
    m = (Dense(64, activation=activation, kernel_regularizer=regularizers.l2(regularizer)))(m)
    m = Dropout(dropout)(m)
    y = Dense(num_classes, activation='softmax')(m)
    model = Model(inputs=[x], outputs=[y])
    '''

    # summarize model
    for i in range(0, len(model.layers)):
        if i == 0:
            plot_model(model, to_file='Models\\model_cnn2d.png')
            # f = open('Models\\model_cnn2d.txt', 'w')
            # print(' ')
        # print('{}. Layer {} with input / output shapes: {} / {}'.format(i, model.layers[i].name, model.layers[i].input_shape, model.layers[i].output_shape))
        # f.write('{}. Layer {} with input / output shapes: {} / {} \n'.format(i, model.layers[i].name, model.layers[i].input_shape, model.layers[i].output_shape))
        if i == len(model.layers) - 1:
            # f.close()
            print(' ')
            model.summary()

    # compile, fit evaluate
    callback = [
        callbacks.EarlyStopping(monitor='val_accuracy',  # tf.keras logs 'val_accuracy', not 'val_acc'
                                min_delta=0.01,
                                patience=10,
                                restore_best_weights=True)
    ]
    model.compile(loss=tf.keras.losses.categorical_crossentropy,
                  optimizer=tf.keras.optimizers.Adam(),
                  metrics=['accuracy'])
    model.fit(x_train,
              y_train,
              batch_size=256,
              epochs=epochs,
              verbose=2,
              validation_data=(x_val, y_val),
              callbacks=callback)
    score = model.evaluate(x_test, y_test, verbose=2)

    # evaluate on larger frames
    aggr_size = 5
    for i in range(0, y_test.shape[0] - aggr_size, aggr_size):
        if i == 0:
            y_pred = model.predict(x_test)
            y_pred = np.argmax(y_pred, axis=1)
            y_test = np.argmax(y_test, axis=1)
            y_aggr_test = []
            y_aggr_pred = []
        if np.unique(y_test[i:i + aggr_size]).shape[0] == 1:
            y_aggr_test.append(stats.mode(y_test[i:i + aggr_size])[0][0])
            y_aggr_pred.append(stats.mode(y_pred[i:i + aggr_size])[0][0])
    # print(confusion_matrix(np.argmax(y_test, axis=1), np.argmax(y_pred, axis=1)))
    scipy_score = classification_report(y_aggr_test,
                                        y_aggr_pred,
                                        output_dict=True)['accuracy']
    print('short {:.2f} and aggr {:.2f}'.format(score[1], scipy_score))

    # save model
    open("Models\\model_cnn2d.json", "w").write(model.to_json())
    pickle.dump(model.get_config(), open("Models\\model_cnn2d.pickle", "wb"))
    model.save_weights("Models\\model_cnn2d.h5")

    # results
    return score[1]
Example #17
def creat_net(train_generator, validation_generator, batch_size, image_lengh,
              image_width):
    base_model = applications.InceptionResNetV2(weights='imagenet',
                                                include_top=False,
                                                input_shape=(image_width,
                                                             image_lengh, 3))
    for layer in base_model.layers:
        layer.trainable = False
    x = base_model.output
    x = tf.keras.layers.Conv2D(128,
                               kernel_size=(3, 3),
                               strides=(1, 1),
                               activation='relu')(x)
    x = tf.keras.layers.Dropout(0.2)(x)
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    x = tf.keras.layers.Dropout(0.2)(x)
    x = tf.keras.layers.Dense(4, activation='softmax')(x)
    model = Model(base_model.layers[0].input, x)
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    # save the best model
    filepath = './模型/InceptionResNetV2_weights-improvement-{epoch:02d}-{val_accuracy:.2f}.hdf5'
    checkpoint = callbacks.ModelCheckpoint(filepath,
                                           monitor='val_accuracy',
                                           verbose=1,
                                           save_best_only=True,
                                           mode='max')
    early = callbacks.EarlyStopping(monitor='val_loss',
                                    min_delta=0,
                                    patience=0,
                                    verbose=0,
                                    mode='auto',
                                    baseline=None,
                                    restore_best_weights=False)
    model.fit_generator(train_generator,
                        epochs=30,
                        steps_per_epoch=1707 // batch_size,
                        validation_data=validation_generator,
                        validation_steps=264 // batch_size,
                        callbacks=[checkpoint, early])  # was [checkpoint, checkpoint]; `early` was defined but never passed

    # plot loss and accuracy curves
    loss = model.history.history['loss']
    val_loss = model.history.history['val_loss']
    epoches = range(1, len(loss) + 1)
    acc = model.history.history['accuracy']
    val_acc = model.history.history['val_accuracy']
    plt.subplot(121)
    plt.plot(epoches, loss, 'bo', label='training_loss')
    plt.plot(epoches, val_loss, 'r', label='validation_loss')
    plt.xlabel('epoches')
    plt.ylabel('loss')
    plt.title('losses of train and val')
    plt.legend()
    plt.subplot(122)
    plt.plot(epoches, acc, 'bo', label='training_acc')
    plt.plot(epoches, val_acc, 'r', label='validation_acc')
    plt.xlabel('epoches')
    plt.ylabel('acc')
    plt.title('accuracy of train and val')
    plt.legend()
    plt.show()
Example #18
from GVHD_BAR.show_data import calc_results_and_plot
from GVHD_BAR.calculate_distances import calculate_distance
from GVHD_BAR.cluster_time_events import cluster_based_on_time
import os

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
RECORD = True
PLOT = False
USE_SIMILARITY = False
USE_CLUSTER = False
USE_CLOSEST_NEIGHBOR = False
USE_CERTAINTY = False
PLOT_INPUT_TO_NN_STATS = False

callbacks_ = callbacks.EarlyStopping(monitor='my_mse_loss',
                                     min_delta=0.5,
                                     patience=10,
                                     verbose=0,
                                     mode='auto')

# @autograph.convert()
def my_loss(y_true, y_pred):
    mse_loss = my_mse_loss(y_true, y_pred)

    time_sense_loss = y_true[:, 2] - y_pred[:, 1]  # Max_delta - predicted_delta should be negative
    tsls = time_sense_loss #tf.square(time_sense_loss)

    return y_true[:, 4] * tsls + y_true[:, 3] * mse_loss

def my_mse_loss(y_true, y_pred):
    mse_loss = tf.reduce_mean(tf.square(y_true[:, 1] - y_pred[:, 1]))

    return mse_loss
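A minimal wiring sketch (not part of the original file): registering my_mse_loss as a metric makes the 'my_mse_loss' name that callbacks_ monitors actually appear in the training logs; model, x_train and y_train are placeholders.

model.compile(optimizer='adam', loss=my_loss, metrics=[my_mse_loss])
model.fit(x_train, y_train,
          epochs=100,
          validation_split=0.2,
          callbacks=[callbacks_])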
Example #19
    # Build the model
    model = inception_resnet_v2.build_inception_resnet_v2_notop(image_dimensions = (image_height, image_width, image_depth)
    , pooling = None
    , size_final_dense = 256
    , num_classes = num_classes
    , trainable = True
    , weights = None
    , final_regulariser = l1(0.01))
    #model = multi_gpu_model(model, gpus=2)
    print("MODEL BUILT")

    # Now train it
    opt_RMSprop = RMSprop(lr=0.0001)
    model.compile(optimizer=opt_RMSprop,loss='categorical_crossentropy', metrics=['accuracy'])
    callback_lr_plateau = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5)
    callback_earlystopping = callbacks.EarlyStopping(monitor='val_loss', patience=3)
    print("MODEL COMPILED")

    train_start = time.time()
    os.makedirs(os.path.dirname(dir_tensorboard_logs), exist_ok=True) # Make tensorboard log directory
    model.fit(dataset_train
    , epochs=num_epochs
    , steps_per_epoch=num_steps_per_epoch
    , validation_data=dataset_valid
    , validation_steps=num_steps_per_epoch_valid
    # callback_tensorboard is defined earlier in the original file; the snippet is truncated
    , callbacks = [callback_tensorboard, callback_lr_plateau, callback_earlystopping]
    )
    print("Training time: %s seconds" % (time.time() - train_start))
    print(model.summary())

Example #20
    def train(self):
        self.build_and_compile_model()

        # Identify last version of trained model
        files = get_checkpoint_file_list(self.config.checkpoint_dir,
                                         self.config.model_name)

        if not files:
            model_number = self.config.model_name + "_0"
        else:
            # Resume training vs new training decision
            if self.config.resume_train:
                print("Resume training from previous checkpoint")
                try:
                    self.recover_model_from_checkpoint()
                    model_number = self.model_version_number
                except FileNotFoundError:
                    print("Model not found. Creating new model")
                    model_number = self.model_version_number
                    safe_mkdir_recursive(self.config.checkpoint_dir +
                                         model_number)
            else:
                model_number = self.config.model_name + '_' + str(
                    int(files[-1].split('_')[-1]) + 1)
                os.mkdir(self.config.checkpoint_dir + model_number)

        self.trained_model_dir = self.config.checkpoint_dir + model_number + '/'

        # Get training and validation datasets from saved files
        dataset = self.get_dataset(train=True,
                                   val_split=True,
                                   random_split=False,
                                   shuffle=True,
                                   repeat_ds=True,
                                   normalize=False)
        train_ds, validation_ds, ds_lengths = dataset

        train_steps_per_epoch = int(
            math.ceil(ds_lengths[0] / self.config.batch_size))
        val_steps_per_epoch = int(
            math.ceil((ds_lengths[1] / self.config.batch_size)))

        def lr_scheduler(epoch, lr):
            decay_rate = 0.5
            if epoch % self.config.lr_scheduler == 0 and epoch:
                return lr * decay_rate
            return lr

        keras_callbacks = [
            callbacks.EarlyStopping(patience=self.config.patience,
                                    monitor='val_loss'),
            callbacks.TensorBoard(write_images=True,
                                  log_dir=self.config.checkpoint_dir +
                                  model_number + "/keras",
                                  histogram_freq=self.config.summary_freq),
            callbacks.LearningRateScheduler(lr_scheduler, verbose=1),
            CustomModelCheckpoint(filepath=os.path.join(
                self.config.checkpoint_dir + model_number,
                self.config.model_name + "_{epoch:02d}.h5"),
                                  save_weights_only=True,
                                  verbose=1,
                                  period=self.config.save_freq,
                                  extra_epoch_number=self.last_epoch_number +
                                  1),
        ]

        # Train!
        self.trainable_model.fit(train_ds,
                                 verbose=1,
                                 epochs=self.config.max_epochs,
                                 steps_per_epoch=train_steps_per_epoch,
                                 validation_steps=val_steps_per_epoch,
                                 validation_data=validation_ds,
                                 callbacks=keras_callbacks)
Example #21
        activation=activation,
        name=name))
model.add(layers.Dense(num_classes, activation='softmax'))

model.compile(
    optimizer=optimizers.Adam(lr=learning_rate),
    loss='categorical_crossentropy',
    metrics=['accuracy'])

util.save_model_summary(model)

# Train

callbacks = [  # note: this rebinding shadows the imported `callbacks` module from here on
    callbacks.EarlyStopping(
        monitor='val_acc',
        patience=2),
    callbacks.TensorBoard(
        log_dir='logs',
        histogram_freq=0,
        batch_size=32,
        write_graph=True,
        write_grads=False,
        write_images=False),
    callbacks.ModelCheckpoint(
        filepath='weights.{epoch:02d}-{val_loss:.2f}.hdf5'),
]

model.fit(
    x=data.train.images,
    y=data.train.labels,