import argparse
import bisect
import os
from os import path

import tensorflow as tf
from tensorflow.keras import callbacks

# data, model, and metric are project-local modules (dataset wrappers,
# network definitions, and PSNR metrics) assumed to sit next to this script.
import data
import metric
import model


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--patch_size', type=int, default=128)
    parser.add_argument('--keep_range', action='store_true')
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--depth', type=int, default=4)
    parser.add_argument('--epochs', type=int, default=20)
    parser.add_argument('--lr', type=float, default=1e-4)
    parser.add_argument('--lr_gamma', type=float, default=0.5)
    parser.add_argument('--milestones', type=int, nargs='+', default=[10, 15])
    parser.add_argument('--exp_name', type=str, default='baseline')
    parser.add_argument('--save_as', type=str, default='models/deblur.hdf5')
    cfg = parser.parse_args()

    # For checking the GPU usage
    #tf.debugging.set_log_device_placement(True)
    # For limiting the GPU usage
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        try:
            tf.config.experimental.set_memory_growth(gpus[0], True)
        except RuntimeError as e:
            print(e)

    dataset_train = data.REDS(
        cfg.batch_size,
        patch_size=cfg.patch_size,
        train=True,
        keep_range=cfg.keep_range,
    )
    # We have 3,000 validation frames.
    # Note that each frame will be center-cropped for the validation.
    dataset_val = data.REDS(
        20,
        patch_size=cfg.patch_size,
        train=False,
        keep_range=cfg.keep_range,
    )

    if cfg.depth == 4:
        net = model.Baseline(cfg.patch_size, cfg.patch_size)
    else:
        net_class = getattr(model, 'Small{}'.format(cfg.depth))
        net = net_class(cfg.patch_size, cfg.patch_size)

    net.build(input_shape=(None, cfg.patch_size, cfg.patch_size, 3))
    kwargs = {'optimizer': 'adam', 'loss': 'mse'}
    if cfg.keep_range:
        net.compile(**kwargs, metrics=[metric.psnr_full])
    else:
        net.compile(**kwargs, metrics=[metric.psnr])
    net.summary()

    # Callback functions
    # For TensorBoard logging
    logging = callbacks.TensorBoard(
        log_dir=path.join('logs', cfg.exp_name),
        update_freq=100,
    )
    # For checkpointing
    os.makedirs(path.dirname(cfg.save_as), exist_ok=True)
    checkpointing = callbacks.ModelCheckpoint(
        cfg.save_as,
        verbose=1,
        save_weights_only=True,
    )
    def scheduler(epoch):
        idx = bisect.bisect_right(cfg.milestones, epoch)
        lr = cfg.lr * (cfg.lr_gamma**idx)
        return lr
    # For learning rate scheduling
    scheduling = callbacks.LearningRateScheduler(scheduler, verbose=1)

    net.fit_generator(
        dataset_train,
        epochs=cfg.epochs,
        callbacks=[logging, checkpointing, scheduling],
        validation_data=dataset_val,
        validation_freq=1,
        max_queue_size=16,
        workers=8,
        use_multiprocessing=True,
    )
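
A minimal standalone sketch of the milestone schedule defined above, assuming the default values (lr=1e-4, lr_gamma=0.5, milestones=[10, 15]):

import bisect

def step_decay(epoch, base_lr=1e-4, gamma=0.5, milestones=(10, 15)):
    # multiply base_lr by gamma once for every milestone already passed
    return base_lr * gamma ** bisect.bisect_right(milestones, epoch)

for epoch in (0, 9, 10, 14, 15, 19):
    print(epoch, step_decay(epoch))
# epochs 0-9 -> 1e-4, epochs 10-14 -> 5e-5, epochs 15+ -> 2.5e-5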
Example #2
    # Date time string.
    datetime_string = utils.get_datetime_string() + "_{}-{}".format(
        len(qrcodes_train), len(qrcodes_validate)) + "_".join(
            dataset_parameters["output_targets"])

    # Output path. Ensure its existence.
    output_path = os.path.join("/whhdata/models", datetime_string)
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    print("Using output path:", output_path)

    # Important things.
    pp = pprint.PrettyPrinter(indent=4)
    log_dir = os.path.join("/whhdata/models", "logs", datetime_string)
    tensorboard_callback = callbacks.TensorBoard(log_dir=log_dir)
    histories = {}

    # Training network.
    def train_rgbmaps():

        sequence_length = dataset_parameters["sequence_length"]

        model = models.Sequential()
        if sequence_length == 0:
            model.add(
                layers.Conv2D(64, (3, 3),
                              activation="relu",
                              input_shape=(image_size, image_size, 3)))
        else:
            # The original snippet is truncated here: the branch for
            # sequence_length > 0 is missing, and the keyword arguments below
            # evidently belong to a model.compile(...) call that was cut off:
            #     optimizer=optimizers.RMSprop(lr=2e-5),
            #     metrics=['acc'])
            pass

# Create the TensorBoard log folder
root_logdir = '/media/tohn/SSD/FP_All_Nor_Abnor_B3NA_25/my_logs'


def get_run_logdir():
    import time
    run_id = time.strftime("run_%Y_%m_%d_%H_%M_%S")
    return os.path.join(root_logdir, run_id)


run_logdir = get_run_logdir()

tensorboard_cb = callbacks.TensorBoard(run_logdir)

# Create a directory to store the models
os.makedirs("./models", exist_ok=True)

history = model2.fit_generator(train_generator,
                               steps_per_epoch=NUM_TRAIN // batch_size,
                               epochs=epochs,
                               validation_data=validation_generator,
                               validation_steps=NUM_TEST // batch_size,
                               verbose=1,
                               use_multiprocessing=True,
                               workers=1,
                               callbacks=[
                                   tensorboard_cb,
                                   callbacks.ModelCheckpoint(
                                       # the checkpoint arguments were cut off
                                       # in the original snippet; the filepath
                                       # below is an assumed placeholder
                                       "./models/model.{epoch:02d}.h5"),
                               ])
Example #4
    def _build_callback_hooks(self,
                              models_dir: str,
                              logs_dir: str,
                              is_training=True,
                              logging_frequency=25):
        """
        Build callback hooks for the training loop

        Returns:
            callbacks_list: list of callbacks
        """
        callbacks_list: list = list()

        if is_training:
            # Model checkpoint
            if models_dir:
                checkpoints_path = os.path.join(models_dir, CHECKPOINT_FNAME)
                cp_callback = callbacks.ModelCheckpoint(
                    filepath=checkpoints_path,
                    save_weights_only=False,
                    verbose=1,
                    save_best_only=True,
                    mode="max",
                    monitor="val_new_MRR",
                )
                callbacks_list.append(cp_callback)

            # Early Stopping
            early_stopping_callback = callbacks.EarlyStopping(
                monitor="val_new_MRR",
                mode="max",
                patience=2,
                verbose=1,
                restore_best_weights=True)
            callbacks_list.append(early_stopping_callback)

        # TensorBoard
        if logs_dir:
            tensorboard_callback = callbacks.TensorBoard(log_dir=logs_dir,
                                                         histogram_freq=1,
                                                         update_freq=5)
            callbacks_list.append(tensorboard_callback)

        # Debugging/Logging
        logger = self.logger

        class DebuggingCallback(callbacks.Callback):
            def __init__(self, patience=0):
                super(DebuggingCallback, self).__init__()

                self.epoch = 0

            def on_train_batch_end(self, batch, logs=None):
                if batch % logging_frequency == 0:
                    logger.info("[epoch: {} | batch: {}] {}".format(
                        self.epoch, batch, logs))

            def on_epoch_end(self, epoch, logs=None):
                logger.info("End of Epoch {}".format(self.epoch))
                logger.info(logs)

            def on_epoch_begin(self, epoch, logs=None):
                self.epoch = epoch + 1
                logger.info("Starting Epoch : {}".format(self.epoch))
                logger.info(logs)

            def on_train_begin(self, logs):
                logger.info("Training Model")

            def on_test_begin(self, logs):
                logger.info("Evaluating Model")

            def on_predict_begin(self, logs):
                logger.info("Predicting scores using model")

            def on_train_end(self, logs):
                logger.info("Completed training model")
                logger.info(logs)

            def on_test_end(self, logs):
                logger.info("Completed evaluating model")
                logger.info(logs)

            def on_predict_end(self, logs):
                logger.info("Completed Predicting scores using model")
                logger.info(logs)

        callbacks_list.append(DebuggingCallback())

        # Add more here

        return callbacks_list
Example #5
def train_model(training_data,
                output_model,
                loss_function='categorical_crossentropy',
                validation_data=None,
                fine_tune=None,
                tensorboard=False):
    """
    Train message segmentation model.

    :param training_data: JSON file with training data
    :param output_model: path prefix for model checkpoints
    :param loss_function: optimization loss function
    :param validation_data: JSON file with validation data
    :param fine_tune: fine-tune model from given file instead of training from scratch
    :param tensorboard: whether to write TensorBoard log data (under ./data/graph/)
    """

    tb_callback = callbacks.TensorBoard(log_dir='./data/graph/' +
                                        str(datetime.now()),
                                        update_freq='batch',
                                        write_graph=False,
                                        write_images=False)
    es_callback = callbacks.EarlyStopping(monitor='val_loss',
                                          verbose=1,
                                          patience=5)
    cp_callback = callbacks.ModelCheckpoint(
        output_model + '.epoch-{epoch:02d}.val_loss-{val_loss:.3f}.h5')
    cp_callback_no_val = callbacks.ModelCheckpoint(
        output_model + '.epoch-{epoch:02d}.loss-{loss:.3f}.h5')

    def get_line_model():
        line_input = layers.Input(shape=(LINE_LEN, INPUT_DIM))
        masking = layers.Masking(0)(line_input)
        bi_seq = layers.Bidirectional(layers.GRU(128),
                                      merge_mode='sum')(masking)
        bi_seq = layers.BatchNormalization()(bi_seq)
        bi_seq = layers.Activation('relu')(bi_seq)
        return line_input, bi_seq

    def get_context_model():
        context_input = layers.Input(shape=CONTEXT_SHAPE)
        conv2d = layers.Conv2D(128, (4, 4))(context_input)
        conv2d = layers.BatchNormalization()(conv2d)
        conv2d = layers.Activation('relu')(conv2d)
        conv2d = layers.Conv2D(128, (3, 3))(conv2d)
        conv2d = layers.Activation('relu')(conv2d)
        conv2d = layers.MaxPooling2D(2)(conv2d)
        flatten = layers.Flatten()(conv2d)
        dense = layers.Dense(128)(flatten)
        dense = layers.Activation('relu')(dense)
        return context_input, dense

    def get_base_model():
        line_input_cur, line_model_cur = get_line_model()
        line_input_prev, line_model_prev = get_line_model()
        context_input, context_model = get_context_model()

        concat = layers.concatenate(
            [line_model_cur, line_model_prev, context_model])
        dropout = layers.Dropout(0.25)(concat)
        dense = layers.Dense(len(MailLinesSequence.LABEL_MAP))(dropout)
        output = layers.Activation('softmax')(dense)

        return models.Model(
            inputs=[line_input_cur, line_input_prev, context_input],
            outputs=output)

    if fine_tune is None:
        segmenter = get_base_model()
    else:
        segmenter = models.load_model(fine_tune)
        logger.info('Freezing layers')

        for layer in segmenter.layers[:-2]:
            layer.trainable = False

    compile_args = {
        'optimizer': 'adam',
        'loss': loss_function,
        'metrics': ['categorical_accuracy']
    }

    effective_callbacks = [
        es_callback, cp_callback
    ] if validation_data is not None else [cp_callback_no_val]
    if tensorboard:
        effective_callbacks.append(tb_callback)

    segmenter.compile(**compile_args)
    segmenter.summary()

    train_seq = MailLinesSequence(training_data,
                                  CONTEXT_SHAPE,
                                  labeled=True,
                                  batch_size=TRAIN_BATCH_SIZE)
    val_seq = MailLinesSequence(
        validation_data,
        CONTEXT_SHAPE,
        labeled=True,
        batch_size=INF_BATCH_SIZE) if validation_data else None

    epochs = 20 if fine_tune is None else 10
    segmenter.fit_generator(train_seq,
                            epochs=epochs,
                            validation_data=val_seq,
                            shuffle=True,
                            use_multiprocessing=False,
                            workers=train_seq.num_workers,
                            max_queue_size=train_seq.max_queue_size,
                            callbacks=effective_callbacks)
Example #6
        return tf.matmul(inputs, self.kernel)

    def compute_output_shape(self, input_shape):
        shape = tf.TensorShape(input_shape).as_list()
        shape[-1] = self.output_dim
        return tf.TensorShape(shape)

    def get_config(self):
        base_config = super(MyLayer, self).get_config()
        base_config['output_dim'] = self.output_dim
        return base_config

    @classmethod
    def from_config(cls, config):
        return cls(**config)


callbacks = [  # NB: this list shadows the imported callbacks module from here on
    # Interrupt training if `val_loss` stops improving for over 2 epochs
    callbacks.EarlyStopping(patience=2, monitor='val_loss'),
    # Write TensorBoard logs to `./logs` directory
    callbacks.TensorBoard(log_dir='./logs')
]
# model.fit(data, labels, batch_size=32, epochs=5, callbacks=callbacks,
#           validation_data=(val_data, val_labels))

# show_network(model)

if __name__ == '__main__':
    pass
def get_callbacks(CONF, use_lr_decay=True):
    """
    Get a callback list to feed fit_generator.
    #TODO Use_remote callback needs proper configuration
    #TODO Add ReduceLROnPlateau callback?

    Parameters
    ----------
    CONF: dict

    Returns
    -------
    List of callbacks
    """

    calls = []

    # Add mandatory callbacks
    calls.append(callbacks.TerminateOnNaN())
    calls.append(LRHistory())

    # Add optional callbacks
    if use_lr_decay:
        milestones = np.array(
            CONF['training']['lr_step_schedule']) * CONF['training']['epochs']
        milestones = milestones.astype(int)  # np.int was removed in NumPy 1.24
        calls.append(
            LR_scheduler(lr_decay=CONF['training']['lr_step_decay'],
                         epoch_milestones=milestones.tolist()))

    if CONF['monitor']['use_tensorboard']:
        calls.append(
            callbacks.TensorBoard(log_dir=paths.get_logs_dir(),
                                  write_graph=False))

        # # Let the user launch Tensorboard
        # print('Monitor your training in Tensorboard by executing the following comand on your console:')
        # print('    tensorboard --logdir={}'.format(paths.get_logs_dir()))
        # Run Tensorboard  on a separate Thread/Process on behalf of the user
        port = os.getenv('monitorPORT', 6006)
        port = int(port) if len(str(port)) >= 4 else 6006
        # kill any previous process on that port
        subprocess.run(['fuser', '-k', '{}/tcp'.format(port)])
        p = Process(target=launch_tensorboard, args=(port, ), daemon=True)
        p.start()

    if CONF['monitor']['use_remote']:
        calls.append(callbacks.RemoteMonitor())

    if CONF['training']['use_validation'] and CONF['training'][
            'use_early_stopping']:
        calls.append(
            callbacks.EarlyStopping(patience=int(0.1 *
                                                 CONF['training']['epochs'])))

    if CONF['training']['ckpt_freq'] is not None:
        calls.append(
            callbacks.ModelCheckpoint(os.path.join(paths.get_checkpoints_dir(),
                                                   'epoch-{epoch:02d}.hdf5'),
                                      verbose=1,
                                      period=max(
                                          1,
                                          int(CONF['training']['ckpt_freq'] *
                                              CONF['training']['epochs']))))

    if not calls:
        calls = None

    return calls
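
launch_tensorboard is not shown in this snippet; judging from the call above (target=launch_tensorboard, args=(port,)), a minimal stand-in could simply shell out to the tensorboard CLI. The helper below is an assumption, not the project's actual code:

import subprocess

def launch_tensorboard(port, logdir='./logs'):
    # hypothetical helper: run the TensorBoard CLI on the given port;
    # logdir would really come from paths.get_logs_dir() above
    subprocess.run(['tensorboard', '--logdir', str(logdir), '--port', str(port)])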
Example #8
        try:
            learning_rates = np.logspace(*learning_rate, opts['epochs'])
        except ValueError:
            raise ValueError(
                '`learning_rate` has to be either a single float or a tuple '
                'of two floats [rate_beginning, rate_end]'
            )

    def lr_callback_func(epoch):
        learning_rate = learning_rates[epoch - initial_epoch]
        tf.summary.scalar('learning rate', data=learning_rate, step=epoch)
        return learning_rate

    lr_callback = callbacks.LearningRateScheduler(lr_callback_func)

    checkpoint_path = output_dir / 'logs' / 'models' / 'cp-{epoch:04d}.ckpt'
    model_checkpoint_callback = callbacks.ModelCheckpoint(str(checkpoint_path))
    writer = tf.summary.create_file_writer(logdir=str(output_dir / 'logs' /
                                                      'test'))
    tensorboard_callback = callbacks.TensorBoard(log_dir=output_dir / 'logs')
    images = np.array(*test_dataset_batched.take(1).as_numpy_iterator())[0]

    model.fit(train_dataset_batched,
              initial_epoch=initial_epoch,
              epochs=initial_epoch + opts.get('epochs', 10),
              validation_data=test_dataset_batched,
              callbacks=[
                  lr_callback, tensorboard_callback,
                  TestImagesTensorboard(images, writer),
                  model_checkpoint_callback
              ])
            model.add(layers.MaxPooling2D(pool_size=(2, 2)))

            for i in range(conv_layer - 1):
                model.add(layers.Conv2D(layer_size, (3, 3), activation='relu'))
                model.add(layers.MaxPooling2D(pool_size=(2, 2)))

            model.add(layers.Flatten())
            model.add(layers.Dropout(dropout_set))

            model.add(layers.Dense(1, activation='sigmoid'))

            model.compile(loss='binary_crossentropy',
                          optimizer=optimizers.Adam(learning_rate=0.1),
                          metrics=['acc'])
            NAME = "{}-conv-{}-nodes-{}-dropout-{}".format(
                conv_layer, layer_size, dropout_set,
                datetime.now().strftime("%H%M%S"))
            tensorboard = callbacks.TensorBoard(log_dir='catdog\\logs\\%s' %
                                                NAME)
            model.fit(x=X,
                      y=Y,
                      batch_size=32,
                      epochs=12,
                      validation_split=0.3,
                      callbacks=[tensorboard])

# %%
model.save('catdog\\models\\catdog-64x4')

# %%
Example #10
    def train(self,
              download_dir: str = '/tmp/seq2seq/downloads/',
              cache_dir: Optional[str] = '/tmp/seq2seq/cache/',
              log_dir: Optional[str] = '/tmp/seq2seq/logs/',
              save_dir: Optional[str] = '/tmp/seq2seq/models',
              epochs: int = 1,
              optimizer: str = 'adam',
              glove_n_components: int = 10,
              gaussian_noise: float = 0,
              capacity: int = 1,
              val_split: float = 0.1,
              test_split: float = 0.2,
              l2: float = 0.01,
              progress: Optional[Callable] = None):

        self.settings['seq2seq'] = {  # type: ignore
            'epochs': epochs,
            'optimizer': optimizer,
            'n_components': glove_n_components,
            'gaussian_noise': gaussian_noise,
            'capacity': capacity,
            'val_split': val_split,
            'test_split': test_split,
            'l2': l2,
        }

        embedding_input = layers.Input((None, glove_n_components),
                                       name='tokens',
                                       dtype=tf.float32)
        tfidf_input = layers.Input((None, 1), name='tfidf', dtype=tf.float32)
        keyphraseness_input = layers.Input((None, 1),
                                           name='keyphraseness',
                                           dtype=tf.float32)
        pos_input = layers.Input((None, 35), name='pos', dtype=tf.float32)

        net_inputs = (embedding_input, tfidf_input, keyphraseness_input,
                      pos_input)

        if 0 < gaussian_noise < 1:
            embedding_input = layers.GaussianNoise(gaussian_noise)(
                embedding_input)

        net = layers.Concatenate()(
            [embedding_input, tfidf_input, keyphraseness_input, pos_input])
        net = layers.Bidirectional(
            layers.GRU(16 * capacity, return_sequences=True))(net)
        net = layers.Bidirectional(
            layers.GRU(16 * capacity, return_sequences=True))(net)
        net = layers.Bidirectional(
            layers.GRU(16 * capacity, return_sequences=True))(net)
        net = layers.Dense(32 * capacity,
                           activation='relu',
                           kernel_regularizer=regularizers.l2(l2))(net)
        net = layers.BatchNormalization()(net)
        net = layers.Dense(32 * capacity,
                           activation='relu',
                           kernel_regularizer=regularizers.l2(l2))(net)
        net = layers.BatchNormalization()(net)
        prediction_layer = layers.Dense(1, activation='sigmoid')(net)

        self.model = models.Model(inputs=net_inputs, outputs=prediction_layer)

        self.model.compile(optimizer=optimizer,
                           loss='binary_crossentropy',
                           metrics=['acc'],
                           sample_weight_mode='temporal')

        self.model.summary()

        (x_train,
         y_train), validation_data, (x_test, y_test) = self._create_data(
             download_dir=download_dir,
             glove_n_components=glove_n_components,
             val_split=val_split,
             test_split=test_split,
             cache_dir=cache_dir or '.',
             progress=progress)

        class_count_counter = Counter(int(a) for b in y_train for a in b)
        print("Training set class counts", class_count_counter)

        num_classes = max(class_count_counter.keys()) + 1
        print('Num classes', num_classes)

        class_counts = np.zeros(num_classes, dtype=int)
        for cl, count in class_count_counter.items():
            class_counts[cl] = count

        self.class_weights = -np.log(class_counts / np.sum(class_counts))
        print("Training class weights", self.class_weights)

        sample_weights = np.array([[self.class_weights[y2] for y2 in y1]
                                   for y1 in y_train.squeeze()],
                                  dtype=float)
        print('Training sample weights:', sample_weights.shape)

        cb = []
        if log_dir:
            tb = callbacks.TensorBoard(log_dir,
                                       write_graph=True,
                                       histogram_freq=5)
            cb.append(tb)

        try:
            self.fit_result = self.model.fit(epochs=epochs,
                                             x=x_train,
                                             y=y_train,
                                             sample_weight=sample_weights,
                                             validation_data=validation_data,
                                             callbacks=cb)
        except KeyboardInterrupt:
            print('Model fit() interrupted by user input.')

        test_sample_weights = np.array([[self.class_weights[y2] for y2 in y1]
                                        for y1 in y_test.squeeze()],
                                       dtype=float)

        print('Evaluation of test samples')
        self.evaluation = self.model.evaluate(
            x_test, y_test, sample_weight=test_sample_weights)

        if save_dir:
            os.makedirs(save_dir, exist_ok=True)
            now = datetime.datetime.now()
            filename = now.strftime('%y-%m-%d.%H:%M:%S.model')
            filepath = os.path.join(save_dir, filename)
            print('Saving model to', filepath)
            self.save(filepath)
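
A quick standalone check of the -log class-weighting scheme used above; with a toy three-class count vector, the rarest class receives the largest weight:

import numpy as np

class_counts = np.array([900, 90, 10])                 # toy counts
weights = -np.log(class_counts / np.sum(class_counts))
print(weights)  # ~[0.105, 2.408, 4.605]: the 1% class is weighted hardest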
def train_deepdrug(batch_size,
                   lr,
                   epoch,
                   input,
                   log_dir,
                   k_fold=10,
                   augmentation=False):

    # optimize gpu memory usage 
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    assert len(physical_devices) > 0
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
    
    # load the data
    with open(input, "rb") as f:
        grids_dict = pk.load(f)
    grids = list(grids_dict.values())

    def get_label(key):
        return 1 if "active" in key else 0

    labels = list(map(get_label, grids_dict.keys()))
    
    # shuffle input
    idx_list = list(range(len(labels)))
    rd.shuffle(idx_list)
    grids = list(np.array(grids)[idx_list])
    labels = list(np.array(labels)[idx_list])
    # print("grids type:", type(grids))
    # print("grid type:", type(grids[0]), "shape:", grids[0].shape)
    # print("labels type:", type(labels))
    # print("label type:", type(labels[0]), "shape:", labels[0].shape)

    histories = list()
    timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime())
    for k in range(k_fold):
        tf.keras.backend.clear_session()
        # get training data with respect to the kth fold
        x, y, val_x, val_y = split_dataset_with_kfold(grids, labels, k, k_fold)

        # to balance different classes
        sample_weights = compute_sample_weight('balanced', y)

        val_data = (val_x, val_y)

        # build & compile model
        mdl = DeepDrug3DBuilder.build(classes=1)
        adam = Adam(
            lr=lr, beta_1=0.9, beta_2=0.999, epsilon=None,
            decay=0.0, amsgrad=False)
        loss = "binary_crossentropy"
        metric = "binary_accuracy"
        auc_metric = tf.keras.metrics.AUC()
        precision_metric = tf.keras.metrics.Precision()
        recall_metric = tf.keras.metrics.Recall()
        mdl.compile(
            optimizer=adam, loss=loss,
            metrics=[metric, auc_metric, precision_metric, recall_metric])
        # callback function for model checking
        log_d = os.path.join(log_dir, timestamp, "fold_{}".format(k))
        os.makedirs(log_d, exist_ok=True)
        tfCallBack = callbacks.TensorBoard(log_dir=log_d)
        history = mdl.fit(
            x, y, epochs=epoch, batch_size=batch_size,
            sample_weight=sample_weights, validation_data=val_data,
            shuffle=True, callbacks=[tfCallBack])
        histories.append(history)

    val_accs = list()
    val_aucs = list()
    val_precisions = list()
    val_recalls = list()
    for his in histories:
        try:
            val_accs.append(his.history["val_binary_accuracy"])
        except KeyError:
            val_accs.append(his.history["val_categorical_accuracy"])
        val_aucs.append(his.history["val_auc"])
        val_precisions.append(his.history["val_precision"])
        val_recalls.append(his.history["val_recall"])

    # find best epoch
    val_accs = np.array(val_accs)
    avgs = np.mean(val_accs, axis=0)
    best_epoch = np.argmax(avgs)
    # get the accuracy and standard deviation of the best epoch
    max_accs = val_accs[:, best_epoch]
    acc_avg = np.mean(max_accs)
    acc_std = np.std(max_accs)
    # get the auc score of the best epoch
    max_aucs = np.array(val_aucs)[:, best_epoch]
    auc_avg = np.mean(max_aucs)
    auc_std = np.std(max_aucs)
    # get the precision, recall, and F1 score of the best epoch
    max_precisions = np.array(val_precisions)[:, best_epoch]
    precision_avg = np.mean(max_precisions)
    precision_std = np.std(max_precisions)
    max_recalls = np.array(val_recalls)[:, best_epoch]
    recall_avg = np.mean(max_recalls)
    recall_std = np.std(max_recalls)
    max_f1s = 2 * max_precisions * max_recalls / (max_precisions + max_recalls)
    f1_avg = np.mean(max_f1s)
    f1_std = np.std(max_f1s)
    # print and save the training results
    print(
        "{}-fold cross validation performs the best "
        "at epoch {}".format(k_fold, best_epoch))
    print("Accuracy is {} +- {}".format(acc_avg, acc_std))
    print("AUC ROC is {} +- {}".format(auc_avg, auc_std))
    print("Precision is {} +- {}".format(precision_avg, precision_std))
    print("Recall is {} +- {}".format(recall_avg, recall_std))
    print("F1 score is {} +- {}".format(f1_avg, f1_std))
    print()
    with open(os.path.join(log_dir, timestamp, "readme"), "w") as f:
        print("dataset: {}".format(os.path.basename(input)), file=f)
        print("batch size: {}".format(batch_size), file=f)
        print("learning rate: {}".format(lr), file=f)
        print("epochs: {}".format(epoch), file=f)
        print("validation folds: {}".format(k_fold), file=f)
        print(
            "{}-fold cross validation performs the best "
            "at epoch {}".format(k_fold, best_epoch),
            file=f)
        print("Accuracy is {} +- {}".format(acc_avg, acc_std), file=f)
        print("AUC ROC is {} +- {}".format(auc_avg, auc_std), file=f)
        print("Precision is {} +- {}".format(
            precision_avg, precision_std), file=f)
        print("Recall is {} +- {}".format(recall_avg, recall_std), file=f)
        print("F1 score is {} +- {}".format(f1_avg, f1_std), file=f)
 def _internal_fit(self,
                   x_train,
                   y_train,
                   validation_x=None,
                   validation_y=None):
     """Fit internal model with given data."""
     # As a new model will be trained, remove old stateful model, if any
     self._inference_model = None
     self._inference_batch_size = None
     # x_train data shape is (samples, steps, lags, series)
     if self._options.sequential_mini_step > 0:
         # In this case, we always perform one step prediction, so ignore
         # other steps of y_train for training, and use them only for eval.
         # Note we duplicate y_train in memory and don't overwrite it
         y_train = y_train[:, :self._options.sequential_mini_step, :]
         if validation_y is not None:
             validation_y = validation_y[:, :self._options.
                                         sequential_mini_step, :]
     if self._options.nn_use_variable_sigma:
         # Note we add a dummy output that is ignored by metrics. It is
         # because metrics need same input size in prediction and y_train.
         # Sigma predictions are ignored (except for the loss).
         # This duplicates y_train in memory, but it does not overwrite it
         y_train = np.stack([y_train, np.zeros(y_train.shape)], axis=1)
         if validation_y is not None:
             validation_y = np.stack(
                 [validation_y, np.zeros(validation_y.shape)], axis=1)
         metrics = [
             util.sigma_mean_squared_error, util.sigma_mean_absolute_error,
             util.sigma_mean_absolute_percentage_error
         ]
     else:
         metrics = [
             losses.mean_squared_error, losses.mean_absolute_error,
             losses.mean_absolute_percentage_error
         ]
     # We create model here
     input_layer = layers.Input(shape=x_train.shape[1:])
     last_layer = self._create_net(input_layer)
     self._model = models.Model(inputs=input_layer, outputs=last_layer)
     optimizer = getattr(
         optimizers,
         self._options.nn_optimizer)(lr=self._options.nn_learning_rate)
     self._model.compile(loss=self._loss(),
                         optimizer=optimizer,
                         metrics=metrics)
     logging.info(self._model.summary())
     validation_data = None
     calls = None
     if validation_x is not None:
         validation_data = (validation_x, validation_y)
         if self._options.nn_patience >= 0:
             if self._options.flow_use_temperature:
                 calls = [
                     CustomStopper(
                         monitor='val_loss',
                         patience=self._options.nn_patience,
                         restore_best_weights=True,
                         start_epoch=self._options.flow_temperature_steps)
                 ]
             else:
                 calls = [
                     callbacks.EarlyStopping(
                         monitor='val_loss',
                         patience=self._options.nn_patience,
                         restore_best_weights=True)
                 ]
     additional_calls = self._callbacks()
     if self._options.nn_tensorboard:
         tb_call = callbacks.TensorBoard(log_dir="./logs")
         if additional_calls is None:
             additional_calls = [tb_call]
         else:
             additional_calls.append(tb_call)
     if calls is None:
         calls = additional_calls
     elif additional_calls is not None:
         calls += additional_calls
     self._model.fit(x=x_train,
                     y=y_train,
                     validation_data=validation_data,
                     epochs=self._options.nn_epochs,
                     callbacks=calls,
                     batch_size=self._options.nn_batch_size)
     # Store real number of epochs where it stopped
     self._stopped_epoch = None
     if self._options.nn_patience and calls:
         for one_call in calls:
             if hasattr(one_call, 'stopped_epoch'):
                 self._stopped_epoch = (one_call.stopped_epoch -
                                        self._options.nn_patience)
     if self._stopped_epoch is None:
         self._stopped_epoch = self._options.nn_epochs
     if not self._options.lstm_stateful:
         # If not stateful prediction, then use same model for inference
         self._inference_model = self._model
         self._inference_batch_size = self._options.nn_batch_size
train_generator = DataGenerator(img_root=imgs_path, list_IDs=train_img_names, labels=train_labels,
                                batch_size=batch_size, label_max_length=max_labels)
test_generator = DataGenerator(img_root=imgs_path, list_IDs=test_img_names, labels=test_labels,
                               batch_size=batch_size, label_max_length=max_labels)


# In[ ]:


checkpoint = callbacks.ModelCheckpoint("./models/efb7-val_loss.{val_loss:.2f}.h5", monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1)
# checkpoint = callbacks.ModelCheckpoint("./models/loss.{loss:.2f}-acc.{acc:.2f}-val_loss.{val_loss:.2f}-val_acc.{val_acc:.2f}.h5", monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1)
# early = EarlyStopping(monitor='val_acc', min_delta=0, patience=3, verbose=1, mode='auto')
# reduce_lr = callbacks.ReduceLROnPlateau(monitor='acc', factor=0.1, patience=2, min_lr=0.000001)

learningRateScheduler = callbacks.LearningRateScheduler(lr_decay)
tensorboard = callbacks.TensorBoard(log_dir='./logs_b7')


# In[ ]:


history = model.fit_generator(generator=train_generator,  
                                    steps_per_epoch=ceil(len(train_labels) / batch_size),
                                    validation_data=test_generator, 
                                    validation_steps=ceil(len(test_labels) / batch_size),
                                    epochs=nb_epochs,
                                    callbacks = [checkpoint, tensorboard, learningRateScheduler],
                                    use_multiprocessing=True,
                                    workers=6, verbose=1)

    X_test = np.load("../audio_data/audio_test.npy")
    Y_train = pd.read_csv("../audio_data/labels_train.csv")

    # Simple preprocessing
    X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)
    X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)
    Y_train = np.array(Y_train)[:, -1] # set(Y_train) = {0, 1, 3, 4, 5, 6, 7, 8, 9}
    # Y_train = utils.to_categorical(Y_train) for "categorical_crossentropy" loss.
    print("The shape of X_train\X_test\Y_train: ", X_train.shape, X_test.shape, Y_train.shape)

    # Build 1-D CNN
    input_shape = (X_train.shape[1], 1)
    n_classes = int(Y_train.max()+1) # 10
    model = CNN_1d(input_shape, n_classes, opt='adam')
    model.summary()

    # Train the model
    tbCallBack = callbacks.TensorBoard(log_dir="./runs/TB_cnn1d")
    model.fit(X_train, Y_train, batch_size=BATCH_SIZE, epochs=NUM_EPOCHS,
                            validation_split=0.2, callbacks=[tbCallBack])
    #model.save("./runs/checkpoint_cnn1d")

    # Generate predicted label
    #models.load_model("./runs/checkpoint_cnn1d")
    prob_table = model.predict(X_test)
    Y_pred = backend.argmax(prob_table)
    print("The shape of Y_pred: ", Y_pred.shape)

    # Make submission file
    make_submission(Y_pred, "submission")
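
backend.argmax above returns a tensor; in a NumPy pipeline the equivalent call is np.argmax over the class axis, as this toy check shows:

import numpy as np

prob_table = np.array([[0.1, 0.7, 0.2],
                       [0.8, 0.1, 0.1]])  # toy (samples, classes) scores
print(np.argmax(prob_table, axis=1))      # [1 0]: predicted class per row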
                                          image_shape=image_shape,
                                          batch_size=batch_size)

    validation_generator = get_validation_generator(directory=validation_dir,
                                                    image_shape=image_shape,
                                                    batch_size=batch_size)

    model = get_model(image_shape=image_shape, Net=Net)

    print(model.summary())

    if gpu_count > 1:
        model = multi_gpu_model(model, gpus=gpu_count)

    tensorboard_cb = callbacks.TensorBoard(
        log_dir=log_dir,
        histogram_freq=1,
    )

    early_stopping_cb = callbacks.EarlyStopping(monitor='val_loss',
                                                patience=10,
                                                verbose=0,
                                                mode='min')

    checkpoint_path = 'model.h5'

    checkpoint_cb = callbacks.ModelCheckpoint(
        checkpoint_path,
        save_best_only=True,
        monitor='val_accuracy'  # 'val_accuracy' for TensorFlow 2; use 'val_acc' for TensorFlow 1
    )
def start_pong():
    UP_ACTION = 2
    DOWN_ACTION = 3
    gamma = 0.99
    x_train = []
    y_train = []
    rewards = []
    reward_sum = 0
    episode_num = 0
    POSITIVE = 1
    NEGATIVE = 0
    running_reward = None

    env = gym.make('Pong-v0')
    observation = env.reset()
    prev_input = None
    model = get_model()
    tbCallBack = callbacks.TensorBoard(log_dir=log_dir,
                                       histogram_freq=0,
                                       write_graph=True,
                                       write_images=True)

    while True:
        # preprocess the observation and use the difference between two
        # consecutive frames as the x_train input

        current_input = prepro(observation)
        x_diff = current_input - prev_input if prev_input is not None else np.zeros(
            80 * 80)
        prev_input = current_input

        # use the policy network to get the model's predicted probability of the UP action
        prediction = model.predict(np.expand_dims(x_diff, axis=1).T)

        action = UP_ACTION if np.random.uniform() < prediction else DOWN_ACTION

        # print('chosen action:', action)
        y_diff = POSITIVE if action == UP_ACTION else NEGATIVE
        # print('x frame', x_diff)

        # save results to training set
        x_train.append(x_diff)
        y_train.append(y_diff)

        # make next action step
        observation, reward, done, info = env.step(action)
        # print('observation', observation)
        # print('reward', reward)
        # print('done', done)
        # print('info', info)

        # log rewards data
        rewards.append(reward)

        # log rewards sum
        reward_sum += reward
        env.render()

        if done:
            print('End of episode:', episode_num, 'Total reward: ', reward_sum)

            # increment the episode number
            episode_num += 1

            # train the model on the episode's results: stack the training
            # data and labels, and weight each sample by the discounted
            # rewards received during the episode
            model.fit(
                x=np.vstack(x_train),
                y=np.vstack(y_train),
                verbose=0,
                # callbacks=[tbCallBack],
                sample_weight=discount_rewards(rewards, gamma))

            running_reward = reward_sum if running_reward is None else running_reward * 0.99 + reward_sum * 0.01
            # tflog('running_reward', running_reward)

            save_model(model)

            # reset the training data
            x_train = []
            y_train = []
            rewards = []
            reward_sum = 0
            prev_input = None
            # reset the game (restart the env)
            observation = env.reset()
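
discount_rewards is referenced above but not defined in the snippet. The standard REINFORCE-style implementation for Pong (a sketch under that assumption) accumulates discounted returns backwards, resets at every scoring event, and normalizes so the values are usable as sample weights:

import numpy as np

def discount_rewards(rewards, gamma):
    # assumed helper: discounted returns, reset at each non-zero reward
    # because every point scored ends a rally in Pong
    discounted = np.zeros(len(rewards))
    running = 0.0
    for t in reversed(range(len(rewards))):
        if rewards[t] != 0:
            running = 0.0
        running = running * gamma + rewards[t]
        discounted[t] = running
    discounted -= discounted.mean()
    discounted /= discounted.std() + 1e-8  # normalize for use as sample_weight
    return discounted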
# In[ ]:

checkpoint = callbacks.ModelCheckpoint(
    "./models_transform/epoch.{epoch:03d}-loss.{loss:.2f}-val_loss.{val_loss:.2f}-b7.h5",
    monitor='val_loss',
    verbose=0,
    save_best_only=True,
    save_weights_only=True,
    mode='auto',
    period=1)
# checkpoint = callbacks.ModelCheckpoint("./models/loss.{loss:.2f}-acc.{acc:.2f}-val_loss.{val_loss:.2f}-val_acc.{val_acc:.2f}.h5", monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1)
# early = EarlyStopping(monitor='val_acc', min_delta=0, patience=3, verbose=1, mode='auto')
# reduce_lr = callbacks.ReduceLROnPlateau(monitor='acc', factor=0.1, patience=2, min_lr=0.000001)

learningRateScheduler = callbacks.LearningRateScheduler(poly_decay)
tensorboard = callbacks.TensorBoard(log_dir='./logs_transform')
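
poly_decay is not defined in this cell; a common polynomial decay schedule it could plausibly be (INIT_LR, POWER, and MAX_EPOCHS are assumed placeholders; the notebook presumably uses its own values such as nb_epochs) looks like:

INIT_LR = 1e-3    # assumed initial learning rate
POWER = 1.0       # linear decay when POWER == 1
MAX_EPOCHS = 50   # stands in for the notebook's nb_epochs

def poly_decay(epoch):
    # decay the learning rate polynomially from INIT_LR toward 0
    return INIT_LR * (1 - epoch / float(MAX_EPOCHS)) ** POWER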

# In[ ]:

model.fit_generator(generator=train_generator,
                    steps_per_epoch=ceil(len(train_labels) / batch_size),
                    validation_data=test_generator,
                    validation_steps=ceil(len(test_labels) / batch_size),
                    epochs=nb_epochs,
                    callbacks=[checkpoint, tensorboard, learningRateScheduler],
                    use_multiprocessing=True,
                    workers=6,
                    verbose=1)

# In[ ]:
Example #18
    def _build_callback_hooks(
        self,
        models_dir: str,
        logs_dir: Optional[str] = None,
        is_training=True,
        logging_frequency=25,
        monitor_metric: str = "",
        monitor_mode: str = "",
        patience=2,
    ):
        """
        Build callback hooks for the training and evaluation loop

        Parameters
        ----------
        models_dir : str
            Path to directory to save model checkpoints
        logs_dir : str, optional
            Path to directory to save tensorboard logs
        is_training : bool, optional
            Whether we are building callbacks for training or evaluation
        logging_frequency : int, optional
            How often, in number of batches, to log training and evaluation progress
        monitor_metric : str, optional
            Name of metric to be used for ModelCheckpoint and EarlyStopping callbacks
        monitor_mode : {"max", "min"}, optional
            Mode for maximizing or minimizing the ModelCheckpoint and EarlyStopping
        patience : int, optional
            Number of epochs to wait before early stopping if metric change is below tolerance

        Returns
        -------
        callbacks_list : list
            List of callbacks to be used with the RelevanceModel training and evaluation
        """
        callbacks_list: list = list()

        if is_training:
            # Model checkpoint
            if models_dir and monitor_metric:
                checkpoints_path = os.path.join(
                    models_dir, RelevanceModelConstants.CHECKPOINT_FNAME
                )
                cp_callback = callbacks.ModelCheckpoint(
                    filepath=checkpoints_path,
                    save_weights_only=False,
                    verbose=1,
                    save_best_only=True,
                    mode=monitor_mode,
                    monitor=monitor_metric,
                )
                callbacks_list.append(cp_callback)

            # Early Stopping
            if monitor_metric:
                early_stopping_callback = callbacks.EarlyStopping(
                    monitor=monitor_metric,
                    mode=monitor_mode,
                    patience=patience,
                    verbose=1,
                    restore_best_weights=True,
                )
                callbacks_list.append(early_stopping_callback)

        # TensorBoard
        if logs_dir:
            tensorboard_callback = callbacks.TensorBoard(
                log_dir=logs_dir, histogram_freq=1, update_freq=5
            )
            callbacks_list.append(tensorboard_callback)

        # Debugging/Logging
        callbacks_list.append(DebuggingCallback(self.logger, logging_frequency))

        # Add more here

        return callbacks_list
def main(batch_size,
        training_dir,
        checkpoint_dir,
        epochs,
        n_fixed_layers,
        logger_filename,
        weight_file,
        class_weight):

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # dynamically grow the memory used on the GPU
    sess = tf.Session(config=config)
    tf.keras.backend.set_session(sess)
    df_train = pd.read_csv("dataset/training.csv")
    df_train = df_train.sample(frac=1,random_state=42)
    df_val = pd.read_csv("dataset/validation.csv")

    # build absolute file paths; optionally train on a subset of the data
    # via the commented-out slicing below
    df_train['Filename'] = training_dir+"/"+df_train['Filename'].astype(str)
    df_val['Filename'] = training_dir+"/"+df_val['Filename'].astype(str)
    #df_train = df_train[:100]
    #df_val = df_val[:100]
    generator = preprocess.tfdata_generator(df_train['Filename'].values,
                                        df_train['Drscore'].values,
                                        is_training=True,
                                        buffer_size=50,
                                        batch_size=batch_size)

    validation_generator = preprocess.tfdata_generator(df_val['Filename'].values,
                                        df_val['Drscore'].values,
                                        is_training=False,
                                        buffer_size=50,
                                        batch_size=batch_size)

    ## various callbacks
    tensorboard_cbk = callbacks.TensorBoard(log_dir=checkpoint_dir,
                                                    update_freq='epoch',
                                                    write_grads=False,
                                                    histogram_freq=0)
    checkpoint_cbk = callbacks.ModelCheckpoint(
        filepath=os.path.join(checkpoint_dir,'weights-{epoch:03d}.hdf5'),
        save_best_only=True,
        monitor='val_loss',
        verbose=1,
        save_weights_only=False)

    earlystop_ckb = callbacks.EarlyStopping(monitor='val_loss',
                        patience=5,
                        restore_best_weights=False)
    csv_callback = callbacks.CSVLogger(os.path.join(checkpoint_dir,logger_filename),append=True)

    reduce_lr = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5,
                              patience=3, min_lr=1e-6)
    lr_scheduler = callbacks.LearningRateScheduler(step_decay)

    model,base_model = create_model()

    ## freeze upper layers
    files = sorted(glob(os.path.join(checkpoint_dir, 'weights-*.hdf5')))
    if weight_file:
        model_file = weight_file
        initial_epoch = int(model_file[-8:-5])
        print('Resuming using saved model %s.' % model_file)
        model = tf.keras.models.load_model(model_file)
    elif files:
        model_file = files[-1]
        initial_epoch = int(model_file[-8:-5])
        print('Resuming using saved model %s.' % model_file)
        model = tf.keras.models.load_model(model_file)
    else:
        #model,base_model = create_model()
        initial_epoch = 0

    if n_fixed_layers:
        for layer in base_model.layers[:n_fixed_layers]:
            layer.trainable = False
        for layer in base_model.layers[n_fixed_layers:]:
            layer.trainable = True
            print("training layer {}".format(layer.name))
    if class_weight:
        class_weights=compute_class_weight('balanced',
                       np.unique(df_train["Drscore"].values),
                       df_train["Drscore"].values)
        weight_dict = dict([(i,class_weights[i]) for i in range(len(class_weights))])
    else:
        weight_dict=None
    model.fit(
        generator,
        epochs=epochs,
        initial_epoch=initial_epoch,
        steps_per_epoch=df_train.shape[0]//batch_size,
        verbose=1,
        validation_data=validation_generator,
        validation_steps=df_val.shape[0]//batch_size,
        class_weight=weight_dict,
        callbacks=[tensorboard_cbk,
                    checkpoint_cbk,
                    csv_callback,
                    reduce_lr])
Example #20
            self.batch_size,
            triplet_selector).make_one_shot_iterator().get_next())
        self.summary_writer = tf.summary.FileWriter("tb/%s" % opts.run)

    def on_epoch_end(self, epoch, logs):
        # TODO: how do we just use the models iterator here? don't care
        # that it's "wastes" examples doing this eval, it's all generator
        # based anyways...
        next_egs = self.sess.run(self.examples)
        sess = tf.keras.backend.get_session()
        summaries = sess.run(loss_fn.summaries, feed_dict={inputs: next_egs})
        #        percentage_non_zero = np.count_nonzero(per_elem_loss) / self.batch_size
        # log stats
        for summary in summaries:
            self.summary_writer.add_summary(summary, global_step=epoch)
        self.summary_writer.flush()


callbacks = [  # NB: this list shadows the imported callbacks module once assigned
    callbacks.ModelCheckpoint(filepath="runs/%s/model.{epoch}.hdf5" %
                              opts.run),
    callbacks.TensorBoard(log_dir="tb/%s" % opts.run),
    callbacks.TerminateOnNaN(),
    NumZeroLossCB()
]
model.fit(examples,
          epochs=opts.epochs,
          verbose=1,
          steps_per_epoch=opts.steps_per_epoch,
          callbacks=callbacks)
Example #21
def train_test_model(logdir, hparams):
    filter_kernel_2 = json.loads(hparams['filter_kernel_2'])
    units = json.loads(hparams['units'])

    # C3 Convolution.
    c3_input = keras.Input(shape=(x_train_c3[0].shape[0],
                                  x_train_c3[0].shape[1], 1),
                           name='c3_input')
    c3_model = layers.Conv2D(filters=int(hparams['filter_1']),
                             kernel_size=int(hparams['kernel_1']),
                             activation='relu')(c3_input)
    c3_model = layers.MaxPooling2D(pool_size=2)(c3_model)
    c3_model = layers.Dropout(hparams['dropout'])(c3_model)
    if int(filter_kernel_2[0]) > 0:
        c3_model = layers.Conv2D(filters=int(filter_kernel_2[0]),
                                 kernel_size=int(filter_kernel_2[1]),
                                 activation='relu')(c3_model)
        c3_model = layers.MaxPooling2D(pool_size=2)(c3_model)
        c3_model = layers.Dropout(hparams['dropout'])(c3_model)
    c3_model = layers.Flatten()(c3_model)

    # C4 Convolution.
    c4_input = keras.Input(shape=(x_train_c3[0].shape[0],
                                  x_train_c3[0].shape[1], 1),
                           name='c4_input')  # shape taken from x_train_c3; presumably C3 and C4 share dimensions
    c4_model = layers.Conv2D(filters=int(hparams['filter_1']),
                             kernel_size=int(hparams['kernel_1']),
                             activation='relu')(c4_input)
    c4_model = layers.MaxPooling2D(pool_size=2)(c4_model)
    c4_model = layers.Dropout(hparams['dropout'])(c4_model)
    if int(filter_kernel_2[0]) > 0:
        c4_model = layers.Conv2D(filters=int(filter_kernel_2[0]),
                                 kernel_size=int(filter_kernel_2[1]),
                                 activation='relu')(c4_model)
        c4_model = layers.MaxPooling2D(pool_size=2)(c4_model)
        c4_model = layers.Dropout(hparams['dropout'])(c4_model)
    c4_model = layers.Flatten()(c4_model)

    # Dense concatenation.
    model = layers.Concatenate()([c3_model, c4_model])
    for unit in units:
        model = layers.Dense(unit, activation='relu')(model)
        model = layers.Dropout(hparams['dropout'])(model)
    model = layers.Dense(1, activation='sigmoid')(model)

    model = tf.keras.models.Model(inputs=[c3_input, c4_input], outputs=model)
    model.summary()
    if not os.path.exists(logdir):
        os.makedirs(logdir)
    plot_model(model,
               logdir + '/model.png',
               show_shapes=True,
               show_layer_names=False)
    model.compile(optimizer=optimizers.Adam(learning_rate=hparams['lr'],
                                            decay=0.001),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    cb = [
        callbacks.TensorBoard(log_dir=logdir),
        hp.KerasCallback(logdir, hparams)
    ]

    history = model.fit({
        'c3_input': x_train_c3,
        'c4_input': x_train_c4
    },
                        y_train,
                        validation_data=({
                            'c3_input': x_test_c3,
                            'c4_input': x_test_c4
                        }, y_test),
                        batch_size=64,
                        epochs=500,
                        callbacks=cb,
                        verbose=0)
    return model, history
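
The hp.KerasCallback used above comes from the TensorBoard HParams plugin; it records the hparams dict alongside the run so sweeps can be compared in the HParams dashboard. Assuming that is the hp in scope here, the usual import is:

from tensorboard.plugins.hparams import api as hp  # provides hp.KerasCallback(logdir, hparams)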
Example #22
from tensorflow.keras import callbacks

cs = [
    callbacks.TerminateOnNaN(),
    callbacks.ModelCheckpoint(Config.log.tensorboard + '/weights.h5',
                              save_best_only=True,
                              save_weights_only=True,
                              verbose=1),
    callbacks.ReduceLROnPlateau(
        patience=Config.training.reduce_lr_on_plateau_pacience,
        factor=Config.training.reduce_lr_on_plateau_factor,
        verbose=1),
    callbacks.EarlyStopping(patience=Config.training.early_stopping_patience,
                            verbose=1),
    callbacks.TensorBoard(Config.log.tensorboard, write_graph=False)
]

try:
    disc.fit(train_ds,
             validation_data=val_ds,
             epochs=Config.training.epochs,
             initial_epoch=0,
             callbacks=cs)
except KeyboardInterrupt:
    print('stopped')

# In[ ]:

disc.load_weights(Config.log.tensorboard + '/weights.h5')
Example #23
        return model

    model = model_final(preproc_source_sentences.shape,
                        preproc_target_sentences.shape[1],
                        len(source_tokenizer.word_index) + 1,
                        len(target_tokenizer.word_index) + 1)
    model.summary()

    # Callbacks
    mfile = 'models/Glove_training_bach32.model.h5'
    model_checkpoint = callbacks.ModelCheckpoint(mfile,
                                                 monitor='accuracy',
                                                 save_best_only=True,
                                                 save_weights_only=True)
    logger = callbacks.CSVLogger('results/training_bach_32.log')
    tensorboard = callbacks.TensorBoard(log_dir='results/training_bach_32')
    callbacks = [logger, tensorboard]  # NB: shadows the imported callbacks module

    # Alternative: train with the callbacks list:
    # model.fit(X_train, Y_train, batch_size=1024, epochs=25, validation_split=0.1, callbacks=callbacks)

    # Train the model (note: the callbacks list above is not passed here):
    model.fit(X_train,
              Y_train,
              batch_size=32,
              epochs=10,
              validation_split=0.01)

    Predicted_by_Glove = model.predict(X_test, len(X_test))

    #Save Model
Example #24
def scheduler(epoch):
    if epoch < 10:
        return lr
    else:
        return lr * tf.math.pow(0.9, epoch-10)  # tf.math.pow(0.9, 10)=0.35, tf.math.pow(0.9, 20)=0.122, tf.math.pow(0.9, 50)=0.005

learningRateScheduler_poly = callbacks.LearningRateScheduler(poly_decay)
learningRateScheduler_scheduler = callbacks.LearningRateScheduler(scheduler)

ReduceLROnPlateau = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.000001)

logs_dir = f'logs/{datetime.date.today()}'
if not os.path.exists(logs_dir):
    os.makedirs(logs_dir) 
tensorboard = callbacks.TensorBoard(log_dir=logs_dir)

checkpoint = callbacks.ModelCheckpoint(
    os.path.join(checkpoints_dir, 'tf_yolov3_{epoch:02d}_{loss:.4f}_{val_loss:.4f}_weights.h5'),
    verbose=1, save_weights_only=True, save_best_only=True, monitor='val_loss', mode='auto'
)

model.fit_generator(
    generator=train_generator,
    steps_per_epoch=ceil(len(train_list_IDs) / batch_size),
    initial_epoch=0,
    epochs=Epochs,
#     callbacks=[learningRateScheduler, checkpoint, tensorboard],
    callbacks=[ReduceLROnPlateau, checkpoint, tensorboard],
    validation_data=val_generator,
    validation_steps=ceil(len(test_list_IDs) / batch_size),
)
Example #25
def train_deepdrug(batch_size,
                   lr,
                   epoch,
                   output,
                   k_fold=10,
                   subset="all",
                   suffix="resigrids"):
    # optimize gpu memory usage
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    assert len(physical_devices) > 0
    tf.config.experimental.set_memory_growth(physical_devices[0], True)

    # load the data
    with open("../data/tough_c1/control-pocket." + suffix, "rb") as f:
        grids = list(pk.load(f).values())
    labels = [np.array([0])] * len(grids)
    if subset == "nucleotide":
        with open("../data/tough_c1/nucleotide-pocket." + suffix, "rb") as f:
            grids += list(pk.load(f).values())
        labels += [np.array([1])] * (len(grids) - len(labels))
        classes = 1
    elif subset == "heme":
        with open("../data/tough_c1/heme-pocket." + suffix, "rb") as f:
            grids += list(pk.load(f).values())
        labels += [np.array([1])] * (len(grids) - len(labels))
        classes = 1
    elif subset == "all":
        with open("../data/tough_c1/nucleotide-pocket." + suffix, "rb") as f:
            grids += list(pk.load(f).values())
        labels += [np.array([1])] * (len(grids) - len(labels))
        with open("../data/tough_c1/heme-pocket." + suffix, "rb") as f:
            grids += list(pk.load(f).values())
        labels += [np.array([2])] * (len(grids) - len(labels))
        with open("../data/tough_c1/steroid-pocket." + suffix, "rb") as f:
            grids += list(pk.load(f).values())
        labels += [np.array([3])] * (len(grids) - len(labels))
        classes = 4
    else:
        print("Invalid subset name. Please choose from all/nucleotide/heme.")
        return
    # shuffle input
    idx_list = list(range(len(labels)))
    rd.shuffle(idx_list)
    grids = list(np.array(grids)[idx_list])
    labels = list(np.array(labels)[idx_list])
    # print("grids type:", type(grids))
    # print("grid type:", type(grids[0]), "shape:", grids[0].shape)
    # print("labels type:", type(labels))
    # print("label type:", type(labels[0]), "shape:", labels[0].shape)

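    # k-fold cross-validation: each fold gets a fresh session, a fresh model,
    # and its own TensorBoard log directory under a shared timestamp.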
    histories = list()
    timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime())
    for k in range(k_fold):
        tf.keras.backend.clear_session()
        # get training data with respect to the kth fold
        x, y, val_x, val_y = split_dataset_with_kfold(grids, labels, k, k_fold)

        if classes > 1:
            y = to_categorical(y, num_classes=classes)
            val_y = to_categorical(val_y, num_classes=classes)

        # to balance different classes
        sample_weights = compute_sample_weight('balanced', y)

        val_data = (val_x, val_y)
        # build & compile model
        mdl = DeepDrug3DBuilder.build(classes=classes)
        adam = Adam(lr=lr,
                    beta_1=0.9,
                    beta_2=0.999,
                    epsilon=None,
                    decay=0.0,
                    amsgrad=False)
        loss = "binary_crossentropy" if classes==1 \
            else "categorical_crossentropy"
        metric = "binary_accuracy" if classes == 1 else "categorical_accuracy"
        auc_metric = tf.keras.metrics.AUC()
        precision_metric = tf.keras.metrics.Precision()
        recall_metric = tf.keras.metrics.Recall()
        mdl.compile(
            optimizer=adam,
            loss=loss,
            metrics=[metric, auc_metric, precision_metric, recall_metric])
        # callback function for model checking
        log_dir = os.path.join('training_logs', timestamp, "fold_{}".format(k))
        os.makedirs(log_dir, exist_ok=True)
        tfCallBack = callbacks.TensorBoard(log_dir=log_dir)
        history = mdl.fit(x,
                          y,
                          epochs=epoch,
                          batch_size=batch_size,
                          sample_weight=sample_weights,
                          validation_data=val_data,
                          shuffle=True,
                          callbacks=[tfCallBack])
        histories.append(history)

    # log the metrics after each cross-validation training
    val_accs = list()
    val_aucs = list()
    val_precisions = list()
    val_recalls = list()
    for his in histories:
        try:
            val_accs.append(his.history["val_binary_accuracy"])
        except KeyError:
            val_accs.append(his.history["val_categorical_accuracy"])
        val_aucs.append(his.history["val_auc"])
        val_precisions.append(his.history["val_precision"])
        val_recalls.append(his.history["val_recall"])

    # find best epoch
    val_accs = np.array(val_accs)
    avgs = np.mean(val_accs, axis=0)
    best_epoch = np.argmax(avgs)
    # get the accuracy and standard deviation of the best epoch
    max_accs = val_accs[:, best_epoch]
    acc_avg = np.mean(max_accs)
    acc_std = np.std(max_accs)
    # get the auc score of the best epoch
    max_aucs = np.array(val_aucs)[:, best_epoch]
    auc_avg = np.mean(max_aucs)
    auc_std = np.std(max_aucs)
    # get the precision, recall, and F1 score of the best epoch
    max_precisions = np.array(val_precisions)[:, best_epoch]
    precision_avg = np.mean(max_precisions)
    precision_std = np.std(max_precisions)
    max_recalls = np.array(val_recalls)[:, best_epoch]
    recall_avg = np.mean(max_recalls)
    recall_std = np.std(max_recalls)
    # Define F1 as 0 where precision + recall == 0 to avoid dividing by zero.
    denom = max_precisions + max_recalls
    max_f1s = np.divide(2 * max_precisions * max_recalls, denom,
                        out=np.zeros_like(denom), where=denom > 0)
    f1_avg = np.mean(max_f1s)
    f1_std = np.std(max_f1s)
    # print and save the training results
    print("{}-fold cross validation performs the best "
          "at epoch {}".format(k_fold, best_epoch + 1))
    print("Accuracy is {} +- {}".format(acc_avg, acc_std))
    print("AUC ROC is {} +- {}".format(auc_avg, auc_std))
    print("Precision is {} +- {}".format(precision_avg, precision_std))
    print("Recall is {} +- {}".format(recall_avg, recall_std))
    print("F1 score is {} +- {}".format(f1_avg, f1_std))
    print()
    with open(os.path.join("training_logs", timestamp, "readme"), "w") as f:
        print("dataset: {}".format(subset), file=f)
        print("grid type: {}".format(suffix), file=f)
        print("batch size: {}".format(batch_size), file=f)
        print("learning rate: {}".format(lr), file=f)
        print("epochs: {}".format(epoch), file=f)
        print("validation folds: {}".format(k_fold), file=f)
        print("{}-fold cross validation performs the best "
              "at epoch {}".format(k_fold, best_epoch + 1),
              file=f)
        print("Accuracy is {} +- {}".format(acc_avg, acc_std), file=f)
        print("AUC ROC is {} +- {}".format(auc_avg, auc_std), file=f)
        print("Precision is {} +- {}".format(precision_avg, precision_std),
              file=f)
        print("Recall is {} +- {}".format(recall_avg, recall_std), file=f)
        print("F1 score is {} +- {}".format(f1_avg, f1_std), file=f)
Exemple #26
    def train(self,
              x_train_lstm,
              x_train_svm,
              y_train,
              x_val_lstm=None,
              x_val_svm=None,
              y_val=None,
              x_train_u=None,
              x_val_u=None,
              n_epochs=20,
              class_weight=None):

        import shutil  # stdlib; used to clear stale logs portably

        logdir = os.path.join("tensorboard_log", self.detection,
                              self.save_model_name, self.num_set)
        # Clear any stale logs, then recreate an empty log directory (the
        # original shelled out to mkdir -p and then rm -r, which deleted the
        # directory it had just created).
        shutil.rmtree(logdir, ignore_errors=True)
        os.makedirs(logdir)
        tensorboard_callback = callbacks.TensorBoard(log_dir=logdir)

        h5_save_path = os.path.join(
            "Models", self.detection,
            self.model_name + "_" + self.num_set + ".h5")

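        # The dict keys must match the names of the model's Input layers so
        # tf.data can route each feature tensor to the right input.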
        x_train_dict = {
            'lstm_features': x_train_lstm,
            'svm_features': x_train_svm,
            'attention_params': x_train_u
        }
        x_val_dict = {
            'lstm_features': x_val_lstm,
            'svm_features': x_val_svm,
            'attention_params': x_val_u
        }

        train_set = tensorflow.data.Dataset.from_tensor_slices(
            (x_train_dict, y_train)).shuffle(
                buffer_size=Config.BUFFER_SIZE).batch(32).prefetch(
                    tensorflow.data.experimental.AUTOTUNE)
        val_set = tensorflow.data.Dataset.from_tensor_slices(
            (x_val_dict, y_val)).shuffle(
                buffer_size=Config.BUFFER_SIZE).batch(32).prefetch(
                    tensorflow.data.experimental.AUTOTUNE)

        model_cp = callbacks.ModelCheckpoint(filepath=h5_save_path,
                                             monitor="val_loss",
                                             verbose=0,
                                             save_best_only=True,
                                             save_weights_only=False,
                                             mode="min",
                                             period=10)
        """
        history = self.model.fit(x = x_train_dict, 
                                 y = y_train, 
                                 shuffle = 1,
                                 validation_data = (x_val_dict, y_val),
                                 batch_size = Config.BATCH_SIZE, 
                                 epochs = Config.EPOCHS,
                                 class_weight = class_weight,
                                 callbacks = [model_cp, tensorboard_callback],
                                 use_multiprocessing = False)
        """
        history = self.model.fit(x=train_set,
                                 validation_data=val_set,
                                 epochs=Config.EPOCHS,
                                 verbose=1,
                                 class_weight=class_weight,
                                 callbacks=[model_cp, tensorboard_callback],
                                 use_multiprocessing=False)

        # Loss on the training and validation sets
        loss = history.history['loss']
        val_loss = history.history["val_loss"]

        figfile = os.path.join("Fig", self.detection,
                               self.model_name + "_" + self.num_set + ".png")
        base, _ = os.path.splitext(figfile)
        pickle.dump(loss, open(base + "_loss.cpickle", "wb"))
        pickle.dump(val_loss, open(base + "_val_loss.cpickle", "wb"))
        plotCurve(loss, val_loss, 'Model Loss', 'loss', figfile)
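
Note that the period= argument above is deprecated in recent TensorFlow
releases, which express the same intent through save_freq, counted in batches
rather than epochs. A sketch, where steps_per_epoch is a hypothetical value
standing in for the actual number of batches per epoch:

# Hypothetical newer-TF equivalent: checkpoint every 10 epochs' worth of batches.
model_cp = callbacks.ModelCheckpoint(filepath=h5_save_path,
                                     monitor="val_loss",
                                     save_best_only=True,
                                     mode="min",
                                     save_freq=10 * steps_per_epoch)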
Exemple #27
with open(args.json_file) as f:
    params = json.load(f)
print("Params loaded: ", params)

train_data, test_data = load_imdb(params)

model = cnn_zhang(params)
model.summary()

model.compile(loss='binary_crossentropy',
              optimizer=tf.optimizers.Adam(learning_rate=params["lr"]),
              metrics=['accuracy'])

cb = []
cb.append(callbacks.TensorBoard(params["logs_dir"]))
checkp = callbacks.ModelCheckpoint(params["weights_dir"] + '/weights.h5',
                                   save_best_only=True,
                                   save_weights_only=True)
cb.append(checkp)

# Make sure the weights directory exists before resuming or checkpointing.
os.makedirs(params["weights_dir"], exist_ok=True)

if os.path.exists(params["weights_dir"] + '/weights.h5'):
    print("Loading weights")
    model.load_weights(params["weights_dir"] + '/weights.h5')

model.fit(train_data,
          epochs=params["epochs"],
          validation_data=test_data,
          # The snippet is truncated here; passing the callbacks assembled
          # above is the natural completion.
          callbacks=cb)
Exemple #28
    map_feature = Lambda(reshape_query)(map_feature)
    pred = Lambda(lambda x: prior_dist(x))([out_feature, map_feature])  # negative distance
    combine = Model([sample, inp], pred)
    optimizer = Adam(0.001)
    combine.compile(loss='categorical_crossentropy',
                    optimizer=optimizer,
                    metrics=['categorical_accuracy'])
    print(combine.summary())
    train_loader = DataGenerator(way=train_way, query=train_query, shot=shot, num_batch=1000)
    (x,y),z = train_loader[0]
    print(x.shape, y.shape, z.shape)
    val_loader = DataGenerator(data_type='val',way=val_way, shot=shot)
    (x,y),z = val_loader[0]
    print(x.shape, y.shape, z.shape)
    save_conv = SaveConv()
    reduce_lr = cb.ReduceLROnPlateau(monitor='val_loss', factor=0.4,
                                     patience=2, min_lr=1e-8)
    lr_sched = cb.LearningRateScheduler(scheduler)
    tensorboard = cb.TensorBoard()
    combine.fit_generator(train_loader,
                          epochs=1000,
                          validation_data=val_loader,
                          use_multiprocessing=False,
                          workers=4,
                          shuffle=False,
                          callbacks=[save_conv, lr_sched, tensorboard])

    save_model(conv, "model/omniglot_conv")


# images, labels = zip(*list(loader('python/images_background')))
# images = np.expand_dims(images, axis=-1)
# images = np.repeat(images, repeats=3, axis=-1)
# print(images.shape)
# main_labels, sub_labels= [x[0] for x in labels], [x[1] for x in labels]
# encoder = LabelBinarizer()
# enc_main_labels = encoder.fit_transform(main_labels)
# output_num = len(np.unique(main_labels))
# bottleneck_model = conv_model()
# bottleneck_model.trainable = False
Exemple #29
    x_train, y_train = read_dataset(args.train_dataset_filename)

    validation_data = None
    if args.val_dataset_filename is not None:
        validation_data = read_dataset(args.val_dataset_filename)

    model = build_model()

    optimizer = build_optimizer()
    loss = build_loss()
    model.compile(loss=loss, optimizer=optimizer, metrics=args.metrics)
    model.summary()

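    # Assemble the callback list from whichever options the CLI enables.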
    tf_callbacks = []
    if args.logsout_path:
        tf_callbacks.append(tfcb.TensorBoard(log_dir=args.logsout_path,
                                             histogram_freq=0,
                                             write_graph=True,
                                             write_images=True))
    if args.model_snapshots_path:
        tf_callbacks.append(EpochLogger())
    if args.best_model_monitor:
        tf_callbacks.append(tfcb.ModelCheckpoint(
            filepath=args.model_path,
            save_best_only=True,
            monitor=args.best_model_monitor,
            mode='auto',
            verbose=1))

    start_time = time.time()
    history = model.fit(
        x_train, y_train,
        epochs=args.epochs,
        batch_size=args.batch_size,
        # The snippet is truncated here; wiring in the validation data and
        # callbacks prepared above is the natural completion.
        validation_data=validation_data,
        callbacks=tf_callbacks)
Exemple #30
    layers.Dense(2, activation='softmax')
])

k_model.compile(optimizer='Adam',
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])

filepath = os.path.join(dir, 'models/defaults_keras_model.hdf5')
log_dir = os.path.join(dir, 'logs/1')
# Use a distinct name so the imported callbacks module is not shadowed.
cb_list = [
    callbacks.ModelCheckpoint(filepath=filepath,
                              verbose=1,
                              monitor='val_loss',
                              save_best_only=True),
    # Check out the training history later with TensorBoard
    callbacks.TensorBoard(log_dir=log_dir),
    callbacks.EarlyStopping(patience=1)
]

k_model.fit(x_train,
            y_train,
            epochs=1000,
            validation_split=0.1,
            verbose=2,
            callbacks=cb_list)
del k_model
k_model = models.load_model(filepath=filepath)
test_result = k_model.evaluate(x=x_test, y=y_test)
print(
    f'Test result:\nTest loss = {test_result[0]}, Test accuracy = {test_result[1]}'
)
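
All of these examples only write event files; viewing them still requires
launching TensorBoard, either from a shell (tensorboard --logdir <log_dir>)
or in-process. A minimal sketch using TensorBoard's program API, assuming a
recent tensorboard package and the log_dir defined above:

from tensorboard import program

# Start a TensorBoard server pointed at the log directory used for training.
tb = program.TensorBoard()
tb.configure(argv=[None, '--logdir', log_dir])
url = tb.launch()
print('TensorBoard listening on', url)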