Example #1
def train(model_name):
    # load the data
    (train_images, train_labels), (test_images,
                                   test_labels) = fashion_mnist.load_data()

    # create a TensorBoard callback
    model_dir = root_dir(os.path.join('training', model_name))
    tensorboard_callback = TensorBoard(log_dir=model_dir)

    # create a checkpoint callback
    checkpoint_path = os.path.join(model_dir, 'weights.ckpt')
    checkpoint_callback = ModelCheckpoint(checkpoint_path,
                                          save_weights_only=True)

    # train the model
    model = create_model()
    model.fit(train_images,
              train_labels,
              epochs=5,
              callbacks=[checkpoint_callback, tensorboard_callback],
              validation_data=(test_images, test_labels))
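This example depends on two project helpers that are not shown: root_dir, which resolves a path against the project root, and create_model, which builds the classifier. A minimal sketch of both, assuming a simple Fashion-MNIST architecture (the layer sizes below are illustrative, not the original):

import os
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense

def root_dir(path):
    # resolve a path relative to the project root (assumption: the
    # project root is the directory containing this file)
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), path)

def create_model():
    # simple Fashion-MNIST classifier; the architecture is illustrative
    model = Sequential([
        Flatten(input_shape=(28, 28)),
        Dense(128, activation='relu'),
        Dense(10, activation='softmax'),
    ])
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model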
Example #2
def get_call_back(lr, batch_size):
    """
    定义call back
    :return:
    """
    text = '{}-{}-{}'.format(cfg.base_model_name, batch_size, lr)
    checkpoint = ModelCheckpoint(filepath='/tmp/ssd-' + text + '.{epoch:03d}.h5',
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 save_weights_only=True,
                                 save_freq='epoch'
                                 )
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, min_delta=2e-3,
                                  patience=10, min_lr=1e-5)
    stop = EarlyStopping(monitor='val_loss', patience=20)
    # scheduler = LearningRateScheduler(lr_schedule(epochs, lr))

    log = TensorBoard(log_dir='log-{}-{}'.format(text,
                                                 time.strftime("%Y%m%d", time.localtime())))
    return [checkpoint, reduce_lr, stop, log]
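The commented-out scheduler line assumes a project-level lr_schedule factory that is not shown. A minimal sketch of such a factory (the breakpoints and decay factors are assumptions, not the original values):

from tensorflow.keras.callbacks import LearningRateScheduler

def lr_schedule(epochs, base_lr):
    # returns a function mapping epoch -> learning rate; the 50%/75%
    # breakpoints and 10x decay steps are assumptions
    def schedule(epoch):
        if epoch >= int(0.75 * epochs):
            return base_lr * 0.01
        if epoch >= int(0.5 * epochs):
            return base_lr * 0.1
        return base_lr
    return schedule

scheduler = LearningRateScheduler(lr_schedule(100, 1e-3), verbose=1)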
Example #3
    def __init__(self):
        self.model_id = None
        self.input_param = None
        self.nb_steps = None
        self.step_length = None
        # --- TF ---
        self.model = None
        self.losses = None
        self.loss_lambdas = None
        self.opt_param = None
        self.decay = None
        self.type_loss = None

        self.model_options = None

        # Spare GPU: silence TF logging and enable memory growth
        tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
        self.allow_growth()

        log_dir = os.path.join('tensorboard', f'{time()}')
        self.tensorboard = TensorBoard(log_dir=log_dir)
Example #4
def train(args, params):
    '''
    Train model
    '''
    model = build_model(params)
    X_train, Y1_train, Y2_train, X_test, Y1_test, Y2_test = load_dataset(
        args.dataset_name, float(args.num_train))

    print('Fitting model...')
    results = model.fit(
        X_train, [Y1_train, Y2_train],
        epochs=args.epochs,
        verbose=1,
        validation_data=(X_test, [Y1_test, Y2_test]),
        callbacks=[SendMetrics(),
                   TensorBoard(log_dir=TENSORBOARD_DIR)])

    _, acc = model.evaluate(X_test, [Y1_test, Y2_test], verbose=0)
    LOG.debug('Final result is: %f', acc)
    nni.report_final_result(acc)
    print('Final result is: %f' % acc)
Example #5
    def compile_model(self):
        # init summary writer for tensorboard
        self.callback1 = TensorBoard(self.log_dir + '/discriminator')
        self.callback2 = TensorBoard(self.log_dir + '/generator')
        self.callback3 = TensorBoard(self.log_dir + '/generated_images')

        # model stuff
        input_shape = [self.image_size, self.image_size, self.image_channels]
        adam1 = Adam(lr=self.lr)
        adam2 = Adam(lr=self.lr * 2)

        # init and add multi-gpu support
        try:
            self.discriminator = multi_gpu_model(self.discriminator(),
                                                 gpus=self.gpu)
        except Exception:
            self.discriminator = self.discriminator()
        try:
            self.generator = multi_gpu_model(self.generator(), gpus=self.gpu)
        except Exception:
            self.generator = self.generator()

        # compile discriminator
        self.discriminator.compile(loss='binary_crossentropy', optimizer=adam1)

        # compile generator
        input_tensor = Input(shape=input_shape)
        generated_cartoon_tensor = self.generator(input_tensor)
        self.discriminator.trainable = False  # here we only train the generator
        discriminator_output = self.discriminator(generated_cartoon_tensor)
        self.train_generator = Model(
            input_tensor,
            outputs=[generated_cartoon_tensor, discriminator_output])
        # add multi-gpu support
        try:
            self.train_generator = multi_gpu_model(self.train_generator,
                                                   gpus=self.gpu)
        except Exception:
            pass
        self.train_generator.compile(
            loss=[self.vgg_loss, 'binary_crossentropy'],
            loss_weights=[float(self.weight), 1.0],
            optimizer=adam2)

        # set callback model
        self.callback1.set_model(self.discriminator)
        self.callback2.set_model(self.train_generator)
        self.callback3.set_model(self.train_generator)
Example #6
def createModel(CONV_LAYERS,
                CONV_LAYER_SIZE,
                DENSE_LAYERS,
                DENSE_LAYER_SIZE,
                BATCH_SIZE,
                OPTIMIZER="adam",
                LOSS_FUNCTION="binary_crossentropy",
                EPOCHS=2):
    lf = LOSS_FUNCTION.replace("_", "-")
    global MODEL_NAME
    MODEL_NAME = f"R-gyar-prototype-model_convs-{CONV_LAYERS}_convnodes-{CONV_LAYER_SIZE}_denses-{DENSE_LAYERS}_densenodes-{DENSE_LAYER_SIZE}_batch-{BATCH_SIZE}_opt-{OPTIMIZER}_loss-{lf}_epochs-{EPOCHS}_time-{round(time.time())}"
    if MODEL_NAME in os.listdir('prototypelogs'):
        raise FileExistsError(MODEL_NAME)
    else:
        global tensorboard
        tensorboard = TensorBoard(log_dir='prototypelogs/{}'.format(MODEL_NAME))
        if Train:
            print("Model Created: ", MODEL_NAME)

        model = Sequential()

        model.add(Conv2D(CONV_LAYER_SIZE, (3, 3), input_shape=(100, 100, 1)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        for _ in range(CONV_LAYERS - 1):
            model.add(Conv2D(CONV_LAYER_SIZE, (3, 3)))
            model.add(Activation('relu'))
            model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Flatten())  # converts the 3D feature maps to 1D feature vectors
        for _ in range(DENSE_LAYERS):
            model.add(Dense(DENSE_LAYER_SIZE))
            model.add(Activation('relu'))
            model.add(Dropout(0.5))

        model.add(Dense(1))
        model.add(Activation('sigmoid'))
        return model
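A usage sketch for createModel; X and y stand in for image data of shape (N, 100, 100, 1) and binary labels prepared elsewhere in the project, and the hyperparameters are arbitrary:

# illustrative usage; X and y are assumed to be image data of shape
# (N, 100, 100, 1) and binary labels, prepared elsewhere in the project
model = createModel(CONV_LAYERS=3, CONV_LAYER_SIZE=64,
                    DENSE_LAYERS=1, DENSE_LAYER_SIZE=128,
                    BATCH_SIZE=32)
model.compile(optimizer="adam", loss="binary_crossentropy",
              metrics=["accuracy"])
model.fit(X, y, batch_size=32, epochs=2,
          validation_split=0.1, callbacks=[tensorboard])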
Example #7
    def init_callbacks(self):
        experiment = None
        if hasattr(self.config, "comet_api_key"):
            experiment = Experiment(api_key=self.config.comet_api_key,
                                    project_name=self.config.experiment_name)
            experiment.log_parameters(self.config)
            self.callbacks.append(experiment.get_keras_callback())

        self.callbacks.append(
            Evaluater(eval_data=self.data_loader.get_val_generator(),
                      eval_steps=self.data_loader.get_val_steps(),
                      ref_data=self.data_loader.get_reference_data(),
                      config=self.config,
                      comet_experiment=experiment))

        self.callbacks.append(
            ReduceLROnPlateau(monitor=self.config.checkpoint_monitor,
                              factor=self.config.lr_reduce_factor,
                              patience=self.config.lr_reduce_patience,
                              verbose=1,
                              mode=self.config.checkpoint_mode))

        self.callbacks.append(
            ModelCheckpoint(
                filepath=os.path.join(
                    self.config.checkpoint_dir,
                    '%s-{epoch:02d}-{val_accuracy:.2f}.hdf5' %
                    self.config.experiment_name),
                monitor=self.config.checkpoint_monitor,
                mode=self.config.checkpoint_mode,
                save_best_only=self.config.checkpoint_save_best_only,
                save_weights_only=self.config.checkpoint_save_weights_only,
                verbose=self.config.checkpoint_verbose,
            ))

        self.callbacks.append(
            TensorBoard(
                log_dir=self.config.tensorboard_log_dir,
                write_graph=self.config.tensorboard_write_graph,
            ))
Example #8
    def train(self,
              x_train,
              y_train,
              learning_rate,
              epochs,
              batch_size,
              tb_logs_dir=None,
              verbose=False):
        early_stopping_callback = EarlyStopping(monitor="val_loss",
                                                patience=25)
        callbacks = [early_stopping_callback]

        if tb_logs_dir:
            date_time = datetime.now().strftime('%Y-%m-%d-%H%M%S')
            log_name = os.path.join(tb_logs_dir,
                                    "{}_{}".format("transfer_net", date_time))
            # defining callbacks for training
            tensorboard_callback = TensorBoard(log_dir=log_name,
                                               write_graph=True,
                                               write_images=True)
            callbacks += [tensorboard_callback]

        x_train, x_val, y_train, y_val = train_test_split(x_train,
                                                          y_train,
                                                          train_size=0.8,
                                                          random_state=1)
        y_train = tf.keras.utils.to_categorical(y_train,
                                                self.num_output_classes)
        y_val = tf.keras.utils.to_categorical(y_val, self.num_output_classes)

        self.model.compile(loss="categorical_crossentropy",
                           optimizer=keras.optimizers.Adam(lr=learning_rate),
                           metrics=['accuracy', f1_score])
        self.hist = self.model.fit(x_train,
                                   y_train,
                                   epochs=epochs,
                                   batch_size=batch_size,
                                   verbose=verbose,
                                   callbacks=callbacks,
                                   validation_data=(x_val, y_val))
Example #9
    def train(self):
        dataset = Datasets("./data/brunch")
        data, label, mask = dataset.read()
        print(data.shape, label.shape, mask.shape)
        # save the full model after each epoch
        filepath = "./data/checkpoints/init/model-{epoch:02d}.hdf5"
        checkpoint = ModelCheckpoint(filepath,
                                     save_weights_only=False,
                                     verbose=1)
        # display tensorboard
        log_dir = "./data/logs/fit/" + datetime.datetime.now().strftime(
            "%Y%m%d-%H%M%S")
        tensorboard_callback = TensorBoard(log_dir=log_dir, histogram_freq=1)

        self.model.fit(x=[data, label, mask],
                       y=[mask, mask],
                       steps_per_epoch=None,
                       batch_size=self.batch_size,
                       shuffle=True,
                       epochs=10,
                       validation_split=0.1,
                       callbacks=[checkpoint, tensorboard_callback])
Example #10
    def _build_model(self):
        model = Sequential()
        # input shape (samples, rows, cols, channels)
        model.add(Conv2D(32, (3, 3), strides=(1, 1), input_shape=(24, 10, 1),
                         kernel_initializer=initializers.glorot_uniform(), activation=activations.relu,
                         kernel_regularizer=regularizers.l2(0.01)))  # kernel initialize weights

        model.add(Conv2D(64, (3, 3), strides=(1, 1), activation=activations.relu))

        model.add(Conv2D(128, (3, 3), strides=(1, 1), activation=activations.relu))

        # model.add(Dropout(0.5))
        model.add(Flatten())

        model.add(Dense(self.action_size, activation=activations.softmax))

        model.compile(loss=losses.categorical_crossentropy,  # alternative: 'mse'
                      optimizer=optimizers.Nadam(lr=self.LEARNING_RATE))  # alternatives: RMSprop, Adam
        self.tensorBoard = TensorBoard('./logs/RLAgent', histogram_freq=0,
                                       write_graph=True, write_images=True)
        model.summary()
        return model
Example #11
    def __init__(self, gen_file_path, epsilon=1.0):
        self.map_gen_file = gen_file_path
        self.epsilon = epsilon
        self.tiles_per_col = c.COL_HEIGHT - 2 if c.INSERT_GROUND else c.COL_HEIGHT
        self.gen_size = c.GEN_LENGTH * self.tiles_per_col
        self.checkpoint_gen = os.path.join(dir_path, "checkpoints", "generator")
        self._TILE_MAP, self._CHAR_MAP = self.tokenize_tiles(c.GENERATOR_TILES)
        # print("generator._TILE_MAP:", self._TILE_MAP)
        # print("generator._CHAR_MAP:", self._CHAR_MAP)

        self.replay_memory = deque(maxlen=c.REPLAY_MEMORY_SIZE)
        loaded_model_path = self.load_model_path() if c.LOAD_GEN_MODEL else None
        callback_dir = os.path.join(dir_path, "checkpoints", "generator_graphs", f"graph_{self.start_checkpoint}")
        os.makedirs(callback_dir, exist_ok=True)

        self.generator = self.create_generator(model=loaded_model_path)
        self.tensorboard = TensorBoard(log_dir=callback_dir, histogram_freq=0, write_graph=True, write_images=False)
        print("tensorboard callback dir:", callback_dir)

        if loaded_model_path:
            self.load_replay_memory()
        print(self.generator.summary())
Example #12
    def build_checkpointers(self):
        checkpointers = []

        if self.model_path is not None:
            checkpointer = ModelCheckpoint(filepath=self.model_path + '.h5',
                                           monitor='val_loss',
                                           verbose=1,
                                           save_best_only=True,
                                           period=10)
            checkpointers.append(checkpointer)

        if self.log_dir is not None:
            tensorboard = TensorBoard(log_dir=self.log_dir,
                                      histogram_freq=0,
                                      batch_size=32,
                                      write_graph=True,
                                      write_grads=False,
                                      write_images=False)
            checkpointers.append(tensorboard)

        return checkpointers
Example #13
    def __setup_callbacks(self) -> List:
        """
        Sets up the callbacks for training
        :return: the early stopping, tensorboard, checkpointer and CSV logger callbacks
        """

        # Create a folder for the model log of the current experiment
        weights_log_path = os.path.join(self.__current_experiment_path,
                                        'weights')

        # Set up the callback to save the best weights after each epoch
        checkpointer = ModelCheckpoint(filepath=os.path.join(
            weights_log_path, 'weights.{epoch:02d}-{val_loss:.2f}.hdf5'),
                                       verbose=0,
                                       save_best_only=True,
                                       save_weights_only=True,
                                       monitor='val_loss',
                                       mode='min')

        # Set up Tensorboard
        tensorboard = TensorBoard(log_dir=os.path.join(
            self.__current_experiment_path, 'tensorboard'),
                                  write_graph=True,
                                  histogram_freq=0,
                                  write_grads=True,
                                  write_images=False,
                                  batch_size=self._params.batch_size,
                                  update_freq=self._params.batch_size)

        # Set up early stopping to interrupt the training if val_loss has not decreased after n epochs
        early_stopping = EarlyStopping(monitor='val_loss',
                                       patience=25,
                                       mode='min')

        csv_logger = CSVLogger(os.path.join(self.__current_experiment_path,
                                            "training.csv"),
                               append=True)

        return [early_stopping, tensorboard, checkpointer, csv_logger]
Example #14
def main(mode):
  
    if mode == 'hdf5':
        print("Training from hdf5")
        training_file = '../data/hdf5/training.h5'
        validation_file = '../data/hdf5/validation.h5'
        model_path = '../models/hdf5'
        log_dir = '../logs/hdf5{0}'.format(strftime('%H%M%S'))        
        x_train, y_train, x_test, y_test = load_data_from_hdf5(training_file, validation_file)     
    elif mode == 'tfrecords':
        print("Training from TFRecords")
        training_file = '../data/tfrecords/training.tfrecords'
        validation_file = '../data/tfrecords/validation.tfrecords'
        model_path = '../models/tfrecords'
        log_dir = '../logs/tfrecords{0}'.format(strftime('%H%M%S'))
        x_train, y_train, x_test, y_test = load_data_from_tfrecords(training_file, validation_file)
    else:
        print("Pitty ...")
        return
        
    tensorboard = TensorBoard(log_dir=log_dir)
    
    input_name = "image"

    model = model_fn(IMAGE_SHAPE, INPUT_NAME)

    x_train = x_train/255
    x_test = x_test/255
    
    y_train = tf.keras.utils.to_categorical(y_train, NUM_CLASSES)
    y_test = tf.keras.utils.to_categorical(y_test, NUM_CLASSES)

    model.fit(x_train, y_train, batch_size=32, epochs=5, verbose=1, callbacks=[tensorboard])
    
    model_file = join(model_path, 'cnn_{0}'.format(strftime('%H%M%S')))
    model.save(model_file)
    
    results = model.evaluate(x_test, y_test)
    print(results)
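A minimal command-line entry point for main, assumed rather than taken from the original module:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', choices=['hdf5', 'tfrecords'], default='hdf5',
                        help='input pipeline to train from')
    args = parser.parse_args()
    main(args.mode)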
Example #15
    def train(self):
        tbCallBack = TensorBoard(log_dir='./td_lstm_logs', histogram_freq=0, write_graph=True, write_images=True)

        texts_raw_indices, texts_left_indices, aspects_indices, texts_right_indices, polarities_matrix = \
            read_dataset(type=self.DATASET,
                         mode='test',
                         embedding_dim=self.EMBEDDING_DIM,
                         max_seq_len=self.MAX_SEQUENCE_LENGTH, max_aspect_len=self.MAX_ASPECT_LENGTH)

        left_input = np.concatenate((texts_left_indices, aspects_indices), axis=1)
        right_input = np.concatenate((texts_right_indices, aspects_indices), axis=1)

        for i in range(1, self.ITERATION):
            print()
            print('-' * 50)
            print('Iteration', i)
            self.model.fit([self.left_input, self.right_input], self.polarities_matrix,
                           validation_data=([left_input, right_input], polarities_matrix),
                           batch_size=self.BATCH_SIZE, callbacks=[tbCallBack])
            if i % 5 == 0:
                self.model.save('td_lstm_saved_model.h5')
                print('model saved')
Example #16
def train_model(data, args):
    train_config = TrainConfig(args)
    mode_module = importlib.import_module("modes." + args.mode)

    train_generator = mode_module.DataGenerator(data.train_data)
    val_generator = mode_module.DataGenerator(data.validation_data)

    model = mode_module.build_model()

    model.compile(optimizer=train_config.optimizer,
                  loss=train_config.loss,
                  metrics=train_config.metrics)

    results_csv_file = os.path.join(args.results_dir, "results.csv")
    ckpt_filename = "Epoch-{epoch:02d}-Val-Acc-{val_accuracy:.4f}.hdf5"
    weight_file = os.path.join(args.checkpoints_dir, ckpt_filename)

    results_callback = CSVLogger(results_csv_file, append=True, separator=',')
    checkpoints_callback = ModelCheckpoint(weight_file,
                                           save_best_only=True,
                                           save_weights_only=True)

    tensorboard_callback = TensorBoard(log_dir=args.results_dir,
                                       histogram_freq=0,
                                       write_graph=True,
                                       write_images=True)

    model.fit_generator(generator=train_generator,
                        validation_data=val_generator,
                        verbose=2,
                        epochs=train_config.epochs,
                        shuffle=True,
                        callbacks=[
                            results_callback, tensorboard_callback,
                            checkpoints_callback
                        ])

    return model
Example #17
    def setup_callbacks(weights_log_path: str, batch_size: int,
                        lr: float) -> List[tf.keras.callbacks.Callback]:
        """
        Sets up the callbacks for the training of the model.
        """

        # Setup callback to save the best weights after each epoch
        checkpointer = ModelCheckpoint(filepath=os.path.join(
            weights_log_path, 'weights.{epoch:02d}-{val_loss:.2f}.hdf5'),
                                       verbose=0,
                                       save_best_only=True,
                                       save_weights_only=True,
                                       monitor='val_loss',
                                       mode='min')

        tensorboard_log_dir = os.path.join(weights_log_path, 'tensorboard')

        # update_freq is set to batch_size * 10 because logging once per
        # epoch would be too infrequent and once per batch too frequent
        tensorboard = TensorBoard(log_dir=tensorboard_log_dir,
                                  write_graph=True,
                                  histogram_freq=0,
                                  write_grads=True,
                                  write_images=False,
                                  batch_size=batch_size,
                                  update_freq=batch_size * 10)

        def lrs(epoch):
            if epoch > 10:
                return lr / 10
            elif epoch > 6:
                return lr / 5
            else:
                return lr

        lr_schedule = LearningRateScheduler(lrs, verbose=1)

        return [tensorboard, checkpointer, lr_schedule]
Example #18
    def __init__(self):
        print(tf.__version__)

        fashion_mnist = keras.datasets.fashion_mnist

        (train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()

        class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
                       'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
        
        print("train_images shape is: %s".format(train_images.shape))
        print("length of data: %s".format(len(train_labels)))
        tensorboard = TensorBoard(log_dir="logs/{}".format(time()))
        train_images = train_images / 255.0 # normalize data
        test_images = test_images / 255.0 # normalize data
        plt.figure(figsize=(10,10))
        for i in range(25):
            plt.subplot(5,5,i+1)
            plt.xticks([])
            plt.yticks([])
            plt.grid(False)
            plt.imshow(train_images[i], cmap=plt.cm.binary)
            plt.xlabel(class_names[train_labels[i]])
        model = keras.Sequential([
            keras.layers.Flatten(input_shape=(28,28)),
            keras.layers.Dense(128, activation=tf.nn.relu),
            keras.layers.Dense(10, activation=tf.nn.softmax)
        ])
        model.compile(optimizer=keras.optimizers.Adagrad(lr=0.1, epsilon=None, decay=0.0),
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        model.fit(train_images, train_labels, epochs=5,
                  callbacks=[tensorboard])
        test_loss, test_acc = model.evaluate(test_images, test_labels)
        print('Test accuracy: ', test_acc)
        predictions = model.predict(test_images)
        model.save("fashion_mnist_model.h5")
Example #19
    def setup_callables(self):
        monitor = "val_dice_coef"
        # Setup callback to save best weights after each epoch
        checkpointer = ModelCheckpoint(filepath=os.path.join(
            self.model_dir, 'weights.{epoch:02d}-{val_loss:.2f}.hdf5'),
                                       verbose=0,
                                       save_best_only=True,
                                       save_weights_only=True,
                                       monitor=monitor,
                                       mode='max')
        # setup callback to register training history
        csv_logger = CSVLogger(os.path.join(self.log_dir, 'log.csv'),
                               append=True,
                               separator=';')

        # setup logger to catch warnings and info messages
        set_logger(os.path.join(self.log_dir, 'train_val.log'))

        # setup callback to retrieve tensorboard info
        tensorboard = TensorBoard(log_dir=self.log_dir,
                                  write_graph=True,
                                  histogram_freq=0)

        # setup early stopping to stop training if val_dice_coef has not increased after 5 epochs
        early_stopping = EarlyStopping(monitor=monitor,
                                       patience=5,
                                       mode='max',
                                       verbose=0)
        lr_reducer = ReduceLROnPlateau(monitor=monitor,
                                       factor=0.05,
                                       cooldown=0,
                                       patience=5,
                                       verbose=0,
                                       mode='max')

        return [
            checkpointer, csv_logger, tensorboard, early_stopping, lr_reducer
        ]
Example #20
def get_densenet121_model(classes=2):
    def preprocess_input(img):
        img[:, :, 0] = (img[:, :, 0] - 103.94) * 0.017
        img[:, :, 1] = (img[:, :, 1] - 116.78) * 0.017
        img[:, :, 2] = (img[:, :, 2] - 123.68) * 0.017
        return img.astype(np.float32)

    def decode_img(img):
        img[:, :, 0] = (img[:, :, 0] / 0.017) + 103.94
        img[:, :, 1] = (img[:, :, 1] / 0.017) + 116.78
        img[:, :, 2] = (img[:, :, 2] / 0.017) + 123.68
        return img.astype(np.uint8)

    base_model = tf.keras.applications.DenseNet121(include_top=False,
                                                   classes=2)
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    pre = Dense(classes, activation='softmax', name='fc1000')(x)
    model = Model(inputs=base_model.input, outputs=pre)
    model.summary()
    for layer in base_model.layers:
        layer.trainable = False

    ckpt = './ckpt/densenet121.h5'
    checkpoint = ModelCheckpoint(filepath=ckpt)
    tensorboard_dir = './log/densenet121'
    tensorboard = TensorBoard(log_dir=tensorboard_dir)
    if os.path.exists(ckpt):
        model.load_weights(ckpt, by_name=True)
        print("load done")
    else:
        plot_model(model, to_file='densenet121.png')

    model.compile(optimizer=tf.train.AdamOptimizer(0.001),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    return model, checkpoint, tensorboard, preprocess_input, decode_img
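A usage sketch unpacking the returned tuple; train_gen and val_gen are hypothetical generators that feed images through preprocess_input:

model, checkpoint, tensorboard, preprocess_input, decode_img = \
    get_densenet121_model(classes=2)
# train_gen / val_gen are hypothetical generators that apply preprocess_input
model.fit_generator(train_gen,
                    epochs=10,
                    validation_data=val_gen,
                    callbacks=[checkpoint, tensorboard])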
Example #21
    def train_model(self,
                    model,
                    train_images,
                    train_labels,
                    val_images,
                    val_labels):
        """
        """
        es = EarlyStopping(monitor='loss',
                           mode='max',
                           verbose=1,
                           patience=cfg.patience)
        # modelcheckpoint
        mc = ModelCheckpoint(super().get_path_to_save(),
                             monitor='acc',
                             mode='max',
                             save_best_only=True,
                             verbose=1)
        # tensorboard
        logdir = os.path.join(cfg.pfad_zu_logs, cfg.keras_model_name)
        tb = TensorBoard(log_dir=logdir,
                         histogram_freq=0,
                         write_graph=True,
                         write_images=False)

        callbacks = [es, mc, tb]
        self.compile_model(model)
        print("______________Start of training____________________")
        # validation_split is ignored when validation_data is given, so it is omitted here
        history = model.fit(train_images,
                            train_labels,
                            validation_data=(val_images, val_labels),
                            epochs=self.__Num_epochs,
                            batch_size=self.__batch_size,
                            verbose=1,
                            callbacks=callbacks)
        print("training finished")
        return history
Example #22
    def fitModel(self, training_features: np.ndarray,
                 training_results_labels: np.ndarray, epochs: int) -> None:
        """
        Takes the training set and its corresponding labels of the match outcome.
        Uses Keras to perform propagation and backpropagation to train the network.
        The final hyperparameters are set - batch size, epochs and a callback to
        TensorBoard for further visualisation.
        :param training_features: NumPy array of the feature vectors (shape [10,])
        :param training_results_labels: NumPy array of all the corresponding outcomes
        :param epochs: Integer count of epochs the training will be carried through
        """
        tensorboard = TensorBoard(log_dir="logs/{}".format(time()))
        try:
            self._model.fit(training_features,
                            training_results_labels,
                            epochs=epochs,
                            callbacks=[tensorboard],
                            batch_size=32)
        except ValueError as e:
            print(
                "FAILED TRAINING, bad vector shape? not using NumPy array? :",
                e)
            exit(1)
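A usage sketch under the docstring's contract; the MatchPredictor class name and the binary outcome labels are illustrative assumptions:

import numpy as np

predictor = MatchPredictor()                   # hypothetical class exposing fitModel
features = np.random.rand(100, 10)             # 100 samples, 10 features each
labels = np.random.randint(0, 2, size=(100,))  # match outcomes (binary, assumed)
predictor.fitModel(features, labels, epochs=50)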
Example #23
    def train_model(self, model, X_train, y_train):
        start_time = time.time()

        model.compile(optimizer='adam',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])

        tensorboard = TensorBoard(log_dir=settings.LOGS_DIR)
        early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss',
                                                       min_delta=0.005,
                                                       patience=2,
                                                       verbose=0,
                                                       mode='auto')
        history = model.fit(X_train,
                            y_train,
                            validation_split=0.2,
                            epochs=20,
                            callbacks=[tensorboard, early_stopping])

        end_time = time.time()
        print('Total train time = ', round(end_time - start_time), 's')
        self._visualize_model_training(history)
        return model
Example #24
def makemod(LSTM_layers, LSTM_sizes, Dense_layers, loss_type="categorical_crossentropy", opt="adam"):
    """Defines, compiles and fits models"""
    for lstmlayer in LSTM_layers:
        for lstmsize in LSTM_sizes:
            for denselayer in Dense_layers:
                NAME = "{}-24 {}-LSTM-{}-Nodes-{}-Dense".format(text_designation, lstmlayer, lstmsize, denselayer)
                model = Sequential()
                for _ in range(lstmlayer - 1):
                    model.add(LSTM(lstmsize, return_sequences=True, input_shape=(x_train.shape[1], x_train.shape[2])))
                model.add(LSTM(lstmsize, input_shape=(x_train.shape[1], x_train.shape[2])))
                for _ in range(denselayer):
                    model.add(Dense(vocab_size, activation='relu'))
                model.add(Dense(vocab_size, activation='softmax'))
                print(model.summary())
                # Log the model
                tb = TensorBoard(log_dir="logs/{}".format(NAME))
                # Compile model
                model.compile(loss=loss_type, optimizer=opt, metrics=["accuracy"])
                model.fit(x_train, y_train, epochs=1000, validation_split=0.1, verbose=2, callbacks=[tb])
                print("Model {} created".format(NAME))
                # Save Model
                model.save(NAME)
                print("Model {} saved".format(NAME))
Example #25
    def __train__(self, data_vector: dict, epochs: int = 15, log_dir=None):
        """
        Trains a CNN on a given data vector
        :param data_vector: training data vector
        :param epochs: maximal number of epochs to train
        :param log_dir: directory to save the log files, usually userdir/pickX/logs
        :return: self
        """
        callbacks = []
        if log_dir is not None:
            callbacks.append(TensorBoard(log_dir=log_dir))
            callbacks.append(EarlyStopping("val_loss", patience=2))

        self.model.fit(x=data_vector[self.feature_field],
                       y=data_vector['labels'],
                       validation_split=0.1,
                       batch_size=32,
                       epochs=epochs,
                       callbacks=callbacks)
        self.model.summary()
        self.input_dimension = self.model.input_shape[1:]

        return self
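The fit call implies that data_vector is a dict holding a feature array under self.feature_field and targets under 'labels'. A sketch of building one, assuming feature_field is 'images'; classifier is a hypothetical instance and the shapes are illustrative:

import numpy as np

# assuming self.feature_field == 'images'; shapes and class count are illustrative
data_vector = {
    'images': np.random.rand(1000, 32, 32, 3).astype(np.float32),
    'labels': np.random.randint(0, 10, size=(1000,)),
}
classifier.__train__(data_vector, epochs=15, log_dir='logs')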
Example #26
    def init_callbacks(self):
        self.callbacks.append(
            ModelCheckpoint(
                filepath=os.path.join(self.config.callbacks.checkpoint_dir,
                                      '%s-{epoch:02d}-{val_loss:.2f}.hdf5' % self.config.exp.name),
                monitor=self.config.callbacks.checkpoint_monitor,
                mode=self.config.callbacks.checkpoint_mode,
                save_best_only=self.config.callbacks.checkpoint_save_best_only,
                save_weights_only=self.config.callbacks.checkpoint_save_weights_only,
                verbose=self.config.callbacks.checkpoint_verbose,
            )
        )

        self.callbacks.append(
            TensorBoard(
                log_dir=self.config.callbacks.tensorboard_log_dir,
                write_graph=self.config.callbacks.tensorboard_write_graph,
            )
        )

        self.callbacks.append(
            CSVLogger(os.path.join(self.config.callbacks.history_dir, "parameters.csv"), separator=',', append=False)
        )
Example #27
    def forward(self, X_train, X_test, y_train, y_test):
        X_shape = X_train.shape[1]
        y_shape = y_train.shape[1]
        X = Input(shape=(self.image_size, self.image_size, 1), name='input')
        label = Input(shape=(y_shape,), name='label')
        
        encoder, shape = self.encode(X, label)
        encoder.summary()

        z_inputs = Input(shape=(self.n_dim,), name='latent_input')
        decoder = self.decode(z_inputs, label, shape)
        decoder.summary()

        z_output = encoder([X, label])[2]
        outputs = decoder([z_output, label])
        cvae = Model([X, label], outputs, name='cvae')
        cvae.compile(optimizer=Adam(lr=self.learning_rate, decay=self.decay_rate, epsilon=1e-08), loss=self.vae_loss)
        cvae.summary()
        tensorboard = TensorBoard(log_dir="{}/{}".format(self.logs_dir,time()))
        cvae_hist = cvae.fit([X_train, y_train], X_train, verbose=1, batch_size=self.batch_size, epochs=self.epochs,
                     validation_data=([X_test, y_test], X_test), callbacks=[tensorboard], shuffle=True)
        decoder.save(self.args.save_model + '.h5')
        return cvae, cvae_hist
Example #28
def train(args):
    '''
    Train model
    '''
    model = build_model()
    X_train, Y1_train, Y2_train, X_dev, Y1_dev, Y2_dev, X_test, Y1_test, Y2_test = load_dataset(
        args.dataset_name)

    mc = ModelCheckpoint('best_model.h5',
                         monitor='dense_2_categorical_accuracy',
                         mode='max',
                         verbose=1,
                         save_best_only=True)
    es = EarlyStopping(monitor='loss', mode='min', verbose=1, patience=10)

    print('Fitting model...')
    results = model.fit(
        X_train, [Y1_train, Y2_train],
        epochs=args.epochs,
        verbose=1,
        validation_data=(X_dev, [Y1_dev, Y2_dev]),
        callbacks=[TensorBoard(log_dir=TENSORBOARD_DIR), es, mc])

    _, _, _, cat_acc, subcat_acc = model.evaluate(X_test, [Y1_test, Y2_test],
                                                  verbose=0)
    print('last model:')
    print(cat_acc)
    print(subcat_acc)
    model.save("model.h5")
    print("Saved model to disk")

    model1 = load_model('best_model.h5')
    _, _, _, cat_acc, subcat_acc = model1.evaluate(X_test, [Y1_test, Y2_test],
                                                   verbose=0)
    print('best model:')
    print(cat_acc)
    print(subcat_acc)
Example #29
def get_callbacks(model_name: str) -> List[Union[TensorBoard, EarlyStopping, ModelCheckpoint]]:
    """Accepts the model name as a string and returns multiple callbacks for training the keras model.

    Parameters
    ----------
    model_name : str
        The name of the model as a string.

    Returns
    -------
    List[Union[TensorBoard, EarlyStopping, ModelCheckpoint]]
        A list of multiple keras callbacks.
    """
    logdir = (
        "logs/scalars/" + model_name + "_" + datetime.now().strftime("%Y%m%d-%H%M%S")
    )  # create a folder for each model.
    tensorboard_callback = TensorBoard(log_dir=logdir)
    # use tensorboard --logdir logs/scalars in your command line to startup tensorboard with the correct logs

    early_stopping_callback = EarlyStopping(
        monitor="val_mean_absolute_percentage_error",
        min_delta=1,  # model should improve by at least 1%
        patience=10,  # number of epochs with improvement below 1% before training stops
        verbose=2,
        mode="min",
        restore_best_weights=True,  # restore the best model with the lowest validation error
    )

    model_checkpoint_callback = ModelCheckpoint(
        "./data/models/" + model_name + ".h5",
        monitor="val_mean_absolute_percentage_error",
        verbose=0,
        save_best_only=True,  # save the best model
        mode="min",
        save_freq="epoch",  # save every epoch
    )  # saving eff_net takes quite a bit of time
    return [tensorboard_callback, early_stopping_callback, model_checkpoint_callback]
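A usage sketch wiring these callbacks into training; the model and data names are placeholders, and the model must be compiled with the MAPE metric so the monitored val_mean_absolute_percentage_error value exists:

# model, x_train/y_train and x_val/y_val are placeholders
model.compile(optimizer='adam', loss='mse',
              metrics=['mean_absolute_percentage_error'])
model.fit(x_train, y_train,
          validation_data=(x_val, y_val),
          epochs=100,
          callbacks=get_callbacks('eff_net'))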
Example #30
    def train(self, train_gen, val_gen,
              saved_model_path, epochs=100, steps=100, train_split=0.8,
              verbose=1, min_delta=.0005, patience=5, use_early_stop=True):
        """
        train_gen: generator that yields an array of images an array of

        """

        # checkpoint to save the best model (lowest val_loss) seen so far
        save_best = ModelCheckpoint(saved_model_path,
                                    monitor='val_loss',
                                    verbose=verbose,
                                    save_best_only=True,
                                    mode='min')

        # stop training if the validation error stops improving.
        early_stop = EarlyStopping(monitor='val_loss',
                                   min_delta=min_delta,
                                   patience=patience,
                                   verbose=verbose,
                                   mode='auto')

        # callbacks_list = [save_best]
        callbacks_list = [save_best, TensorBoard(log_dir='./tmp/log')]

        if use_early_stop:
            callbacks_list.append(early_stop)

        hist = self.model.fit_generator(
            train_gen,
            steps_per_epoch=steps,
            epochs=epochs,
            verbose=1,
            validation_data=val_gen,
            callbacks=callbacks_list,
            validation_steps=steps * (1.0 - train_split) / train_split)
        return hist