Example #1
    def setup_callbacks(self,
                        mode,
                        callbacks,
                        env,
                        steps,
                        visualize,
                        verbose=True,
                        log_interval=10000,
                        episodes=10):
        if mode == 'train':
            callbacks = [] if not callbacks else callbacks[:]
            if verbose:
                callbacks += [TrainIntervalLogger(interval=log_interval)]
            if visualize:
                callbacks += [Visualizer()]
            history = History()
            callbacks += [history]
            callbacks = CallbackList(callbacks)
            if hasattr(callbacks, 'set_model'):
                callbacks.set_model(self)
            else:
                callbacks._set_model(self)
            callbacks._set_env(env)
            params = {
                'steps': steps,
            }
            if hasattr(callbacks, 'set_params'):
                callbacks.set_params(params)
            else:
                callbacks._set_params(params)

        elif mode == 'test':
            callbacks = [] if not callbacks else callbacks[:]
            if verbose:
                callbacks += [TestLogger()]
            if visualize:
                callbacks += [Visualizer()]
            history = History()
            callbacks += [history]
            callbacks = CallbackList(callbacks)
            if hasattr(callbacks, 'set_model'):
                callbacks.set_model(self)
            else:
                callbacks._set_model(self)
            callbacks._set_env(env)
            params = {
                'episodes': episodes,
            }
            if hasattr(callbacks, 'set_params'):
                callbacks.set_params(params)
            else:
                callbacks._set_params(params)

        return callbacks, history
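
For context, here is a minimal usage sketch; the agent and env objects are illustrative assumptions, not part of the original example:

# Hypothetical usage: an agent exposing setup_callbacks() and a Gym-style env
callbacks, history = agent.setup_callbacks(mode='train',
                                           callbacks=None,
                                           env=env,
                                           steps=50000,
                                           visualize=False)
callbacks.on_train_begin()  # CallbackList fans each event out to every callback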
Example #2
def train_model(model,
                epoch,
                data,
                loss='mse',
                optimizer='rmsprop',
                save_best_only=True,
                metrics=None,
                show=False):
    # evaluation metrics
    metrics = ['mse'] if metrics is None else metrics
    model_name = model.name
    model_path = os.path.join(os.path.abspath(os.curdir), "saved_models",
                              model_name, get_time_stamp())
    if not os.path.exists(model_path):
        os.makedirs(model_path)
        os.makedirs(os.path.join(model_path, "saved_checkpoints"))
    # save the original dataset
    x_train, x_test, y_train, y_test = data
    with open(os.path.join(model_path, "data.pickle"), 'wb') as f:
        pickle.dump((x_train, x_test, y_train, y_test), f)
    # Keras built-in model structure visualization
    plot_model(model, to_file=os.path.join(model_path, f"{model_name}.png"))
    model.compile(loss=loss, optimizer=optimizer, metrics=metrics)
    # add Keras callbacks: history, TensorBoard logging, and checkpoints
    history = History()
    tensorboard = TensorBoard(log_dir=os.path.join(model_path, "logs"),
                              update_freq="epoch")
    # "{epoch:02d}" below is a ModelCheckpoint filename template, filled in per save
    checkpoints = ModelCheckpoint(os.path.join(model_path, "saved_checkpoints",
                                               "weights-{epoch:02d}.hdf5"),
                                  monitor='val_loss',
                                  mode='auto',
                                  save_freq='epoch',
                                  save_best_only=save_best_only)

    model.fit(x_train,
              y_train,
              validation_data=(x_test, y_test),
              callbacks=[history, tensorboard, checkpoints],
              epochs=epoch)
    # plot the history
    history_plot = plot_history(history.history, show=show)
    history_plot.savefig(os.path.join(model_path, f"{model_name}_loss.png"))
    history_plot.close()
    # select the last model
    selected_file = sorted(os.listdir(os.path.join(model_path,
                                                   "saved_checkpoints")))[-1]
    selected_model = load_check_point(
        os.path.join(model_path, "saved_checkpoints", selected_file))
    # plot the predicted value with the actual value
    x_origin, y_origin = concatenate_data(x_train, y_train, x_test, y_test)
    test_size = len(y_test)
    predict_plot = predict_and_plot(selected_model,
                                    x_origin,
                                    y_origin,
                                    test_size=test_size,
                                    show=show)
    predict_plot.savefig(os.path.join(model_path, f"{model_name}_value.png"))
    predict_plot.close()
    with open(os.path.join(model_path, "history.pickle"), 'wb') as f:
        pickle.dump(history.history, f)
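
As a small follow-up sketch (assuming the same model_path as above), the pickled history can be reloaded later for plotting or comparison:

# Hypothetical reload of the saved training history
with open(os.path.join(model_path, "history.pickle"), 'rb') as f:
    history_dict = pickle.load(f)
print(history_dict.keys())  # e.g. dict_keys(['loss', 'val_loss', 'mse'])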
Example #3
def train_gate(model, weights_file):
    history = History()
    highest_acc = 0
    iterationsWithoutImprovement = 0
    lr = .001
    for i in range(1):
        # load_weights()
        model.fit_generator(datagen.flow(x_train, y_train, batch_size=50),
                            epochs=1,
                            steps_per_epoch=len(x_train) // 50,
                            validation_data=(x_test, y_test),
                            callbacks=[history],
                            workers=4, verbose=1)
        val_acc = history.history['val_acc'][-1]
        if val_acc > highest_acc:
            model.save_weights(weights_file + '.hdf5')
            print("Saving weights, new highest accuracy: " + str(val_acc))
            highest_acc = val_acc
            iterationsWithoutImprovement = 0
        else:
            iterationsWithoutImprovement += 1
            if iterationsWithoutImprovement > 3:
                lr *= .5
                K.set_value(model.optimizer.lr, lr)
                print("Learning rate reduced to: " + str(lr))
                iterationsWithoutImprovement = 0
Example #4
def train():
    (X_train, _), (_, _) = mnist.load_data()

    X_train = X_train / 255.0

    model = create_model()
    model.compile(tf.train.AdamOptimizer(), loss_function)
    model.summary()

    tpu_grpc_url = "grpc://" + os.environ["COLAB_TPU_ADDR"]
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        tpu_grpc_url)
    strategy = keras_support.TPUDistributionStrategy(tpu_cluster_resolver)
    model = tf.contrib.tpu.keras_to_tpu_model(model, strategy=strategy)

    cb = Sampling(model)
    hist = History()
    dummy_rand = np.zeros((X_train.shape[0], 64))
    y_train = np.concatenate(
        (X_train.reshape(-1, 784), np.zeros((X_train.shape[0], 1))), axis=-1)

    model.fit([X_train, dummy_rand],
              y_train,
              batch_size=1024,
              callbacks=[cb, hist],
              epochs=20)

    history = hist.history
    with open("vae_history.json", "w") as fp:
        json.dump(history, fp)
Example #5
    def train(self, epochs):
        train_data, valid_data = self.dataset.load_dataset(self.batch_size)
        history = History()
        callbacks = [
            ModelCheckpoint(filepath=f"{self.basedir}/models/checkpoint",
                            save_weights_only=True,
                            monitor='val_mIOU',
                            mode='max',
                            save_best_only=True),
            history,
        ]
        if self.enable_tensorboards:
            callbacks.append(
                TensorBoard(
                    log_dir=self.tensorboard_log,
                    histogram_freq=1,
                    write_images=True,
                    update_freq='epoch',
                    profile_batch='500,510',
                    embeddings_freq=1,
                ))
        self.model_backend.fit(train_data,
                               validation_data=valid_data,
                               epochs=epochs,
                               callbacks=callbacks)
        self.plot_segm_history(history)
        with open(f"{self.basedir}/train_history.json", 'w') as outfile:
            json.dump(history.history, outfile)
Example #6
def lstm_model(train_x, train_y, config):

    model = Sequential()
    model.add(
        LSTM(config.lstm_layers[0],
             input_shape=(train_x.shape[1], train_x.shape[2]),
             return_sequences=True))
    model.add(Dropout(config.dropout))

    model.add(LSTM(config.lstm_layers[1], return_sequences=False))
    model.add(Dropout(config.dropout))

    model.add(Dense(train_y.shape[1]))
    model.add(Activation("relu"))

    model.summary()

    cbs = [
        History(),
        EarlyStopping(monitor='val_loss',
                      patience=config.patience,
                      min_delta=config.min_delta,
                      verbose=0)
    ]
    model.compile(loss=config.loss_metric, optimizer=config.optimizer)
    model.fit(train_x,
              train_y,
              batch_size=config.lstm_batch_size,
              epochs=config.epochs,
              validation_split=config.validation_split,
              callbacks=cbs,
              verbose=True)
    return model
Example #7
def train(batch_size, use_tpu, load_existing_weights):
    model = create_resnet()
    gen = CatGenerator()

    if load_existing_weights:
        model.load_weights("weights.hdf5")

    model.compile(tf.train.MomentumOptimizer(1e-3, 0.9),
                  loss=loss_function_multiple_distance_and_area,
                  metrics=[loss_function_simple])

    if use_tpu:
        tpu_grpc_url = "grpc://"+os.environ["COLAB_TPU_ADDR"]
        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(tpu_grpc_url)
        strategy = keras_support.TPUDistributionStrategy(tpu_cluster_resolver)
        model = tf.contrib.tpu.keras_to_tpu_model(model, strategy=strategy)

    cb = CatsCallback(model)
    history = History()

    model.fit_generator(gen.flow_from_directory(batch_size, True), steps_per_epoch=6996//batch_size,
                        validation_data=gen.flow_from_directory(batch_size, False), validation_steps=2999//batch_size,
                        callbacks=[cb, history], epochs=200)

    with open("history.dat", "wb") as fp:
        pickle.dump(history.history, fp)

    with zipfile.ZipFile("cats_result.zip", "w") as zip:
        zip.write("history.dat")
        zip.write("cats_weights.hdf5")
Example #8
    def train(input_data):
        OUTPUT_SHAPE = (10, 32, 32, 1)
        NUM_POINTS = 2000
        BATCH_SIZE = 16
        PRE_FETCH = 2
        EPOCHS = 10
        ignoreself = 1
        model = ModelConvLSTM.create_Model(ignoreself)
        model.compile(loss='mse', optimizer='adam')
        history = History()
        #filepath=str(os.getcwd())+"SavedModels/HPC2-{epoch:02d}.h5"

        #cp=ModelCheckpoint(filepath,verbose=1,save_best_only=False,mode='max',period=10)

        trainData = db.generatorParametersTrain(input_data, OUTPUT_SHAPE,
                                                NUM_POINTS, BATCH_SIZE,
                                                PRE_FETCH)
        valData = db.generatorParametersValidation(input_data, OUTPUT_SHAPE,
                                                   NUM_POINTS, BATCH_SIZE,
                                                   PRE_FETCH)
        # model.fit returns its own History object, which replaces the one created above
        history = model.fit(trainData,
                            epochs=EPOCHS,
                            verbose=1,
                            validation_data=valData)  # ,callbacks=[cp])

        return history
Example #9
    def __init__(self, input_size, hyperparameters, categorical_sizes):
        self.hyperparameters = hyperparameters
        self.categorical_sizes = categorical_sizes
        self.history = History()
        inputs = Input(shape=(input_size,))
        embedding_layers = list()
        for i, col_name in enumerate(sorted(list(categorical_sizes.keys()))):
            categorical_size = categorical_sizes[col_name]
            embedding_size = int(
                categorical_size ** hyperparameters['embedding_factor'])
            # bind i as a default argument so each Lambda keeps its own column index
            ith_input_slice = Lambda(lambda x, i=i: x[:, i])(inputs)
            embedding = Embedding(categorical_size, embedding_size,
                                  input_length=1)(ith_input_slice)
            embedding_layers.append(embedding)
        numeric_inputs_slice = Lambda(
            lambda x: x[:, len(categorical_sizes):])(inputs)
        to_concat = embedding_layers + [numeric_inputs_slice]
        all_inputs = Concatenate(axis=1)(to_concat)
        hidden_input = all_inputs
        for block_params in self.hyperparameters['dense_blocks']:
            hidden_output = Dense(block_params['size'], activation='relu')(hidden_input)
            hidden_output = Dropout(block_params['dropout_rate'])(hidden_output)
            hidden_input = hidden_output
        outputs = Dense(1, activation='linear')(hidden_output)
        self.model = Model(inputs, outputs)
        # define optimization procedure
        self.lr_annealer = ReduceLROnPlateau(monitor='val_mean_squared_error', factor=hyperparameters['lr_plateau_factor'],
                                             patience=hyperparameters['lr_plateau_patience'], verbose=1)
        self.early_stopper = EarlyStopping(monitor='val_mean_squared_error', min_delta=hyperparameters['early_stopping_min_delta'],
                                           patience=hyperparameters['early_stopping_patience'], verbose=1)

        self.tensorboard = TensorBoard(log_dir='train_logs', histogram_freq=1)
        self.model.compile(optimizer=Adam(lr=hyperparameters['learning_rate']),
                           loss='mean_squared_error',
                           metrics=['mean_squared_error'])
Example #10
    def train(self,
              companies,
              industries,
              split_rate=0.2,
              batch_size=128,
              patience=5,
              model_weight_path=_classifier_weights_path,
              model_graph_path=_classifier_graph_path,
              save_best_only=True,
              save_weights_only=True,
              epochs=100):
        """ Train the LSTM model. """
        companies = self._encode_company(companies, True)
        industries = self._encode_industry(industries, True)
        X_train, X_valid, y_train, y_valid = train_test_split(
            companies, industries, test_size=split_rate)
        valid_batch_size = min(batch_size, len(X_valid) // 3)
        train_gtr = KerasBatchGenerator(X_train, y_train, batch_size)
        valid_gtr = KerasBatchGenerator(X_valid, y_valid, valid_batch_size)

        earlystop = EarlyStopping(patience=patience)
        checkpoint = ModelCheckpoint(model_weight_path,
                                     save_best_only=save_best_only,
                                     save_weights_only=save_weights_only)
        history = History()

        model = Sequential()
        model.add(
            Embedding(input_dim=self._vocab_size,
                      output_dim=self._embedding_size,
                      input_length=self._pad_size))
        model.add(Conv1D(self._filters, self._kernel_size, activation='relu'))
        model.add(MaxPooling1D(self._pool_size))
        model.add(Dropout(rate=self._cnn_dropout))
        model.add(Flatten())
        model.add(Dense(self._ind_encoder.class_size, activation='sigmoid'))
        model.compile(optimizer=self._optimizer,
                      loss=self._loss,
                      metrics=self._metrics)

        model.fit_generator(train_gtr.generate(),
                            len(X_train) // batch_size,
                            epochs=epochs,
                            validation_data=valid_gtr.generate(),
                            validation_steps=len(X_valid) // valid_batch_size,
                            callbacks=[earlystop, checkpoint, history])
        for epoch in np.arange(0, len(model.history.history['loss'])):
            logger.info(
                f"Epoch={epoch + 1}, "
                f"{', '.join(f'{key}={value[epoch]}' for key, value in model.history.history.items())}"
            )

        # Save the model structure.
        with open(model_graph_path, 'w') as f:
            f.write(model.to_json())

        # Keep a reference to the trained model.
        self._model = model
Example #11
def CNN(x_train, y_train, x_test, y_test):
    history = History()

    input_shape = x_train[0].shape
    print("one sample input shape to the neural network =  ", input_shape,
          "num of samples =  ", x_train.shape[0])
    batch_size = 5
    num_classes = 2
    epochs = 20

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')

    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    model = Sequential()
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               padding="same",
               activation='relu',
               input_shape=input_shape))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3), padding="same", activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, (3, 3), padding="same", activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, (3, 3), padding="same", activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(256, (3, 3), padding="same", activation='relu'))
    model.add(Conv2D(256, (3, 3), padding="same", activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(100, activation='relu'))

    model.add(Dense(num_classes, activation='softmax'))

    model.compile(loss=keras.losses.binary_crossentropy,
                  optimizer=keras.optimizers.Adam(),
                  metrics=['accuracy'])

    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              validation_data=(x_test, y_test),
              callbacks=[history])

    train_score = model.evaluate(x_train, y_train, verbose=0)
    print('Train loss: {}, Train accuracy: {}'.format(train_score[0],
                                                      train_score[1]))
    test_score = model.evaluate(x_test, y_test, verbose=0)
    print('Test loss: {}, Test accuracy: {}'.format(test_score[0],
                                                    test_score[1]))
Example #12
    def train(self,
              names,
              genders,
              split_rate=0.2,
              batch_size=128,
              patience=5,
              model_weight_path=_classifier_weights_path,
              model_graph_path=_classifier_graph_path,
              save_best_only=True,
              save_weights_only=True,
              epochs=100):
        """ Train the LSTM model. """
        names = self._encode_name(names, True)
        genders = self._encode_gender(genders, True)
        X_train, X_valid, y_train, y_valid = train_test_split(
            names, genders, test_size=split_rate)
        valid_batch_size = min(batch_size, len(X_valid) // 3)
        train_gtr = KerasBatchGenerator(X_train, y_train, batch_size)
        valid_gtr = KerasBatchGenerator(X_valid, y_valid, valid_batch_size)

        earlystop = EarlyStopping(patience=patience)
        checkpoint = ModelCheckpoint(model_weight_path,
                                     save_best_only=save_best_only,
                                     save_weights_only=save_weights_only)
        history = History()

        self._model = Sequential()
        self._model.add(
            Embedding(self._name_encoder.char_size + 1,
                      output_dim=self._embedding_size))
        self._model.add(
            Bidirectional(LSTM(self._lstm_size1, return_sequences=True)))
        self._model.add(Dropout(rate=self._lstm_dropout1))
        self._model.add(Bidirectional(LSTM(self._lstm_size2)))
        self._model.add(Dropout(rate=self._lstm_dropout2))
        self._model.add(Dense(self._output_dim, activation='sigmoid'))
        self._model.compile(optimizer=self._optimizer,
                            loss=self._loss,
                            metrics=self._metrics)

        self._model.fit_generator(train_gtr.generate(),
                                  len(X_train) // batch_size,
                                  epochs=epochs,
                                  validation_data=valid_gtr.generate(),
                                  validation_steps=len(X_valid) //
                                  valid_batch_size,
                                  callbacks=[earlystop, checkpoint, history])
        for epoch in np.arange(0, len(self._model.history.history['loss'])):
            logger.info(
                f"Epoch={epoch + 1}, "
                f"{', '.join(f'{key}={value[epoch]}' for key, value in self._model.history.history.items())}"
            )

        # Save the model structure.
        with open(model_graph_path, 'w') as f:
            f.write(self._model.to_json())
Example #13
    def train(self, data: tf.keras.utils.Sequence, **kwargs):
        epochs = 1
        left_kwargs = copy.deepcopy(kwargs)
        if "aggregate_every_n_epoch" in kwargs:
            epochs = kwargs["aggregate_every_n_epoch"]
            del left_kwargs["aggregate_every_n_epoch"]
        left_kwargs["callbacks"] = [History()]
        self._model.fit(x=data, epochs=epochs, verbose=1, shuffle=True,
                        **left_kwargs)
        self._loss = left_kwargs["callbacks"][0].history["loss"]
        return epochs * len(data)
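
This works because History records one value per epoch in its history dict, keyed by metric name. A minimal illustration, assuming a compiled model and arrays x and y:

# History.history maps each metric name to a per-epoch list
cb = History()
model.fit(x, y, epochs=3, callbacks=[cb], verbose=0)
print(cb.history['loss'])  # a list with three entries, one per epoch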
Example #14
    def __getstate__(self):

        state = self.__dict__.copy()

        if hasattr(self, "model") and self.model is not None:
            buf = io.BytesIO()
            with h5py.File(buf, compression="lzf", mode="w") as h5:
                save_model(self.model, h5, overwrite=True, save_format="h5")
                buf.seek(0)
                state["model"] = buf
            if hasattr(self, "history"):
                from tensorflow.python.keras.callbacks import History

                history = History()
                history.history = self.history.history
                history.params = self.history.params
                history.epoch = self.history.epoch
                state["history"] = history
        return state
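
A plausible counterpart, not taken from the source, would restore the model from the buffer on unpickling:

    def __setstate__(self, state):
        # Hypothetical inverse of __getstate__ above
        import io

        import h5py
        from tensorflow.python.keras.models import load_model

        if isinstance(state.get("model"), io.BytesIO):
            state["model"].seek(0)
            with h5py.File(state["model"], mode="r") as h5:
                state["model"] = load_model(h5)
        self.__dict__.update(state)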
Example #15
def train():
    X_train, y_train = get_train_data()
    X_test, y_test = get_valid_data()
    # data generators
    train_gen = ImageDataGenerator(rescale=1.0 / 255,
                                   horizontal_flip=True,
                                   width_shift_range=4.0 / 32.0,
                                   height_shift_range=4.0 / 32.0)
    test_gen = ImageDataGenerator(rescale=1.0 / 255)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    # load network
    model = resnet50()
    #model.compile(Adam(0.001), "categorical_crossentropy", ["accuracy"])
    #model.compile(SGD(0.01, momentum = 0.9), "categorical_crossentropy", ["acc"])
    model.compile(SGD(0.01, momentum=0.9), "categorical_crossentropy",
                  ["acc", "top_k_categorical_accuracy"])
    model.summary()

    # set GPU
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    session = tf.Session(config=config)
    set_session(session)

    # training settings
    batch_size = 128
    scheduler = LearningRateScheduler(lr_scheduler)
    hist = History()

    start_time = time.time()

    model.fit_generator(train_gen.flow(X_train,
                                       y_train,
                                       batch_size,
                                       shuffle=True),
                        steps_per_epoch=X_train.shape[0] // batch_size,
                        validation_data=test_gen.flow(X_test,
                                                      y_test,
                                                      batch_size,
                                                      shuffle=False),
                        validation_steps=X_test.shape[0] // batch_size,
                        callbacks=[scheduler, hist],
                        max_queue_size=5,
                        epochs=50)

    elapsed = time.time() - start_time
    print('training time', elapsed)

    history = hist.history
    history["elapsed"] = elapsed

    with open("resnet50_2_DS_with_res_no_bias.pkl", "wb") as fp:
        pickle.dump(history, fp)
Example #16
    def __init__(self, config, dataset):

        self.config = config
        # Training and testing datasets.
        self.dataset = dataset

        # ConvNet model.
        self.model = Sequential()

        # History object, holds training history.
        self.history = History()

        # Saved model path.
        self.saved_model_path = self.config.config_namespace.saved_model_path

        # Checkpoint for ConvNet model.
        self.checkpoint = ModelCheckpoint(
            self.saved_model_path,
            monitor='val_acc',
            verbose=self.config.config_namespace.checkpoint_verbose,
            save_best_only=True,
            mode='max')
        self.early_stopping = EarlyStopping(monitor='val_loss',
                                            min_delta=0,
                                            patience=10,
                                            mode='auto',
                                            restore_best_weights=True)

        # Callbacks list.
        self.callbacks_list = [self.checkpoint, self.early_stopping]

        # Evaluation scores.
        self.scores = []

        # Training time.
        self.train_time = 0

        # Predicted class labels.
        self.predictions = np.array([])
        self.predictions_one_hot = np.array([])

        # Construct the ConvNet model.
        self.define_model()

        # Configure the ConvNet model.
        self.compile_model()

        # Train the ConvNet model using the training dataset.
        self.fit_model()

        # Evaluate the ConvNet model using testing dataset.
        self.evaluate_model()

        # Predict the class labels of testing dataset.
        self.predict()
Example #17
    def __init__(self,
                 stop_patience=10,
                 lr_factor=0.5,
                 lr_patience=1,
                 lr_epsilon=0.001,
                 lr_cooldown=4,
                 lr_minimum=1e-5,
                 outputDir=''):
        self.nl_begin = newline_callbacks_begin(outputDir)
        self.nl_end = newline_callbacks_end()

        self.stopping = EarlyStopping(monitor='val_loss',
                                      patience=stop_patience,
                                      verbose=1, mode='min')

        self.reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=lr_factor,
                                           patience=lr_patience,
                                           mode='min', verbose=1,
                                           epsilon=lr_epsilon,
                                           cooldown=lr_cooldown,
                                           min_lr=lr_minimum)

        self.modelbestcheck = ModelCheckpoint(
            outputDir + "/KERAS_check_best_model.h5",
            monitor='val_loss', verbose=1,
            save_best_only=True)

        self.modelbestcheckweights = ModelCheckpoint(
            outputDir + "/KERAS_check_best_model_weights.h5",
            monitor='val_loss', verbose=1,
            save_best_only=True, save_weights_only=True)

        self.modelcheckperiod = ModelCheckpoint(
            outputDir + "/KERAS_check_model_epoch{epoch:02d}.h5", verbose=1,
            period=10)

        self.modelcheck = ModelCheckpoint(
            outputDir + "/KERAS_check_model_last.h5", verbose=1)

        self.modelcheckweights = ModelCheckpoint(
            outputDir + "/KERAS_check_model_last_weights.h5", verbose=1,
            save_weights_only=True)

        self.tb = TensorBoard(log_dir=outputDir + '/logs')

        self.history = History()
        self.timer = Losstimer()

        self.callbacks = [
            self.nl_begin,
            self.modelbestcheck, self.modelbestcheckweights, self.modelcheck,
            self.modelcheckweights, self.modelcheckperiod,
            self.reduce_lr, self.stopping, self.nl_end, self.tb, self.history,
            self.timer
        ]
Example #18
def model_fit_predict():
    """
    Training example implemented following the Machine Learning Mastery guide
    on stateful/stateless LSTM time-series forecasting:
    https://machinelearningmastery.com/stateful-stateless-lstm-time-series-forecasting-python/
    The function takes data from the dictionary returned by the
    splitWindows.create_windows function.
    :return: np.array of predictions
    """

    X, y, test_input = windows_dict['X'], windows_dict['y'], windows_dict[
        'X_test']

    # Predictions are stored in a list
    predictions = []

    with tqdm(total=X.shape[0],
              desc="Training the model, saving predictions") as progress_bar:

        # Keep a History callback so the training error can be inspected later
        history = History()

        # build model framework
        current_model = model_builder(X)

        # Make predictions for each window
        for i in range(X.shape[0]):

            # TRAIN (FIT) model for each epoch
            # history = current_model.fit(
            #     input_X[i], target_X[i],
            #     epochs=_epochs, batch_size=batch,
            #     verbose=0, shuffle=False, validation_split=0.1,
            #     callbacks=[history]
            # )
            # print(X[i].shape, X[i].dtype, y[i].shape, y[i].dtype)
            for e in range(epochs):
                current_model.fit(X[i],
                                  y[i],
                                  epochs=1,
                                  batch_size=batch,
                                  verbose=0,
                                  shuffle=False,
                                  callbacks=[history])
                current_model.reset_states()

            # PREDICT and save results
            predictions.append(
                current_model.predict(test_input[i],
                                      batch_size=batch_test,
                                      verbose=0))

            progress_bar.update(1)

    return np.asarray(predictions)
Example #19
def train(alpha):
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    train_gen = ImageDataGenerator(rescale=1.0 / 255,
                                   horizontal_flip=True,
                                   width_shift_range=4.0 / 32.0,
                                   height_shift_range=4.0 / 32.0)
    test_gen = ImageDataGenerator(rescale=1.0 / 255)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    tf.logging.set_verbosity(tf.logging.FATAL)

    if alpha <= 0:
        model = create_normal_wide_resnet()
    else:
        model = create_octconv_wide_resnet(alpha)
    model.compile(SGD(0.1, momentum=0.9), "categorical_crossentropy", ["acc"])
    model.summary()

    # convert to tpu model
    tpu_grpc_url = "grpc://" + os.environ["COLAB_TPU_ADDR"]
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        tpu_grpc_url)
    strategy = keras_support.TPUDistributionStrategy(tpu_cluster_resolver)
    model = tf.contrib.tpu.keras_to_tpu_model(model, strategy=strategy)

    batch_size = 128
    scheduler = LearningRateScheduler(lr_scheduler)
    hist = History()

    start_time = time.time()
    model.fit_generator(train_gen.flow(X_train,
                                       y_train,
                                       batch_size,
                                       shuffle=True),
                        steps_per_epoch=X_train.shape[0] // batch_size,
                        validation_data=test_gen.flow(X_test,
                                                      y_test,
                                                      batch_size,
                                                      shuffle=False),
                        validation_steps=X_test.shape[0] // batch_size,
                        callbacks=[scheduler, hist],
                        max_queue_size=5,
                        epochs=200)
    elapsed = time.time() - start_time
    print(elapsed)

    history = hist.history
    history["elapsed"] = elapsed

    with open(f"octconv_alpha_{alpha}.pkl", "wb") as fp:
        pickle.dump(history, fp)
Example #20
    def update(self,
               names,
               genders,
               split_rate=0.2,
               batch_size=64,
               patience=1,
               model_weights_path=_classifier_weights_path,
               model_graph_path=_classifier_graph_path,
               model_weights_backup_path=_classifier_weights_backup_path,
               model_graph_backup_path=_classifier_graph_backup_path,
               save_best_only=True,
               save_weights_only=True,
               epochs=2):
        """ This function keep the original model, update the model and save it as default model. """
        names = self._encode_name(names, True)
        genders = self._encode_gender(genders, True)
        X_train, X_valid, y_train, y_valid = train_test_split(
            names, genders, test_size=split_rate)
        valid_batch_size = min(batch_size, len(X_valid) // 3)
        train_gtr = KerasBatchGenerator(X_train, y_train, batch_size)
        valid_gtr = KerasBatchGenerator(X_valid, y_valid, valid_batch_size)

        earlystop = EarlyStopping(patience=patience)
        checkpoint = ModelCheckpoint(model_weights_path,
                                     save_best_only=save_best_only,
                                     save_weights_only=save_weights_only)
        history = History()

        if not self._model:
            self.load()

        # Save the old model to backup.
        self._model.save_weights(model_weights_backup_path)
        with open(model_graph_backup_path, 'w') as f:
            f.write(self._model.to_json())

        self._model.fit_generator(train_gtr.generate(),
                                  len(X_train) // batch_size,
                                  epochs=epochs,
                                  validation_data=valid_gtr.generate(),
                                  validation_steps=len(X_valid) //
                                  valid_batch_size,
                                  callbacks=[earlystop, checkpoint, history])
        for epoch in np.arange(0, len(self._model.history.history['loss'])):
            logger.info(
                f"Epoch={epoch + 1}, "
                f"{', '.join(f'{key}={value[epoch]}' for key, value in self._model.history.history.items())}"
            )

        # Save the model structure.
        with open(model_graph_path, 'w') as f:
            f.write(self._model.to_json())
Example #21
def eval_callbacks_list(base_path='F:\\AI-modelsaver\\GTSRB\\LeNet5'):
    eval_log_dir = os.path.join(base_path,
                                datetime.datetime.now().strftime("%Y-%m-%d"),
                                datetime.datetime.now().strftime("%H-%M-%S"),
                                'Tensorboard', 'evaluate')
    if not os.path.exists(eval_log_dir):
        os.makedirs(eval_log_dir)

    eval_callback = [
        TensorBoard(eval_log_dir, update_freq='batch'),
        History()
    ]
    return eval_callback
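
A short usage sketch: in recent tf.keras versions Model.evaluate also accepts callbacks, so the list can be passed straight through (model, x_test and y_test are assumptions):

# Hypothetical usage of the helper above
model.evaluate(x_test, y_test, callbacks=eval_callbacks_list())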
Example #22
def appendix_trial(batch_size, use_tpu=True, sep=-1):
    tpu_flag = "tpu" if use_tpu else "gpu"
    filename = f"appendix_{tpu_flag}_batch_size_{batch_size}"
    if sep >= 0: filename += f"_sep_{sep}"
    filename += ".dat"

    result = {}

    for mode in range(3):
        if sep >= 0:
            if sep != mode: continue
        K.clear_session()
        model = create_wideresnet(7, 4, use_tpu)

        # mode 0 = fit as-is
        # mode 1 = truncate to a multiple of the batch size, then fit
        # mode 2 = fit_generator
        data_gen = ImageDataGenerator(rescale=1.0/255)

        nb_epochs = 20
        (X_train, y_train), (_, _) = cifar100.load_data()

        timer = Timer()
        hist = History()

        print("Start training...")
        print("mode = ", mode)

        if mode == 0:
            X_train = X_train / 255.0
            y_train = to_categorical(y_train)
            model.fit(X_train, y_train, batch_size=batch_size, epochs=nb_epochs, callbacks=[timer, hist])
        elif mode == 1:
            n_train = (X_train.shape[0] // batch_size) * batch_size
            X_train = X_train[:n_train, :, :, :] / 255.0
            y_train = to_categorical(y_train[:n_train, :])
            model.fit(X_train, y_train, batch_size=batch_size, epochs=nb_epochs, callbacks=[timer, hist])
        elif mode == 2:
            y_train = to_categorical(y_train)
            steps_per_epoch = X_train.shape[0] // batch_size
            model.fit_generator(data_gen.flow(X_train, y_train, batch_size=batch_size, shuffle=True),
                                steps_per_epoch=steps_per_epoch, epochs=nb_epochs, callbacks=[timer, hist])

        history = hist.history
        history["initial_time"] = timer.inital_time
        history["times"] = timer.times
        result[mode] = history

    with open(filename, "wb") as fp:
        pickle.dump(result, fp)
    return filename
Example #23
def create_callbacks(data_root, model_id, weights_label='weights', patience=4):
    filepath = data_root + 'models/' + model_id + '_' + weights_label + '.hdf5'
    estop_cb = EarlyStopping(monitor='val_loss',
                             min_delta=0,
                             patience=patience,
                             verbose=0,
                             mode='auto')
    save_best_cb = ModelCheckpoint(filepath,
                                   monitor='val_loss',
                                   verbose=0,
                                   save_best_only=True,
                                   save_weights_only=True,
                                   mode='auto',
                                   period=1)
    history_cb = History()
    return [estop_cb, save_best_cb, history_cb], filepath
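
A hypothetical usage of this helper, restoring the best weights after training (the model and data names are assumptions):

callbacks, weights_path = create_callbacks('data/', 'cnn_v1')
model.fit(x_train, y_train, validation_split=0.1, epochs=50,
          callbacks=callbacks)
model.load_weights(weights_path)  # restore the best checkpoint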
Example #24
def load_run_render(model, frame):
    # Resize & Grayscale input images, reshape array
    im = cv2.resize(frame, (img_size, img_size))
    im = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
    im = np.expand_dims(np.expand_dims(im, axis=0).astype(np.float32),
                        axis=-1) / 255

    # Fit model and return loss
    history = History()
    model.fit(im,
              im,
              epochs=1,
              batch_size=1,
              shuffle=False,
              callbacks=[history],
              verbose=0)
    return history.history['loss'][-1]
Example #25
    def fit(self, type='evaluation'):
        '''
        Fit the keras self.model
        '''
        # If the model has already been fit, return the cached error
        if self.error is not None: return self.error
        if self.model is None: self.model = self.build()
        self.history = History()
        self.history = self.model.fit(self.X_train, self.Y_train,
            epochs=self.epochs, callbacks=[self.history], verbose=self.verbose)
        # Assign fitness
        if self.final:
            self.error = self.history.history['loss'][-1]
        else:
            self.error = self.model.evaluate(self.X_test, self.Y_test,
                verbose=self.verbose) if type == 'evaluation' else self.history.history['loss'][-1]
        return self.error
Example #26
    def solve(self, train_X, train_Y, test_X, test_Y):
        """
        Initialize NN, train and test
        """
        input_dims = 1
        model = Sequential()
        model.add(
            Dense(self.hidden_neurons,
                  activation='relu',
                  input_shape=(input_dims, )))
        model.add(Dense(self.output_neurons, activation='linear'))

        adam = optimizers.Adam(lr=0.05)
        model.compile(loss='mean_squared_error',
                      optimizer=adam,
                      metrics=['accuracy'])

        history = History()
        callbacks = [
            history,
            ModelCheckpoint(filepath='best_model.h5',
                            monitor='loss',  # fit() gets no validation data, so monitor training loss
                            save_best_only=True,
                            mode='auto')
        ]

        # Train
        model.fit(train_X,
                  train_Y,
                  epochs=self.epochs,
                  batch_size=self.batch_size,
                  verbose=1,
                  callbacks=callbacks)

        loss_and_metrics = model.evaluate(test_X,
                                          test_Y,
                                          batch_size=self.batch_size)
        # Predict
        y_pred = model.predict(test_X,
                               batch_size=self.batch_size,
                               verbose=0,
                               steps=None)

        return y_pred
Example #27
def execute():
    data_in_chan = 1
    data_out_chan = 1
    data_x = 256
    data_y = 256 * data_in_chan
    model_x = 256
    model_y = 256
    batch_size = 10
    num_epochs = 25

    X_folder = 'C:\\Users\\axm3\\Documents\\data\\deepMRAC\\data\\bravo-tiff\\X'
    Y_folder = 'C:\\Users\\axm3\\Documents\\data\\deepMRAC\\data\\bravo-tiff\\Y'

    X_progress_file = 'C:\\Users\\axm3\\Documents\\data\\deepMRAC\\data\\bravo-tiff\\X\\train\\X_00001_00000001.tiff'
    Y_progress_file = 'C:\\Users\\axm3\\Documents\\data\\deepMRAC\\data\\bravo-tiff\\Y\\train\\Y_00001_00000001.tiff'

    print('creating model')
    model = Unet.UNetContinuous([model_x, model_y, data_in_chan],
                                out_ch=data_out_chan, start_ch=16, depth=4,
                                inc_rate=2., activation='relu', dropout=0.5,
                                batchnorm=True, maxpool=True, upconv=True,
                                residual=False)
    model = deeprad_keras_tools.wrap_model(model,
                                           (data_x, data_y, 1),
                                           (data_x, data_y, 1),
                                           (model_x, model_y, 1),
                                           (model_x, model_y, 1))
    model.compile(optimizer=Adam(lr=1e-3), loss=smooth_L1_loss,
                  metrics=[smooth_L1_loss, losses.mean_squared_error,
                           losses.mean_absolute_error])
    model.summary()

    print('creating data generators')
    train_gen = deeprad_keras_tools.get_keras_tiff_generator(
        os.path.join(X_folder, 'train'), os.path.join(Y_folder, 'train'),
        batch_size)
    val_gen = deeprad_keras_tools.get_keras_tiff_generator(
        os.path.join(X_folder, 'val'), os.path.join(Y_folder, 'val'),
        batch_size)

    print('creating callbacks')
    history = History()
    modelCheckpoint = ModelCheckpoint('weights.h5', monitor='loss', save_best_only=True)
    tblogdir = 'tblogs/{}'.format(time())
    tensorboard = TensorBoard(log_dir=tblogdir)
    X_progress = deeprad_keras_tools.read_images( [X_progress_file] )
    Y_progress = deeprad_keras_tools.read_images( [Y_progress_file] )
    tensorboardimage = deeprad_keras_tools.TensorBoardIm2ImCallback(log_dir=tblogdir,X=X_progress,Y=Y_progress)

    print('fitting model')
    model.fit_generator(train_gen,
                        validation_data=val_gen,
                        epochs=num_epochs,
                        use_multiprocessing=True,
                        max_queue_size=20,
                        workers=3,
                        callbacks=[history, modelCheckpoint, tensorboard,
                                   tensorboardimage])
Example #28
def train(use_ricap):
    batch_size = 1024
    if use_ricap:
        train_gen_instance = RICAPGenerator(rescale=1.0 / 255,
                                            width_shift_range=15.0 / 160,
                                            height_shift_range=15.0 / 160,
                                            horizontal_flip=True,
                                            ricap_beta=0.3)
    else:
        train_gen_instance = ImageDataGenerator(rescale=1.0 / 255,
                                                width_shift_range=15.0 / 160,
                                                height_shift_range=15.0 / 160,
                                                horizontal_flip=True)
    train_gen = train_gen_instance.flow_from_directory(
        "animeface-character-dataset/train",
        target_size=(160, 160),
        batch_size=batch_size)
    test_gen = ImageDataGenerator(rescale=1.0 / 255).flow_from_directory(
        "animeface-character-dataset/test",
        target_size=(160, 160),
        batch_size=batch_size)

    model = create_network()
    model.compile(tf.train.RMSPropOptimizer(1e-4), "categorical_crossentropy",
                  ["acc"])

    tpu_grpc_url = "grpc://" + os.environ["COLAB_TPU_ADDR"]
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        tpu_grpc_url)
    strategy = keras_support.TPUDistributionStrategy(tpu_cluster_resolver)
    model = tf.contrib.tpu.keras_to_tpu_model(model, strategy=strategy)

    hist = History()
    model.fit_generator(train_gen,
                        steps_per_epoch=10062 // batch_size,
                        callbacks=[hist],
                        validation_data=test_gen,
                        validation_steps=4428 // batch_size,
                        epochs=1)

    history = hist.history
    with open(f"anime_ricap_{use_ricap}.dat", "wb") as fp:
        pickle.dump(history, fp)
Example #29
def train():
    # Load the data
    data = np.load("wiki_crop/wiki_all.npz")
    image, gender, age = data["image"], data["gender"], data["age"]
    # Train/test split (just select the indices)
    np.random.seed(45)
    indices = np.random.permutation(image.shape[0])
    n_test = 8192  # use 8192 images as the test set
    test_indices = indices[:n_test]
    train_indices = indices[n_test:]

    # Build the model
    model = create_model()
    # Compile with a custom loss function
    model.compile(tf.train.RMSPropOptimizer(3e-3), multitask_loss)
    # Convert to a TPU model
    tpu_grpc_url = "grpc://" + os.environ["COLAB_TPU_ADDR"]
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        tpu_grpc_url)
    strategy = keras_support.TPUDistributionStrategy(tpu_cluster_resolver)
    model = tf.contrib.tpu.keras_to_tpu_model(model, strategy=strategy)

    # Training
    batch_size = 512
    history = History()
    checkpoint = Checkpoint(model)
    model.fit_generator(data_generator(image, gender, age, train_indices,
                                       batch_size, True),
                        steps_per_epoch=len(train_indices) // batch_size,
                        validation_data=data_generator(image, gender, age,
                                                       test_indices,
                                                       batch_size, False),
                        validation_steps=len(test_indices) // batch_size,
                        max_queue_size=1,
                        callbacks=[history, checkpoint],
                        epochs=50)

    # Save the results
    hist = history.history
    with open("history.dat", "wb") as fp:
        pickle.dump(hist, fp)
Example #30
    def train_new(self, channel):
        """
        Train LSTM model according to specifications in config.yaml.

        Args:
            channel (obj): Channel class object containing train/test data
                for X,y for a single channel
        """

        cbs = [History(), EarlyStopping(monitor='val_loss',
                                        patience=self.config.patience,
                                        min_delta=self.config.min_delta,
                                        verbose=0)]

        self.model = Sequential()

        self.model.add(LSTM(
            self.config.layers[0],
            # input_shape=(None, channel.X_train.shape[2]),
            return_sequences=True))
        self.model.add(Dropout(self.config.dropout))

        self.model.add(LSTM(
            self.config.layers[1],
            return_sequences=False))
        self.model.add(Dropout(self.config.dropout))

        self.model.add(Dense(
            self.config.n_predictions))
        self.model.add(Activation('linear'))

        self.model.compile(loss=self.config.loss_metric,
                           optimizer=self.config.optimizer)

        self.model.fit(channel.X_train,
                       channel.y_train,
                       batch_size=self.config.lstm_batch_size,
                       epochs=self.config.epochs,
                       validation_split=self.config.validation_split,
                       callbacks=cbs,
                       verbose=True)