Exemplo n.º 1
0
    def train(self):
        """Run the DexiNed training loop.

        Builds train/validation DataLoaders, a DexiNed model, and Adam,
        then trains for ``self.args.max_epochs`` epochs. Side effects:
        writes TensorBoard summaries under ``logs/<model>2<dataset>``,
        saves the best-so-far weights to ``DexiNedL_model.h5`` whenever
        the loss improves at a 20-step boundary, a per-epoch snapshot
        under the epochs checkpoint dir, and a visualization image to
        ``<output_dir>/current_training/results.png`` every 10 steps.
        A short validation pass (first 8 batches) runs after each epoch.
        """
        # Validation and Train dataset generation
        train_data = DataLoader(data_name=self.args.data4train, arg=self.args)
        n_train = train_data.indices.size  # data_cache["n_files"]
        val_data = DataLoader(data_name=self.args.data4train,
                              arg=self.args, is_val=True)

        # Summary and checkpoint manager
        model_dir = self.args.model_name + '2' + self.args.data4train
        summary_dir = os.path.join('logs', model_dir)
        train_log_dir = os.path.join(summary_dir, 'train')
        val_log_dir = os.path.join(summary_dir, 'test')

        checkpoint_dir = os.path.join(self.args.checkpoint_dir, model_dir)
        # NOTE(review): plain concatenation, so the dir is "<ckpt_dir>epochs"
        # with no path separator — confirm this naming is intentional.
        epoch_ckpt_dir = checkpoint_dir + 'epochs'
        os.makedirs(epoch_ckpt_dir, exist_ok=True)
        os.makedirs(train_log_dir, exist_ok=True)
        os.makedirs(val_log_dir, exist_ok=True)
        os.makedirs(checkpoint_dir, exist_ok=True)

        train_writer = tf.summary.create_file_writer(train_log_dir)
        val_writer = tf.summary.create_file_writer(val_log_dir)

        my_model = DexiNed(rgb_mean=self.args.rgbn_mean)

        accuracy = metrics.BinaryAccuracy()
        accuracy_val = metrics.BinaryAccuracy()
        loss_bc = losses.BinaryCrossentropy()
        optimizer = optimizers.Adam(
            learning_rate=self.args.lr, beta_1=self.args.beta1)

        imgs_res_folder = os.path.join(self.args.output_dir, "current_training")
        os.makedirs(imgs_res_folder, exist_ok=True)
        global_loss = 1000.  # best (lowest) training loss seen so far
        ckpt_save_mode = "h5"
        for epoch in range(self.args.max_epochs):
            # training
            t_loss = []  # per-step loss values for this epoch
            for step, (x, y) in enumerate(train_data):

                with tf.GradientTape() as tape:
                    pred = my_model(x, training=True)

                    preds, loss = pre_process_binary_cross_entropy(
                        loss_bc, pred, y, self.args, use_tf_loss=False)

                # preds[-1] is the fused output used for the accuracy metric.
                accuracy.update_state(y_true=y, y_pred=preds[-1])
                gradients = tape.gradient(loss, my_model.trainable_variables)
                optimizer.apply_gradients(zip(gradients, my_model.trainable_variables))

                # logging the current accuracy value so far.
                t_loss.append(loss.numpy())
                if step % 10 == 0:
                    print("Epoch:", epoch, "Step:", step, "Loss: %.4f" % loss.numpy(),
                          "Accuracy: %.4f" % accuracy.result(), time.ctime())
                    # visualize preds (3rd sample of the current batch)
                    img_test = 'Epoch: {0} Sample {1}/{2} Loss: {3}' \
                        .format(epoch, step, n_train // self.args.batch_size, loss.numpy())
                    vis_imgs = visualize_result(
                        x=x[2], y=y[2], p=preds, img_title=img_test)
                    cv.imwrite(os.path.join(imgs_res_folder, 'results.png'), vis_imgs)

                # Checkpoint the best model whenever the loss improves,
                # checked every 20 steps.
                if step % 20 == 0 and loss < global_loss:
                    if epoch == 0 and step == 0:
                        # Very first step: also write an initial summary point
                        # (epoch 0 is skipped by the end-of-epoch summary).
                        tmp_loss = np.array(t_loss)
                        with train_writer.as_default():
                            tf.summary.scalar('loss', tmp_loss.mean(), step=epoch)
                            tf.summary.scalar('accuracy', accuracy.result(), step=epoch)

                    save_ckpt_path = os.path.join(checkpoint_dir, "DexiNedL_model.h5")
                    Model.save_weights(my_model, save_ckpt_path, save_format='h5')

                    global_loss = loss
                    print("Model saved in:  ", save_ckpt_path, "Current loss:", global_loss.numpy())

            t_loss = np.array(t_loss)
            # train summary (epoch 0 was written inside the step loop above)
            if epoch != 0:
                with train_writer.as_default():
                    tf.summary.scalar('loss', t_loss.mean(), step=epoch)
                    tf.summary.scalar('accuracy', accuracy.result(), step=epoch)

            # Unconditional per-epoch snapshot, one file per epoch.
            Model.save_weights(my_model, os.path.join(epoch_ckpt_dir, "DexiNed{}_model.h5".format(str(epoch))),
                               save_format=ckpt_save_mode)
            print("Epoch:", epoch, "Model saved in Loss: ", t_loss.mean())

            # validation (first 8 batches only)
            t_val_loss = []
            for i, (x_val, y_val) in enumerate(val_data):

                pred_val = my_model(x_val)
                v_logits, V_loss = pre_process_binary_cross_entropy(
                    loss_bc, pred_val, y_val, self.args, use_tf_loss=False)
                accuracy_val.update_state(y_true=y_val, y_pred=v_logits[-1])
                t_val_loss.append(V_loss.numpy())
                if i == 7:
                    break
            val_acc = accuracy_val.result()
            t_val_loss = np.array(t_val_loss)
            print("Epoch(validation):", epoch, "Val loss: ", t_val_loss.mean(),
                  "Accuracy: ", val_acc.numpy())
            # validation summary
            with val_writer.as_default():
                tf.summary.scalar('loss', t_val_loss.mean(), step=epoch)
                tf.summary.scalar('accuracy', val_acc.numpy(), step=epoch)

            # Reset metrics every epoch
            accuracy.reset_states()
            accuracy_val.reset_states()

        my_model.summary()
Exemplo n.º 2
0
        # NOTE(review): this fragment starts mid-function — the `if` matching
        # the `else:` below is above this view (presumably a batched-vs-single
        # Y_hat check; confirm against the full file).
        # Batched case: write each prediction in Y_hat to its own HDF5 file,
        # renaming the "RGBNC" tag in the source name to "RGBNP".
        for i in range(len(Y_hat_name)):
            tmp_name = Y_hat_name[i]
            tmp_name = tmp_name.replace("RGBNC", "RGBNP")
            h5_writer(savepath=os.path.join(Yhat_dir, tmp_name),
                      data=np.squeeze(Y_hat[i, :, :, :]))
    else:
        # Single-prediction case. NOTE(review): replaces "RGBN" here but
        # "RGBNC" in the batched branch above — verify the differing tags
        # are intentional.
        tmp_name = Y_hat_name
        tmp_name = tmp_name.replace("RGBN", "RGBNP")
        h5_writer(savepath=os.path.join(Yhat_dir, tmp_name),
                  data=np.squeeze(Y_hat))


# Standalone testing path for the CDNet / ENDENet models: build the model,
# compile it, and restore previously saved weights for inference.
# NOTE(review): names bound here (my_model, data4testing, ...) are module
# globals that later code outside this view may rely on.
if arg.model_name == "CDNet" or arg.model_name == "ENDENet":

    if not arg.is_training:
        # Dataset used for inference.
        data4testing = DataLoader(data_name=arg.dataset_name, arg=arg)

        # Checkpoint/result layout follows the "<model>2<dataset>" convention.
        model_dir = arg.model_name.lower() + '2' + arg.dataset_name
        res_dir = os.path.join('results', model_dir)
        os.makedirs(res_dir, exist_ok=True)
        ckpnt_dir = os.path.join(arg.ckpt_dir, model_dir)
        ckpnt_path = os.path.join(ckpnt_dir, 'saved_weights.h5')

        my_model = CDENT()

        # Compile so loss/metrics are attached; the actual parameters come
        # from the checkpoint loaded below, not from training here.
        loss_mse = tfk.losses.mean_squared_error
        optimizer = tfk.optimizers.Adam(learning_rate=arg.lr)
        my_model.compile(optimizer=optimizer, loss=loss_mse, metrics='mse')
        input_shape = data4testing.input_shape
        my_model.build(input_shape=input_shape)
        my_model.load_weights(filepath=ckpnt_path)
Exemplo n.º 3
0
    def test(self):
        """Run DexiNed inference on the test split and save edge maps.

        Restores the epoch-23 checkpoint for the train dataset, runs the
        model over every test batch, and for each image writes:
        the fused prediction (PNG), the average of all side outputs (PNG),
        and all outputs stacked (H5). Prints per-image timing at the end.
        """
        # Test dataset generation
        test_data = DataLoader(data_name=self.args.data4test, arg=self.args)
        n_test = test_data.indices.size  # data_cache["n_files"]

        my_model = DexiNed(rgb_mean=self.args.rgbn_mean)
        input_shape = test_data.input_shape
        my_model.build(input_shape=input_shape)

        # Checkpoints live under "<model_name>2<data4train>", matching the
        # layout used by the training loop.
        checkpoint_dir = os.path.join(self.args.checkpoint_dir,
                                      self.args.model_name + "2" + self.args.data4train)

        my_model.load_weights(os.path.join(checkpoint_dir, "DexiNed23_model.h5"))

        result_dir = os.path.join(
            self.args.output_dir,
            self.args.model_name + '-' + self.args.data4train + "2" + self.args.data4test)
        os.makedirs(result_dir, exist_ok=True)
        # One sub-directory per output kind; suffixed with the scale if given.
        if self.args.scale is not None:
            scl = self.args.scale
            save_dir = ['fuse_'+str(scl), 'avrg_'+str(scl), 'h5_'+str(scl)]
        else:
            save_dir = ['fuse', 'avrg', 'h5']
        save_dirs = []
        for tmp_dir in save_dir:
            os.makedirs(os.path.join(result_dir, tmp_dir), exist_ok=True)
            save_dirs.append(os.path.join(result_dir, tmp_dir))

        total_time = []
        data_names = test_data.imgs_name
        data_shape = test_data.imgs_shape
        k = 0  # global image index across batches, for names/shapes lookup
        for step, (x, y) in enumerate(test_data):

            start_time = time.time()
            preds = my_model(x, training=False)
            total_time.append(time.time() - start_time)

            # One sigmoid-activated map per model output (side outputs + fuse).
            preds = [tf.sigmoid(i).numpy() for i in preds]
            all_preds = np.array(preds)
            for i in range(all_preds.shape[1]):
                tmp_name = data_names[k]
                tmp_name, _ = os.path.splitext(tmp_name)
                tmp_shape = data_shape[k]

                tmp_preds = all_preds[:, i, ...]
                # Append the average of all outputs as one extra map.
                tmp_av = np.expand_dims(tmp_preds.mean(axis=0), axis=0)
                tmp_preds = np.concatenate((tmp_preds, tmp_av), axis=0)
                res_preds = []
                for j in range(tmp_preds.shape[0]):
                    tmp_pred = tmp_preds[j, ...]
                    tmp_pred[tmp_pred < 0.0] = 0.0
                    # Normalize to 8-bit and invert so edges render dark.
                    tmp_pred = cv.bitwise_not(np.uint8(image_normalization(tmp_pred)))
                    h, w = tmp_pred.shape[:2]
                    # Restore the image's original spatial size if needed.
                    if h != tmp_shape[0] or w != tmp_shape[1]:
                        tmp_pred = cv.resize(tmp_pred, (tmp_shape[1], tmp_shape[0]))
                    res_preds.append(tmp_pred)
                # Last two maps are the fused output and the appended average;
                # they map onto save_dirs[0] ('fuse') and save_dirs[1] ('avrg').
                n_save = len(tmp_preds) - 2
                for idx in range(len(save_dirs) - 1):
                    s_dir = save_dirs[idx]
                    tmp = res_preds[n_save + idx]
                    cv.imwrite(join(s_dir, tmp_name + '.png'), tmp)
                h5_writer(path=join(save_dirs[-1], tmp_name + '.h5'),
                          vars=np.squeeze(res_preds))
                print("saved:", join(save_dirs[-1], tmp_name + '.h5'), tmp_preds.shape)
                k += 1

        total_time = np.array(total_time)

        print('-------------------------------------------------')
        print("End testing in: ", self.args.data4test)
        print("Batch size: ", self.args.test_bs)
        print("Time average per image: ", total_time.mean(), "secs")
        print("Total time: ", total_time.sum(), "secs")
        print('-------------------------------------------------')
Exemplo n.º 4
0
def train() -> None:
    """Custom training loop for the CDNet / ENDENet models.

    Reads configuration from the module-level ``arg`` namespace. Trains for
    ``arg.num_epochs`` epochs, saving weights to
    ``<ckpt_dir>/<model>2<dataset>/saved_weights.h5`` after every epoch and
    writing a visualization image to the results directory.
    NOTE(review): this function may continue past the visible end of the
    file; only the portion shown here is documented.
    """
    if arg.model_name.lower() == "cdnet" or arg.model_name.lower(
    ) == "endenet":
        # ***************data preparation *********************
        if arg.model_state.lower() == 'train':

            # dataset preparation for training
            running_mode = 'train'
            data4training = DataLoader(data_name=arg.dataset_name, arg=arg)
            # define model and callbacks
            model_dir = arg.model_name.lower() + '2' + arg.dataset_name
            ckpnt_dir = os.path.join(arg.ckpt_dir, model_dir)
            ckpnt_path = os.path.join(ckpnt_dir, 'saved_weights.h5')
            os.makedirs(ckpnt_dir, exist_ok=True)
            log_dir = os.path.join('logs', model_dir)
            res_dir = os.path.join('results', model_dir)
            os.makedirs(res_dir, exist_ok=True)

            # NOTE(review): my_callbacks is built but never used below — the
            # compile()/fit() path that would consume it is commented out.
            my_callbacks = [
                tfk.callbacks.ModelCheckpoint(
                    ckpnt_path,
                    monitor='train_loss',  # os.path.join(ckpnt,saved_weights.h5)
                    save_weights_only=True,
                    mode='auto',
                    save_freq='epoch'),
                tfk.callbacks.TensorBoard(log_dir,
                                          histogram_freq=0,
                                          write_graph=True,
                                          profile_batch=2,
                                          write_images=True)
            ]
            my_model = CDENT()

            loss_mse = tfk.losses.mean_squared_error
            accuracy = tfk.metrics.MeanAbsolutePercentageError()
            optimizer = tfk.optimizers.Adam(learning_rate=arg.lr, beta_1=0.5)
            # compile model
            # my_model.compile(optimizer=optimizer, loss=loss_mse)
            # my_model.fit(data4training, epochs=arg.num_epochs,callbacks=my_callbacks)

            # Manual training loop (used instead of fit() above).
            for epoch in range(arg.num_epochs):
                total_loss = tf.Variable(0.)
                for step, (x, y) in enumerate(data4training):

                    with tf.GradientTape() as tape:
                        p = my_model(x)
                        loss = loss_mse(y_true=y, y_pred=p)
                        loss = tf.math.reduce_sum(loss)
                    total_loss = tf.add(total_loss, loss)
                    accuracy.update_state(y_true=y, y_pred=p)
                    gradients = tape.gradient(loss,
                                              my_model.trainable_variables)
                    optimizer.apply_gradients(
                        zip(gradients, my_model.trainable_variables))

                    if step % 10 == 0:
                        print("Epoch:", epoch, "Step:", step,
                              "Loss: %.4f" % loss.numpy(),
                              "Accuracy: %.4f" % accuracy.result(),
                              time.ctime())

                # Save a checkpoint after every epoch (no best-loss gating).
                tfk.Model.save_weights(my_model, ckpnt_path, save_format='h5')
                print('Model saved in:', ckpnt_path)

                # visualize result
                # NOTE(review): divisor 50 looks like a hard-coded number of
                # steps per epoch — confirm against the dataset size.
                mean_loss = total_loss / 50
                # Uses the 3rd sample of the LAST batch of the epoch.
                tmp_x = image_normalization(np.squeeze(x[2, :, :, :3]))
                tmp_y = image_normalization(np.squeeze(y[2, ...]))
                tmp_p = p[2, ...]
                tmp_p = image_normalization(tmp_p.numpy())
                vis_imgs = np.uint8(
                    np.concatenate((tmp_x, tmp_y, tmp_p), axis=1))
                img_test = 'Epoch: {0}  Loss: {1}'.format(
                    epoch, mean_loss.numpy())
                BLACK = (0, 0, 255)  # NOTE(review): BGR red, despite the name
                font = cv.FONT_HERSHEY_SIMPLEX
                font_size = 1.1
                font_color = BLACK
                font_thickness = 2
                # NOTE(review): x, y here overwrite the last training batch
                # tensors of the same names.
                x, y = 30, 30
                vis_imgs = cv.putText(vis_imgs, img_test, (x, y), font,
                                      font_size, font_color, font_thickness,
                                      cv.LINE_AA)
                # NOTE(review): file name deliberately kept byte-identical —
                # it contains a leading space (' results.png').
                cv.imwrite(os.path.join(res_dir, ' results.png'), vis_imgs)

                print("<<< End epoch loss: ", mean_loss.numpy(), " >>>")

            my_model.summary()
            print('Training finished on: ', arg.dataset_name)