Example #1
import os

import h5py
import numpy as np
import tensorflow as tf

import model  # project-specific network definition

# BATCH_SIZE, IM_DIM, VOL_DIM, TRAIN_DIR and OUTPUT_DIR are assumed to be
# module-level constants defined elsewhere in this script.


def inference(dataset_):
    # Boolean placeholder switches batch-norm/dropout to inference mode.
    is_train_pl = tf.placeholder(tf.bool)
    img_pl, _, _ = model.placeholder_inputs(BATCH_SIZE, IM_DIM, VOL_DIM)

    # The network predicts regressed color, confidence, flow and blended
    # color volumes from the input image.
    pred_reg_clr, pred_conf, pred_flow, pred_blended_clr = model.get_model(
        img_pl, is_train_pl)

    # Grow GPU memory on demand instead of reserving it all up front.
    config = tf.ConfigProto()
    config.gpu_options.allocator_type = 'BFC'
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True

    with tf.Session(config=config) as sess:
        model_path = os.path.join(TRAIN_DIR, "trained_models")
        ckpt = tf.train.get_checkpoint_state(model_path)
        assert ckpt is not None, "No checkpoint found in %s" % model_path
        restorer = tf.train.Saver()
        restorer.restore(sess, ckpt.model_checkpoint_path)

        test_samples = dataset_.getTestSampleSize()

        # Test samples are processed one at a time.
        for batch_idx in range(test_samples):
            imgs, view_names = dataset_.next_test_batch(batch_idx, 1)

            feed_dict = {img_pl: imgs, is_train_pl: False}
            res_reg_clr, res_conf, res_flow, res_blended_clr = sess.run(
                [pred_reg_clr, pred_conf, pred_flow, pred_blended_clr],
                feed_dict=feed_dict)

            for i in range(len(view_names)):
                vol_reg_clr = res_reg_clr[i]  # (vol_dim, vol_dim, vol_dim, 3)
                vol_conf = res_conf[i]  # (vol_dim, vol_dim, vol_dim, 1)
                vol_flow = res_flow[i]  # (vol_dim, vol_dim, vol_dim, 2)
                vol_blended_clr = res_blended_clr[i]  # (vol_dim, vol_dim, vol_dim, 3)

                # view_names[i] is a (cloth, mesh, filename) triple.
                cloth = view_names[i][0]
                mesh = view_names[i][1]
                name_ = view_names[i][2][:-4]  # strip the file extension

                save_path = os.path.join(OUTPUT_DIR, cloth, mesh)
                if not os.path.exists(save_path):
                    os.makedirs(save_path)

                save_path_name = os.path.join(save_path, name_ + ".h5")
                if os.path.exists(save_path_name):
                    os.remove(save_path_name)

                # Concatenate all predictions into one 9-channel volume:
                # 3 (reg color) + 1 (conf) + 2 (flow) + 3 (blended color).
                vol_ = np.concatenate(
                    (vol_reg_clr, vol_conf, vol_flow, vol_blended_clr),
                    axis=-1)

                # Open in write mode explicitly; recent h5py versions no
                # longer default to a writable mode.
                h5_fout = h5py.File(save_path_name, 'w')
                h5_fout.create_dataset('data',
                                       data=vol_,
                                       compression='gzip',
                                       compression_opts=4,
                                       dtype='float32')
                h5_fout.close()

                print(batch_idx, save_path_name)
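
Each sample is stored as a single (vol_dim, vol_dim, vol_dim, 9) float32 array
under the 'data' key: 3 regressed-color + 1 confidence + 2 flow + 3
blended-color channels. Below is a minimal sketch of reading such a file back
and splitting the channels, assuming that same layout; the file path is
hypothetical.

import h5py

# Hypothetical output file produced by inference() above.
with h5py.File("output/cloth/mesh/sample.h5", "r") as f:
    vol = f["data"][:]  # (vol_dim, vol_dim, vol_dim, 9) float32

# Undo the axis=-1 concatenation done in inference().
vol_reg_clr = vol[..., 0:3]      # regressed color
vol_conf = vol[..., 3:4]         # confidence
vol_flow = vol[..., 4:6]         # flow
vol_blended_clr = vol[..., 6:9]  # blended color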
Example #2
# Reuses the imports and constants from Example #1; additionally assumes that
# args (parsed command-line flags), get_bn_decay, get_learning_rate and
# log_string are defined elsewhere in the training script.
def train(dataset_):
    with tf.Graph().as_default():
        with tf.device('/gpu:' + str(GPU_INDEX)):
            is_train_pl = tf.placeholder(tf.bool)
            img_pl, vol_clr_pl, vol_flow_pl = model.placeholder_inputs(
                BATCH_SIZE, IM_DIM, VOL_DIM)

            # Step counter; marked non-trainable so the optimizer ignores it.
            global_step = tf.Variable(0, trainable=False)
            bn_decay = get_bn_decay(global_step)

            # Build the network and the combined color/flow regression loss.
            pred_reg_clr, pred_conf, pred_flow, pred_blended_clr = model.get_model(
                img_pl, is_train_pl, weight_decay=args.wd, bn_decay=bn_decay)
            loss = model.get_loss(pred_reg_clr, pred_blended_clr, vol_clr_pl,
                                  pred_flow, vol_flow_pl)

            learning_rate = get_learning_rate(global_step)
            optimizer = tf.train.AdamOptimizer(learning_rate)
            train_op = optimizer.minimize(loss, global_step=global_step)

            saver = tf.train.Saver()

        config = tf.ConfigProto()
        config.gpu_options.allocator_type = 'BFC'
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True

        with tf.Session(config=config) as sess:
            model_path = os.path.join(TRAIN_DIR, "trained_models")
            # Create the checkpoint directory up front; Saver.save() fails if
            # the parent directory does not exist.
            if not os.path.exists(model_path):
                os.makedirs(model_path)
            if tf.gfile.Exists(os.path.join(model_path, "checkpoint")):
                ckpt = tf.train.get_checkpoint_state(model_path)
                restorer = tf.train.Saver()
                restorer.restore(sess, ckpt.model_checkpoint_path)
                print("Loaded parameters from checkpoint.")
            else:
                sess.run(tf.global_variables_initializer())

            train_sample_size = dataset_.getTrainSampleSize()
            train_batches = train_sample_size // BATCH_SIZE  # The number of batches per epoch

            val_sample_size = dataset_.getValSampleSize()
            val_batches = val_sample_size // BATCH_SIZE

            for epoch in range(TRAIN_EPOCHS):
                ####################
                # For training
                ####################
                dataset_.shuffleIds()
                for batch_idx in range(train_batches):
                    imgs, vols_flow, vols_clr = dataset_.next_flow_batch(
                        batch_idx * BATCH_SIZE, BATCH_SIZE, vol_dim=VOL_DIM)
                    feed_dict = {
                        img_pl: imgs,
                        vol_clr_pl: vols_clr,
                        vol_flow_pl: vols_flow,
                        is_train_pl: True
                    }

                    # Fetch the step counter, optimizer update and loss in a
                    # single run call instead of two separate ones.
                    step, _, loss_val = sess.run(
                        [global_step, train_op, loss], feed_dict=feed_dict)

                    log_string(
                        "<TRAIN> Epoch {} - Batch {} (step {}): loss: {}.".format(
                            epoch, batch_idx, step, loss_val))

                #####################
                # For validation
                #####################
                loss_sum = 0.0
                for batch_idx in range(val_batches):
                    imgs, vols_flow, vols_clr = dataset_.next_flow_batch(
                        batch_idx * BATCH_SIZE,
                        BATCH_SIZE,
                        vol_dim=VOL_DIM,
                        process="val")
                    feed_dict = {
                        img_pl: imgs,
                        vol_clr_pl: vols_clr,
                        vol_flow_pl: vols_flow,
                        is_train_pl: False
                    }

                    loss_val = sess.run(loss, feed_dict=feed_dict)
                    loss_sum += loss_val
                log_string("<VAL> Epoch {}: loss: {}.".format(
                    epoch, loss_sum / val_batches))

                #####################
                # Save model parameters.
                #####################
                if epoch % args.epochs_to_save == 0:
                    checkpoint_path = os.path.join(model_path, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=epoch)
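
train() relies on get_learning_rate and get_bn_decay helpers that the snippet
does not show. A common TF1 pattern for both is an exponential decay schedule
driven by global_step; the sketch below is an assumed implementation with
illustrative hyperparameters (BASE_LEARNING_RATE, DECAY_STEP, etc.), not the
repository's actual code.

import tensorflow as tf

# Illustrative assumptions, not values taken from the original project.
BASE_LEARNING_RATE = 1e-4
BASE_BN_DECAY = 0.5
DECAY_STEP = 200000
DECAY_RATE = 0.7
BN_DECAY_CLIP = 0.99


def get_learning_rate(global_step):
    # Decay the learning rate in discrete steps as training progresses.
    lr = tf.train.exponential_decay(BASE_LEARNING_RATE, global_step,
                                    DECAY_STEP, DECAY_RATE, staircase=True)
    # Keep the rate strictly positive.
    return tf.maximum(lr, 1e-6)


def get_bn_decay(global_step):
    # Batch-norm momentum conventionally moves toward 1.0 over training.
    bn_momentum = tf.train.exponential_decay(BASE_BN_DECAY, global_step,
                                             DECAY_STEP, DECAY_RATE,
                                             staircase=True)
    return tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)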