Example #1
0
def eval_alone(config):
    """Evaluate a saved checkpoint on the test split and write an eval file.

    Restores the model weights named by config.weights, runs one full-batch
    test pass, logs the accuracy, and emits the prediction/label file plus
    the confusion matrix via Evaluation_tools.
    """
    dataset = model_data.read_data(config.data, config, read_train=False).test

    seq_rnn_model = SequenceRNNModel(
        config.n_input_fc, config.num_views, config.n_hidden,
        config.decoder_embedding_size, config.num_classes + 1, config.n_hidden,
        batch_size=dataset.size(),  # one batch covers the whole test set
        is_training=False,
        use_lstm=config.use_lstm,
        use_attention=config.use_attention,
        use_embedding=config.use_embedding,
        num_heads=config.num_heads)
    seq_rnn_model.build_model("train")

    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    with tf.Session(config=tf_config) as sess:
        saver = tf.train.Saver()
        saver.restore(sess, get_modelpath(config.weights))
        acc, loss, predictions, labels = _test(dataset, seq_rnn_model, sess)
    log(config.log_file, "TESTING ACCURACY {}".format(acc))

    # Shift class ids back to zero-based before writing the evaluation file
    # (the model reserves id 0, hence num_classes+1 above).
    predictions = [p - 1 for p in predictions]
    labels = [y - 1 for y in labels]

    import Evaluation_tools as et
    eval_file = os.path.join(config.log_dir, '{}.txt'.format(config.name))
    et.write_eval_file(config.data, eval_file, predictions, labels, config.name)
    et.make_matrix(config.data, eval_file, config.log_dir)
Example #2
0
def eval_during_training(weights, model, epoch):
    """Run one evaluation pass with the given weights and log the metrics.

    A fresh session is opened, the supplied training weights are copied into
    the evaluation graph, and the resulting loss/accuracy are recorded under
    the "eval_loss" / "eval_accuracy" logger tags for this epoch.
    """
    test_set = model_data.read_data(config.data, config, read_train=False).test

    session_config = tf.ConfigProto()
    session_config.gpu_options.allow_growth = True
    with tf.Session(config=session_config) as sess:
        sess.run(tf.global_variables_initializer())
        # Transfer the current training weights into this eval graph.
        model.assign_weights(sess, weights, "eval")
        acc, loss, _, _ = _test(test_set, model, sess)

    log(config.log_file, 'TEST: EPOCH {} LOSS {} ACCURACY {}'.format(epoch, loss, acc))
    LOSS_LOGGER.log(loss, epoch, "eval_loss")
    ACC_LOGGER.log(acc, epoch, "eval_accuracy")
Example #3
0
def eval_during_training(weights, model, epoch):
    """Evaluate the supplied weights on the test split and record metrics.

    Variant that treats the returned accuracy as indexable and logs its
    first element ("eval_accuracy") together with the loss ("eval_loss").
    """
    test_split = model_data.read_data(config.data, config, read_train=False).test

    sess_conf = tf.ConfigProto()
    sess_conf.gpu_options.allow_growth = True
    with tf.Session(config=sess_conf) as sess:
        sess.run(tf.global_variables_initializer())
        # Load the training weights into this evaluation graph.
        model.assign_weights(sess, weights, "eval")
        acc, loss, _, _ = _test(test_split, model, sess)

    # acc is indexable here; element 0 is the headline accuracy.
    print("evaluation, acc=%f" %(acc[0]))
    LOSS_LOGGER.log(loss, epoch, "eval_loss")
    ACC_LOGGER.log(acc[0], epoch, "eval_accuracy")
Example #4
0
def train():
    """Train the sequence RNN model and periodically checkpoint it.

    Reads the training data from FLAGS.data_path, builds the model from the
    FLAGS hyper-parameters, then runs FLAGS.training_epoches epochs of
    mini-batch optimization, saving a checkpoint under get_modelpath()
    every FLAGS.save_epoches epochs.
    """
    data = model_data.read_data(FLAGS.data_path, n_views=FLAGS.n_views)
    seq_rnn_model = SequenceRNNModel(
        FLAGS.n_input_fc, FLAGS.n_views, FLAGS.n_hidden,
        FLAGS.decoder_embedding_size, FLAGS.n_classes + 1, FLAGS.n_hidden,
        learning_rate=FLAGS.learning_rate,
        keep_prob=FLAGS.keep_prob,
        batch_size=FLAGS.batch_size,
        is_training=True,
        use_lstm=FLAGS.use_lstm,
        use_attention=FLAGS.use_attention,
        use_embedding=FLAGS.use_embedding,
        num_heads=FLAGS.num_heads)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 0.5
    if not os.path.exists(get_modelpath()):
        os.makedirs(get_modelpath())

    with tf.Session(config=config) as sess:
        seq_rnn_model.build_model()
        saver = tf.train.Saver(max_to_keep=FLAGS.n_max_keep_model)
        sess.run(tf.global_variables_initializer())

        # Hoisted loop invariant: the training-set size does not change
        # between batches (assumption — next_batch only cycles the data).
        train_size = data.train.size()

        epoch = 1
        while epoch <= FLAGS.training_epoches:
            batch = 1
            # Drop the final partial batch: stop once a full batch no
            # longer fits.
            while batch * FLAGS.batch_size <= train_size:
                batch_encoder_inputs, batch_decoder_inputs = data.train.next_batch(FLAGS.batch_size)
                batch_encoder_inputs = batch_encoder_inputs.reshape(
                    (FLAGS.batch_size, FLAGS.n_views, FLAGS.n_input_fc))
                batch_encoder_inputs, batch_decoder_inputs, batch_target_weights = \
                    seq_rnn_model.get_batch(batch_encoder_inputs, batch_decoder_inputs,
                                            batch_size=FLAGS.batch_size)
                # forward_only=False: run the optimizer update as well.
                _, loss, _, _ = seq_rnn_model.step(
                    sess, batch_encoder_inputs, batch_decoder_inputs,
                    batch_target_weights, forward_only=False)
                print("epoch %d batch %d: loss=%f" %(epoch, batch, loss))
                batch += 1
            if epoch % FLAGS.save_epoches == 0:
                saver.save(sess, get_modelpath(), global_step=epoch)
            epoch += 1
Example #5
0
def test():
    """Restore checkpoint(s) and evaluate the sequence RNN on the test split.

    Runs a single full-test-set forward pass per checkpoint, dumps the
    decoder hidden state and the predicted labels as .npy files, and
    appends per-checkpoint accuracies to FLAGS.test_acc_file as CSV rows.
    NOTE: this function is Python 2 code (bare `print` statement below).
    """
    data = model_data.read_data(FLAGS.data_path, n_views=FLAGS.n_views, read_train=False)
    test_data = data.test
    # Batch size equals the whole test set: one step evaluates everything.
    seq_rnn_model = SequenceRNNModel(FLAGS.n_input_fc, FLAGS.n_views, FLAGS.n_hidden, FLAGS.decoder_embedding_size, FLAGS.n_classes+1, FLAGS.n_hidden,
                                     batch_size=test_data.size(),
                                     is_training=False,
                                     use_lstm=FLAGS.use_lstm,
                                     use_attention=FLAGS.use_attention,
                                     use_embedding=FLAGS.use_embedding,
                                     num_heads=FLAGS.num_heads)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    #config.gpu_options.per_process_gpu_memory_fraction = 0.3
    with tf.Session(config=config) as sess:
        seq_rnn_model.build_model()
        saver = tf.train.Saver()

        # Parse checkpoint paths from FLAGS.checkpoint_path, skipping the
        # first line; each remaining line apparently looks like
        # `key: "path"` (the [2:-2] slice strips ` "` and `"\n`) — TODO confirm.
        with open(FLAGS.checkpoint_path) as f:
            models = f.readlines()[1:]
            models = [line.split(":")[1] for line in models]
            models = [line[2:-2] for line in models]
        test_encoder_inputs, test_decoder_inputs = test_data.next_batch(test_data.size(), shuffle=False)
        target_labels = get_target_labels(test_decoder_inputs)
        test_encoder_inputs = test_encoder_inputs.reshape((-1, FLAGS.n_views, FLAGS.n_input_fc))
        test_encoder_inputs, test_decoder_inputs, test_target_weights = seq_rnn_model.get_batch(test_encoder_inputs,
                                                                                                test_decoder_inputs,
                                                                                                batch_size=test_data.size())
        #models = ["/home1/shangmingyang/data/3dmodel/mvmodel_result/best/shapenet55_color_128_256_0.0001_0.8661_0.7691/mvmodel.ckpt-10"]
        #models = ["/home1/shangmingyang/data/3dmodel/mvmodel_result/best/shapenet55_256_512_0.0002_0.8510_0.7511/mvmodel.ckpt-33"]
        # NOTE(review): this hard-coded single checkpoint OVERRIDES the list
        # parsed from FLAGS.checkpoint_path above — looks like a debug
        # leftover; confirm before relying on the flag.
        models = ["/home1/shangmingyang/data/3dmodel/mvmodel_result/best/shapenet55_nocolor_128_256_0.00005_0.5_0.8531_0.7471/mvmodel.ckpt-113"]
        #models = models[52:]
        for model_path in models:
            print(model_path)
            saver.restore(sess, model_path)

            # forward_only=True: inference only, no optimizer update.
            _, _, outputs, hidden = seq_rnn_model.step(sess, test_encoder_inputs, test_decoder_inputs, test_target_weights, forward_only=True)  # don't do optimize
            # Dump the decoder hidden state as features for downstream use.
            np.save("/home1/shangmingyang/data/ImgJoint3D/feature/shapenet55_nocolor_val", hidden)
            #attns_weights = np.array([attn_weight[0] for attn_weight in attns_weights])
            #attns_weights = np.transpose(attns_weights, (1, 0, 2))
            #np.save('modelnet10_test_attn', attns_weights)
            predict_labels = seq_rnn_model.predict(outputs, all_min_no=False)
            print "predict:", predict_labels
            np.save("predict", predict_labels)
            # accuracy(...) returns a mutable sequence; after insert(0, path)
            # acc[1]/acc[2] are instance/class accuracy (per the print below).
            acc = accuracy(predict_labels, target_labels)
            acc.insert(0, model_path)
            with open(FLAGS.test_acc_file, 'a') as f:
                w = csv.writer(f)
                w.writerow(acc)
            print("model:%s, acc_instance=%f, acc_class=%f" % (model_path, acc[1], acc[2]))
Example #6
0
def train(config):
    """Train the sequence RNN, evaluating and logging after every epoch.

    Builds a training model plus a separate evaluation twin (full-test-set
    batch, is_training=False). If config.weights != -1, training resumes
    from that epoch's checkpoint and the accuracy/loss logger histories are
    reloaded so the curves continue across restarts. Checkpoints are saved
    every config.save_period epochs and at the final epoch.
    """
    log(config.log_file, 'Loading data')
    data =  model_data.read_data(config.data, config)
    test_data = data.test
    seq_rnn_model = SequenceRNNModel(config.n_input_fc, config.num_views, config.n_hidden, config.decoder_embedding_size, config.num_classes+1, config.n_hidden,
                                     learning_rate=config.learning_rate,
                                     keep_prob=config.keep_prob,
                                     batch_size=config.batch_size,
                                     is_training=True,
                                     use_lstm=config.use_lstm,
                                     use_attention=config.use_attention,
                                     use_embedding=config.use_embedding,
                                     num_heads=config.num_heads)

    # Evaluation twin of the training model: one batch = whole test set.
    seq_rnn_model_test = SequenceRNNModel(config.n_input_fc, config.num_views, config.n_hidden, config.decoder_embedding_size, config.num_classes+1, config.n_hidden,
                                 batch_size=test_data.size(),
                                 is_training=False,
                                 use_lstm=config.use_lstm,
                                 use_attention=config.use_attention,
                                 use_embedding=config.use_embedding,
                                 num_heads=config.num_heads)

    seq_rnn_model_test.build_model("eval")

    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    tf_config.gpu_options.per_process_gpu_memory_fraction = 0.5

    with tf.Session(config=tf_config) as sess:
        seq_rnn_model.build_model("train")
        saver = tf.train.Saver()
        init = tf.global_variables_initializer()
        sess.run(init)
        start_epoch = 0
        # config.weights == -1 means "train from scratch"; otherwise it is
        # the epoch number of the checkpoint to resume from.
        WEIGHTS = config.weights
        if WEIGHTS!=-1:
            ld = config.log_dir
            start_epoch = WEIGHTS + 1
            saver.restore(sess, get_modelpath(WEIGHTS))
            # Reload logger histories up to the resume epoch so plots and
            # CSVs continue rather than restart.
            ACC_LOGGER.load((os.path.join(ld,"{}_acc_train_accuracy.csv".format(config.name)),
                             os.path.join(ld,"{}_acc_eval_accuracy.csv".format(config.name))), epoch = WEIGHTS)
            LOSS_LOGGER.load((os.path.join(ld,"{}_loss_train_loss.csv".format(config.name)),
                               os.path.join(ld,'{}_loss_eval_loss.csv'.format(config.name))), epoch = WEIGHTS)


        begin = start_epoch
        # Running metric buffers, flushed each time the log period elapses.
        accs = []
        losses = []
        end = config.max_epoch + start_epoch
        for epoch in xrange(begin, end + 1):
            batch = 1
            # Drop the final partial batch: stop once a full batch no longer fits.
            while batch * config.batch_size <= data.train.size():
                batch_encoder_inputs, batch_decoder_inputs = data.train.next_batch(config.batch_size)
                target_labels = get_target_labels(batch_decoder_inputs)
                batch_encoder_inputs = batch_encoder_inputs.reshape((config.batch_size, config.num_views, config.n_input_fc))
                batch_encoder_inputs, batch_decoder_inputs, batch_target_weights = seq_rnn_model.get_batch(batch_encoder_inputs, batch_decoder_inputs, batch_size=config.batch_size)
                # forward_only=False: includes the optimizer update.
                loss, logits = seq_rnn_model.step(sess, batch_encoder_inputs, batch_decoder_inputs, batch_target_weights,forward_only=False)
                predict_labels = seq_rnn_model.predict(logits)
                acc = accuracy(predict_labels, target_labels)
                accs.append(acc)
                losses.append(loss)

                # Log averaged metrics roughly every train_log_frq samples.
                # NOTE(review): under Python 2 (this file uses xrange) the
                # division below is integer division — confirm intended.
                if batch % max(config.train_log_frq/config.batch_size, 1) == 0:
                    loss = np.mean(losses)
                    acc = np.mean(accs)
                    LOSS_LOGGER.log( loss, epoch, "train_loss")
                    ACC_LOGGER.log( acc, epoch, "train_accuracy")
                    log(config.log_file, 'TRAINING: EPOCH {} ITERATION {} LOSS {} ACCURACY {}'.format(epoch, batch, loss, acc))
                    accs = []
                    losses = []
                batch += 1

            if epoch % config.save_period == 0 or epoch == end:
                saver.save(sess, get_modelpath(epoch))

            # Evaluate this epoch's weights on the test set via the eval twin.
            weights = seq_rnn_model.get_weights(sess)
            eval_during_training(weights, seq_rnn_model_test, epoch)

            ACC_LOGGER.save(config.log_dir)
            LOSS_LOGGER.save(config.log_dir)
            ACC_LOGGER.plot(dest=config.log_dir)
            LOSS_LOGGER.plot(dest=config.log_dir)
Example #7
0
#model_data = read_data(FLAGS.data_path)

# Training hyper-parameters.
learning_rate = 0.0001
training_iters = 3183 * 100  # total steps: presumably 3183 batches/epoch * 100 epochs — TODO confirm
batch_size = 10
display_step = 100  # report progress every N steps
save_step = 3183 * 1  # checkpoint every N steps
need_save = True

# Network Parameters
n_steps = 12  # timesteps — presumably one per rendered view of the 3D model; confirm against FLAGS.n_views
n_input = 4096  # per-timestep input feature size (fc-layer output, not 28*28 images)
#n_hidden = 128 # hidden layer num of features
n_classes = 40  # number of shape categories (40 suggests ModelNet40 — TODO confirm)

model_data = read_data(FLAGS.data_path)

# Graph inputs: x holds the view-feature sequences, y the one-hot labels;
# p is a scalar placeholder (presumably a dropout keep-prob — TODO confirm).
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])
p = tf.placeholder("float", shape=())

# Output projection from the RNN's final state to class logits.
weights = {
    'out': tf.Variable(tf.random_normal([FLAGS.hidden_size, n_classes]))
}
biases = {'out': tf.Variable(tf.random_normal([n_classes]))}


def RNN(x, weights, biases):
    x = tf.unstack(x, n_steps, 1)
    x_dropout = [tf.nn.dropout(xi, FLAGS.keep_prob) for xi in x]
    lstm_cell = rnn.BasicLSTMCell(FLAGS.hidden_size,