# Example #1
    print("using cpu")
    device = torch.device('cpu')

import torch
from torch import nn

# Dataset/config knobs for the pickling step below.
#name = "example"  # alternative (smaller) dataset name
name = "compiled_dataset_08131950"  # basename of the JSON dataset under data/
constructed = True  # True once the pickled splits already exist on disk
skip = False        # set True to bypass dataset construction entirely
embed_dim = 300     # word-embedding dimensionality (e.g. word2vec 300d)

## for training

if not skip and not constructed:
    # Split the raw JSON dataset 80/20 into train/test tensors plus the
    # combined instruction vocabulary.
    (train_states, train_inventories, train_actions, train_goals,
     train_instructions, test_states, test_inventories, test_actions,
     test_goals, test_instructions, all_instructions) = read_dataset(
        'data/' + name + '.json', 0.8)

    # Persist each training split to data/<name>_<suffix> as a pickle.
    train_splits = {
        'train_states': train_states,
        'train_inventories': train_inventories,
        'train_actions': train_actions,
        'train_goals': train_goals,
        'train_instructions': train_instructions,
    }
    for suffix, payload in train_splits.items():
        with open('data/' + name + '_' + suffix, 'wb') as f:
            pickle.dump(payload, f)
def main(argv=None):
    """Build a TF1 session and either train or visualize an FCN, per FLAGS.mode.

    argv[1] is forwarded to build_session (presumably a GPU id, mirroring the
    commented-out CUDA_VISIBLE_DEVICES line below — TODO confirm). NOTE(review):
    the default argv=None would raise TypeError on argv[1] if main() is called
    with no arguments; callers must pass an argv-like sequence.
    """
    np.random.seed(3796)  # fixed seed for reproducible numpy-side randomness
    # build_session returns the full graph/session bundle; see the preserved
    # commented-out block below for what it replaced inline.
    image, logits, keep_probability, sess, annotation, train_op, loss, acc, loss_summary, acc_summary, saver, pred_annotation, train_writer, validation_writer = build_session(
        argv[1])

    print("Setting up image reader...")
    train_records, valid_records = reader.read_dataset(FLAGS.data_dir)
    print(len(train_records))
    print(len(valid_records))

    print("Setting up dataset reader")
    image_options = {'resize': False, 'resize_size': IMAGE_SIZE}
    # Train reader only exists in train mode; validation reader is always
    # built (also used by the "visualize" branch).
    if FLAGS.mode == 'train':
        train_dataset_reader = dataset.Batch_manager(train_records,
                                                     image_options)
    validation_dataset_reader = dataset.Batch_manager(valid_records,
                                                      image_options)
    """ os.environ["CUDA_VISIBLE_DEVICES"] = argv[1]
    keep_probability = tf.placeholder(tf.float32, name="keep_probabilty")
    image = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="input_image")
    annotation = tf.placeholder(tf.int32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 1], name="annotation")
    pred_annotation, logits = inference(image, keep_probability)
    annotation_64 = tf.cast(annotation, dtype=tf.int64)
    # calculate accuracy for batch.
    cal_acc = tf.equal(pred_annotation, annotation_64)
    cal_acc = tf.cast(cal_acc, dtype=tf.int8)
    acc = tf.count_nonzero(cal_acc) / (FLAGS.batch_size * IMAGE_SIZE * IMAGE_SIZE)
    tf.summary.image("input_image", image, max_outputs=2)
    tf.summary.image("ground_truth", tf.cast(annotation, tf.uint8), max_outputs=2)
    tf.summary.image("pred_annotation", tf.cast(pred_annotation, tf.uint8), max_outputs=2)
    loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
                                                                          labels=tf.squeeze(annotation,
                                                                                            squeeze_dims=[3]),
                                                                          name="entropy")))
    loss_summary=tf.summary.scalar("entropy", loss)
    # summary accuracy in tensorboard
    acc_summary=tf.summary.scalar("accuracy", acc)
    trainable_var = tf.trainable_variables()
    if FLAGS.debug:
        for var in trainable_var:
            utils.add_to_regularization_and_summary(var)
    train_op = train(loss, trainable_var)
    print("Setting up summary op...")
    summary_op = tf.summary.merge_all()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    print("Setting up Saver...")
    saver = tf.train.Saver()
    # train_writer = tf.summary.FileWriter(FLAGS.logs_dir + '/train', sess.graph)
    # validation_writer = tf.summary.FileWriter(FLAGS.logs_dir + '/validation')
    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...") """

    if FLAGS.mode == "train":
        # NOTE(review): xrange is Python 2 — this module apparently targets
        # py2 (or aliases xrange elsewhere); confirm before porting.
        for itr in xrange(MAX_ITERATION):
            # next_batch here takes the saver/graph handles — the batch
            # manager apparently does its own checkpoint/inference work.
            train_images, train_annotations = train_dataset_reader.next_batch(
                saver, FLAGS.batch_size, image, logits, keep_probability, sess,
                FLAGS.logs_dir)
            feed_dict = {
                image: train_images,
                annotation: train_annotations,
                keep_probability: 1.0
            }
            tf.set_random_seed(3796 +
                               itr)  # get deterministicly random dropouts
            sess.run(train_op, feed_dict=feed_dict)

            # Every 50 steps: log train loss/acc to stdout, CSV and TensorBoard.
            if itr % 50 == 0:
                train_loss, train_acc, summary_loss, summary_acc = sess.run(
                    [loss, acc, loss_summary, acc_summary],
                    feed_dict=feed_dict)
                print("Step: %d, Train_loss: %g, Train_acc: %g" %
                      (itr, train_loss, train_acc))
                with open(join(FLAGS.logs_dir, 'iter_train_loss.csv'),
                          'a') as f:
                    f.write(str(itr) + ',' + str(train_loss) + '\n')
                with open(join(FLAGS.logs_dir, 'iter_train_acc.csv'),
                          'a') as f:
                    f.write(str(itr) + ',' + str(train_acc) + '\n')
                train_writer.add_summary(summary_loss, itr)
                train_writer.add_summary(summary_acc, itr)
            # Every 600 steps: validate and checkpoint.
            if itr % 600 == 0:
                valid_images, valid_annotations = validation_dataset_reader.next_batch(
                    saver, FLAGS.batch_size, image, logits, keep_probability,
                    sess, FLAGS.logs_dir, True)
                valid_loss, valid_acc, summary_loss, summary_acc = sess.run(
                    [loss, acc, loss_summary, acc_summary],
                    feed_dict={
                        image: valid_images,
                        annotation: valid_annotations,
                        keep_probability: 1.0
                    })
                validation_writer.add_summary(summary_loss, itr)
                validation_writer.add_summary(summary_acc, itr)
                print("%s ---> Validation_loss: %g , Validation Accuracy: %g" %
                      (datetime.datetime.now(), valid_loss, valid_acc))
                with open(join(FLAGS.logs_dir, 'iter_val_loss.csv'), 'a') as f:
                    f.write(str(itr) + ',' + str(valid_loss) + '\n')
                with open(join(FLAGS.logs_dir, 'iter_val_acc.csv'), 'a') as f:
                    f.write(str(itr) + ',' + str(valid_acc) + '\n')
                # NOTE(review): logs_dir + "model.ckpt" has no path separator —
                # works only if FLAGS.logs_dir ends with '/'; verify.
                saver.save(sess, FLAGS.logs_dir + "model.ckpt", itr)

    elif FLAGS.mode == "visualize":
        # Run inference on a random validation batch and dump input/GT/pred
        # images to the logs directory.
        valid_images, valid_annotations = validation_dataset_reader.get_random_batch(
            FLAGS.batch_size)
        pred = sess.run(pred_annotation,
                        feed_dict={
                            image: valid_images,
                            annotation: valid_annotations,
                            keep_probability: 1.0
                        })
        # Drop the trailing channel axis so images save as 2-D label maps.
        valid_annotations = np.squeeze(valid_annotations, axis=3)
        pred = np.squeeze(pred, axis=3)

        for itr in range(FLAGS.batch_size):
            print(valid_images[itr].astype(np.uint8).shape)
            # Only the first 3 channels are saved — input may carry extra
            # channels beyond RGB (TODO confirm).
            utils.save_image(valid_images[itr, :, :, :3].astype(np.uint8),
                             FLAGS.logs_dir,
                             name="inp_" + str(itr))
            print(valid_annotations[itr].astype(np.uint8).shape)
            utils.save_image(valid_annotations[itr].astype(np.uint8),
                             FLAGS.logs_dir,
                             name="gt_" + str(itr))
            print(pred[itr].astype(np.uint8).shape)
            utils.save_image(pred[itr].astype(np.uint8),
                             FLAGS.logs_dir,
                             name="pred_" + str(itr))
            print("Saved image: %d" % itr)
# Example #3
    # NOTE(review): the enclosing `def` line is missing from this chunk; this
    # body reads a GloVe-style embedding text file (`filePath`) into a
    # {word: np.ndarray} dict. Example path:
    # "E:\dataset\glove\glove.6B.50d.txt"
    f = open(filePath, 'r', encoding="utf8")  # NOTE(review): never closed — presumably fine for a one-shot loader
    model = dict()
    for line in f:
        # Normalize space-separated lines to tab-separated, then split:
        # token[0] is the word, the rest are its embedding components.
        splitLine = line.strip().replace(" ", "\t").split('\t')
        word = splitLine[0]
        embedding = np.array([float(val) for val in splitLine[1:]])
        model[word] = embedding
    print("Done.", len(model), " words loaded!")
    return model


if __name__ == '__main__':
    # read parameters
    train_df, dev_df = read_dataset("E:\\dataset\\sick2014\\SICK_train.csv",
                                    split=True,
                                    normalize_scores=True)
    test_df = read_dataset("E:\\dataset\\sick2014\\SICK_trial.csv",
                           split=False,
                           normalize_scores=True)
    pretrained = None
    save_path = "output.csv"
    # initialize objects
    print('Initializing objects ...')
    print('Initializing word embeddings ...')
    t1 = time.time()
    # /media/reza/book/dataset/word2vec/GoogleNews-vectors-negative300.bin
    # word_embeddings = WordEmbeddings("/media/reza/book/dataset/word2vec/GoogleNews-vectors-negative300.bin")
    word_embeddings = loadWordModel("E:\\dataset\\glove\\glove.6B.50d.txt")
    # /media/reza/book/Py_Projects/Lample2016-tagger-master/model_tag2vec.txt
    pos_embeddings = loadWordModel(