Code Example #1
    def __init__(self):
        self.sat_x = tf.placeholder(tf.float32, [None, 512, 512, 3],
                                    name='sat_x')
        self.grd_x = tf.placeholder(tf.float32, [None, 224, 1232, 3],
                                    name='grd_x')
        self.keep_prob = tf.placeholder(tf.float32)
        self.global_count = 0
        self.aerial_image_prefix = SATELLITE_IMAGE_PREFIX
        self.ground_image_prefix = GROUND_IMAGE_PREFIX
        self.num_imgs = NUM_SAMPLES

        # build model
        if NETWORK_TYPE == 'CVM-NET-I':
            self.sat_global, self.grd_global = cvm_net_I(
                self.sat_x, self.grd_x, self.keep_prob, False)
        elif NETWORK_TYPE == 'CVM-NET-II':
            self.sat_global, self.grd_global = cvm_net_II(
                self.sat_x, self.grd_x, self.keep_prob, False)
        else:
            print(
                'CONFIG ERROR: wrong network type, only CVM-NET-I and CVM-NET-II are valid'
            )

        # run model
        print('CVMInference object created')
        self.config = tf.ConfigProto(log_device_placement=False,
                                     allow_soft_placement=True)
        self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=None)

        with tf.Session(config=self.config) as sess:
            sess.run(tf.global_variables_initializer())
            print('loading model...')
            load_model_path = '../Model/' + NETWORK_TYPE + '/' + str(
                0) + '/model.ckpt'
            self.saver.restore(sess, load_model_path)
            print("   Model loaded from: %s" % load_model_path)
            print('load model...FINISHED')
            print('Test with black images')
            sat_zeros = np.zeros([1, 512, 512, 3])
            grd_zeros = np.zeros([1, 224, 1232, 3])
            feed_dict = {
                self.sat_x: sat_zeros,
                self.grd_x: grd_zeros,
                self.keep_prob: 1.0
            }
            sat_global_val, grd_global_val = sess.run(
                [self.sat_global, self.grd_global], feed_dict=feed_dict)
            print('Test on black images passed')
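For context, here is a rough sketch of how such an inference object might be fed a real image pair instead of zeros. The loader helpers, the use of OpenCV, and the resize-only preprocessing are assumptions for illustration, not the repository's actual input pipeline; only the placeholder shapes (512x512x3 satellite, 224x1232x3 ground) are taken from the code above.

import cv2
import numpy as np

def load_sat_image(path):
    # assumed preprocessing: resize to the 512x512x3 shape expected by sat_x
    img = cv2.imread(path).astype(np.float32)
    return cv2.resize(img, (512, 512))[np.newaxis, ...]

def load_grd_image(path):
    # assumed preprocessing: resize to the 224x1232x3 shape expected by grd_x
    img = cv2.imread(path).astype(np.float32)
    return cv2.resize(img, (1232, 224))[np.newaxis, ...]

# hypothetical usage, assuming a session and the model object are still available:
# sat_desc, grd_desc = sess.run(
#     [model.sat_global, model.grd_global],
#     feed_dict={model.sat_x: load_sat_image('sat.jpg'),
#                model.grd_x: load_grd_image('grd.jpg'),
#                model.keep_prob: 1.0})
# if the descriptors are L2-normalized (as in the CVM-Net paper), their dot
# product is a cosine similarity:
# similarity = float(np.dot(sat_desc[0], grd_desc[0]))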
Code Example #2
def train(start_epoch=1):
    '''
    Train the network and run the test.
    :param start_epoch: the epoch id to start training from. The first epoch is 1.
    '''

    # import data
    input_data = InputData()

    # define placeholders
    sat_x = tf.placeholder(tf.float32, [None, 512, 512, 3], name='sat_x')
    grd_x = tf.placeholder(tf.float32, [None, 224, 1232, 3], name='grd_x')
    keep_prob = tf.placeholder(tf.float32)
    learning_rate = tf.placeholder(tf.float32)

    # build model
    if network_type == 'CVM-NET-I':
        sat_global, grd_global = cvm_net_I(sat_x, grd_x, keep_prob,
                                           is_training)
    elif network_type == 'CVM-NET-II':
        sat_global, grd_global = cvm_net_II(sat_x, grd_x, keep_prob,
                                            is_training)
    else:
        print(
            'CONFIG ERROR: wrong network type, only CVM-NET-I and CVM-NET-II are valid'
        )

    # define loss
    loss = compute_loss(sat_global, grd_global, 0)

    # set training
    global_step = tf.Variable(0, trainable=False)
    with tf.device('/gpu:0'):
        with tf.name_scope('train'):
            train_step = tf.train.AdamOptimizer(
                learning_rate, 0.9, 0.999).minimize(loss,
                                                    global_step=global_step)

    saver = tf.train.Saver(tf.global_variables(), max_to_keep=None)

    # run model
    print('run model...')
    config = tf.ConfigProto(log_device_placement=False,
                            allow_soft_placement=True)
    #config.gpu_options.allow_growth = True
    #config.gpu_options.per_process_gpu_memory_fraction = 0.9
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())

        print('load model...')
        load_model_path = '../Model/' + network_type + '/' + str(
            start_epoch - 1) + '/model.ckpt'
        saver.restore(sess, load_model_path)
        print("   Model loaded from: %s" % load_model_path)
        print('load model...FINISHED')

        # Train
        for epoch in range(start_epoch, start_epoch + number_of_epoch):
            iter = 0
            if is_training:
                # train
                while True:
                    batch_sat, batch_grd = input_data.next_pair_batch(
                        batch_size)
                    if batch_sat is None:
                        break

                    global_step_val = tf.train.global_step(sess, global_step)

                    feed_dict = {
                        sat_x: batch_sat,
                        grd_x: batch_grd,
                        learning_rate: learning_rate_val,
                        keep_prob: keep_prob_val
                    }
                    if iter % 20 == 0:
                        _, loss_val = sess.run([train_step, loss],
                                               feed_dict=feed_dict)
                        print('global %d, epoch %d, iter %d: loss : %.4f' %
                              (global_step_val, epoch, iter, loss_val))
                    else:
                        sess.run(train_step, feed_dict=feed_dict)

                    iter += 1

            # ---------------------- validation ----------------------
            print('validate...')
            print('   compute global descriptors')
            input_data.reset_scan()
            sat_global_descriptor = np.zeros(
                [input_data.get_test_dataset_size(), 4096])
            grd_global_descriptor = np.zeros(
                [input_data.get_test_dataset_size(), 4096])
            val_i = 0
            while True:
                print('      progress %d' % val_i)
                batch_sat, batch_grd = input_data.next_batch_scan(batch_size)
                if batch_sat is None:
                    break
                feed_dict = {
                    sat_x: batch_sat,
                    grd_x: batch_grd,
                    keep_prob: 1.0
                }
                sat_global_val, grd_global_val = \
                    sess.run([sat_global, grd_global], feed_dict=feed_dict)

                sat_global_descriptor[
                    val_i:val_i + sat_global_val.shape[0], :] = sat_global_val
                grd_global_descriptor[
                    val_i:val_i + grd_global_val.shape[0], :] = grd_global_val
                val_i += sat_global_val.shape[0]

            print('   compute accuracy')
            val_accuracy = validate(grd_global_descriptor,
                                    sat_global_descriptor)
            with open('../Result/' + str(network_type) + '_accuracy.txt',
                      'a') as file:
                file.write(
                    str(epoch) + ' ' + str(iter) + ' : ' + str(val_accuracy) +
                    '\n')
            print('   %d: accuracy = %.1f%%' % (epoch, val_accuracy * 100.0))

            model_dir = '../Model/' + network_type + '/' + str(epoch) + '/'
            if not os.path.exists(model_dir):
                os.makedirs(model_dir)
            save_path = saver.save(sess, model_dir + 'model.ckpt')
            print("Model saved in file: %s" % save_path)
Code Example #3
def train(start_epoch=1):
    '''
    Train the network and run the test.
    :param start_epoch: the epoch id to start training from. The first epoch is 1.
    '''

    # import data (get the train and validation data) in the format
    # satellite filename, streetview filename, pano_id
    # its job is just to create a Python version of the list that's already
    # there in the test file
    input_data = InputData()

    # define placeholders to feed actual training examples
    # size of the actual images, satellite and ground
    #satellite (512, 512) image shape
    sat_x = tf.placeholder(tf.float32, [None, 512, 512, 3], name='sat_x')
    #ground (224, 1232) image shape
    grd_x = tf.placeholder(tf.float32, [None, 224, 1232, 3], name='grd_x')
    keep_prob = tf.placeholder(tf.float32)  #dropout
    learning_rate = tf.placeholder(tf.float32)

    # just BUILDING MODEL, satellite and ground image will be given later
    if network_type == 'CVM-NET-I':
        sat_global, grd_global = cvm_net_I(sat_x, grd_x, keep_prob,
                                           is_training)
    elif network_type == 'CVM-NET-II':
        sat_global, grd_global = cvm_net_II(sat_x, grd_x, keep_prob,
                                            is_training)
    else:
        print(
            'CONFIG ERROR: wrong network type, only CVM-NET-I and CVM-NET-II are valid'
        )

    # define loss
    loss = compute_loss(sat_global, grd_global, 0)

    # set training
    global_step = tf.Variable(0, trainable=False)
    with tf.device('/gpu:0'):
        with tf.name_scope('train'):
            train_step = tf.train.AdamOptimizer(
                learning_rate, 0.9, 0.999).minimize(loss,
                                                    global_step=global_step)

    saver = tf.train.Saver(tf.global_variables(), max_to_keep=None)

    # run model
    print('run model...')
    config = tf.ConfigProto(log_device_placement=False,
                            allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 0.9
    with tf.Session(config=config) as sess:

        sess.run(tf.global_variables_initializer())

        print('load model...')
        # load_model_path = '../Model/' + network_type + '/' + str(start_epoch - 1) + '/model.ckpt'
        # saver.restore(sess, load_model_path)
        # print("   Model loaded from: %s" % load_model_path)
        # print('load model...FINISHED')

        os.chdir('../../Model/')

        cwd = os.getcwd()
        load_model_path = cwd + '/' + network_name + '/' + network_name + '_model'
        print(load_model_path)
        saver = tf.train.import_meta_graph(load_model_path +
                                           "/model.ckpt.meta")
        print('   meta graph imported')
        load_model_path += '/model.ckpt'
        saver.restore(sess, load_model_path)
        print("   Model loaded from: %s" % load_model_path)
        print('load model...FINISHED')
        import tensorflow.contrib.slim as slim
        model_vars = tf.trainable_variables()
        slim.model_analyzer.analyze_vars(model_vars, print_info=True)

        print('training...')

        # Train
        for epoch in range(start_epoch, start_epoch + number_of_epoch):
            iter = 0
            while True:
                # train
                batch_sat, batch_grd = input_data.next_pair_batch(batch_size)
                if batch_sat is None:
                    break

                global_step_val = tf.train.global_step(sess, global_step)

                feed_dict = {
                    sat_x: batch_sat,
                    grd_x: batch_grd,
                    learning_rate: learning_rate_val,
                    keep_prob: keep_prob_val
                }
                print("run model")
                if iter % 20 == 0:
                    print('running {}'.format(iter))
                    _, loss_val = sess.run([train_step, loss],
                                           feed_dict=feed_dict)
                    print('global %d, epoch %d, iter %d: loss : %.4f' %
                          (global_step_val, epoch, iter, loss_val))
                else:
                    print("running")
                    sess.run(train_step, feed_dict=feed_dict)
                print("ran once?")
                iter += 1
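Unlike example #2, this variant rebuilds the network in Python and then also calls tf.train.import_meta_graph, which imports a second copy of the graph from the .meta file; the import is usually only needed when the model-building code is not available. Below is a minimal, generic TF1 sketch of the two restore patterns, with a placeholder checkpoint path and a stand-in variable (both assumptions, not CVM-Net specifics):

import tensorflow as tf

ckpt_prefix = '/path/to/model.ckpt'  # placeholder checkpoint prefix

# Pattern A: the graph is rebuilt in code (e.g. by cvm_net_I), so a Saver over
# the existing variables restores the weights directly.
w = tf.get_variable('w', shape=[4096])   # stand-in for the rebuilt network
saver = tf.train.Saver(tf.global_variables())
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, ckpt_prefix)     # fails here unless the file exists
                                         # and contains matching variables

# Pattern B: no model code at hand; import the graph definition from the
# .meta file and restore into it. Note that this adds nodes to the default
# graph, which is why combining it with a rebuilt graph duplicates variables.
tf.reset_default_graph()
with tf.Session() as sess:
    saver = tf.train.import_meta_graph(ckpt_prefix + '.meta')
    saver.restore(sess, ckpt_prefix)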
Code Example #4
File: train_google.py  Project: raaslab/CVM-Net
def train(start_epoch=1):
    '''
    Train the network and run the test.
    :param start_epoch: the epoch id to start training from. The first epoch is 1.
    '''

    # import data (get the train and validation data) in the format
    # satellite filename, streetview filename, pano_id
    # its job is just to create a Python version of the list that's already
    # there in the test file
    input_data = InputData()

    # define placeholders to feed actual training examples
    # size of the actual images, satellite and ground
    # satellite (512, 512) image shape
    sat_x = tf.placeholder(tf.float32, [None, 512, 512, 3], name='sat_x')
    # ground (224, 1232) image shape
    grd_x = tf.placeholder(tf.float32, [None, 224, 1232, 3], name='grd_x')
    keep_prob = tf.placeholder(tf.float32)  # dropout
    learning_rate = tf.placeholder(tf.float32)

    # just BUILDING MODEL, satellite and ground image will be given later
    if network_type == 'CVM-NET-I':
        sat_global, grd_global = cvm_net_I(sat_x, grd_x, keep_prob,
                                           is_training)
    elif network_type == 'CVM-NET-II':
        sat_global, grd_global = cvm_net_II(sat_x, grd_x, keep_prob,
                                            is_training)
    else:
        print(
            'CONFIG ERROR: wrong network type, only CVM-NET-I and CVM-NET-II are valid'
        )

    # define loss
    loss = compute_loss(sat_global, grd_global, 0)

    # set training
    global_step = tf.Variable(0, trainable=False)
    with tf.device('/gpu:0'):
        with tf.name_scope('train'):
            train_step = tf.train.AdamOptimizer(
                learning_rate, 0.9, 0.999).minimize(loss,
                                                    global_step=global_step)

    saver = tf.train.Saver(tf.global_variables(), max_to_keep=None)

    # run model
    print('run model...')
    config = tf.ConfigProto(log_device_placement=False,
                            allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 0.9

    plt.ion()
    plt.xlabel('epochs')
    plt.ylabel('loss/accuracy')
    plt.show()
    p = []
    p_val = []
    p_acc = []
    p_acc_ = []
    with tf.Session(config=config) as sess:

        sess.run(tf.global_variables_initializer())

        print('load model...')

        ### don't uncomment ###
        # load_model_path = '../Model/' + network_type + '/' + str(start_epoch - 1) + '/model.ckpt'
        # saver.restore(sess, load_model_path)
        # print("   Model loaded from: %s" % load_model_path)
        # print('load model...FINISHED')

        os.chdir('../Model/')
        cwd = os.getcwd()
        if (start_epoch == 1):
            load_model_path = cwd + '/' + network_name + '/' + network_name + '_model'
        # else:
        #     load_model_path = cwd + '/' + network_name + '/' + network_name + '_syd_original/' + network_type + '/' + str(start_epoch)
        saver = tf.train.import_meta_graph(load_model_path +
                                           "/model.ckpt.meta")
        load_model_path += '/model.ckpt'
        saver.restore(sess, load_model_path)
        print("   Model loaded from: %s" % load_model_path)
        print('load model...FINISHED')
        # import tensorflow.contrib.slim as slim
        # model_vars = tf.trainable_variables()
        # slim.model_analyzer.analyze_vars(model_vars, print_info=True)

        print('training...from epoch {}'.format(start_epoch))
        # Train

        for epoch in range(start_epoch, start_epoch + number_of_epoch):
            iter = 0
            train_loss = []
            val_loss = []
            while True:
                # train
                batch_sat, batch_grd = input_data.next_pair_batch(batch_size)
                if batch_sat is None:
                    break

                global_step_val = tf.train.global_step(sess, global_step)

                feed_dict = {
                    sat_x: batch_sat,
                    grd_x: batch_grd,
                    learning_rate: learning_rate_val,
                    keep_prob: keep_prob_val
                }
                print("run model")
                # if iter % 20 == 0:
                # print('running {}'.format(iter))
                _, loss_val = sess.run([train_step, loss], feed_dict=feed_dict)
                train_loss.append(loss_val)
                print('global %d, epoch %d, iter %d: loss : %.4f' %
                      (global_step_val, epoch, iter, loss_val))
                iter += 1

            plt.legend()
            p += [np.mean(train_loss)]
            plt.plot(p, 'b-')
            plt.pause(0.05)

            # ---------------------- validation ----------------------
            print('validate...')
            print('   compute global descriptors')
            input_data.reset_scan()
            sat_global_descriptor = np.zeros(
                [input_data.get_test_dataset_size(), 4096])
            grd_global_descriptor = np.zeros(
                [input_data.get_test_dataset_size(), 4096])
            val_i = 0
            while True:
                print('      progress %d' % val_i)
                # get the sat and grd batch; this is just the input images
                batch_sat, batch_grd = input_data.next_batch_scan(batch_size)
                if batch_sat is None:
                    break  # break once all batches are over
                # create a dictionary
                feed_dict = {
                    sat_x: batch_sat,
                    grd_x: batch_grd,
                    keep_prob: 1.0
                }

                # this dictionary stores all the global descriptors
                sat_global_val, grd_global_val = \
                    sess.run([sat_global, grd_global], feed_dict=feed_dict)
                # print('sat_global_val ', sat_global_val)

                val_loss.append(sess.run(loss, feed_dict=feed_dict))

                sat_global_descriptor[
                    val_i:val_i + sat_global_val.shape[0], :] = sat_global_val
                grd_global_descriptor[
                    val_i:val_i + grd_global_val.shape[0], :] = grd_global_val
                val_i += sat_global_val.shape[0]  # is this 64*512?

            # print('val_loss ', val_loss)
            p_val += [np.mean(val_loss)]
            plt.plot(p_val, 'r-')
            plt.pause(0.05)

            print('   compute accuracy')
            val_accuracy, val_accuracy_ = validate(grd_global_descriptor,
                                                   sat_global_descriptor)
            p_acc += [val_accuracy]
            p_acc_ += [val_accuracy_]
            plt.plot(p_acc, 'k-')
            plt.pause(0.05)
            plt.plot(p_acc_, 'g-')
            plt.pause(0.05)

            with open('../Result/' + str(network_type) + '_accuracy.txt',
                      'a') as file:
                file.write(
                    str(epoch) + ' ' + str(iter) + ' : ' + str(val_accuracy) +
                    '\n')
            print('   %d: accuracy = %.1f%%' % (epoch, val_accuracy * 100.0))
            print('accuracy_ ', val_accuracy_)
            cwd = os.getcwd()
            os.chdir('../Model/CVM-Net-I/CVM-Net-I_sydney_dense/')
            cwd = os.getcwd()
            os.chdir('../../../CVM-Net/')

            model_dir = cwd + '/' + network_type + '/' + str(epoch) + '/'
            if not os.path.exists(model_dir):
                os.makedirs(model_dir)
                if (epoch > 70 or epoch % 5 == 0):
                    save_path = saver.save(sess, model_dir + 'model.ckpt')
                    # sio.savemat(model_dir + 'np_vector_CVM_Net.mat', {'sat_global_descriptor': sat_global_descriptor,
                    #                                                   'grd_global_descriptor': grd_global_descriptor})
                    print("Model saved in file: %s" % save_path)
Code Example #5
def get_descriptors():

    model_name = 'nyc'
    which_epoch = 119
    Data_folder = 'nyc'
    # get descriptors of images mentioned in this file
    # Test_file = 'train-19zl.csv'
    Test_file = 'train-test.csv'
    save_file = 'all_nyc_descriptors.mat'

    input_data = ValidateData(Data_folder, Test_file)

    sat_x = tf.placeholder(tf.float32, [None, 512, 512, 3], name='sat_x')
    grd_x = tf.placeholder(tf.float32, [None, 224, 1232, 3], name='grd_x')
    keep_prob = tf.placeholder(tf.float32)

    if network_type == 'CVM-NET-I':
        sat_global, grd_global = cvm_net_I(sat_x, grd_x, keep_prob, is_training)
    elif network_type == 'CVM-NET-II':
        sat_global, grd_global = cvm_net_II(sat_x, grd_x, keep_prob, is_training)
    else:
        print('CONFIG ERROR: wrong network type, only CVM-NET-I and CVM-NET-II are valid')

    saver = tf.train.Saver(tf.global_variables(), max_to_keep=None)

    config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 0.9

    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())

        print('load model...')
        os.chdir('../Model/')

        cwd = os.getcwd()
        # load the interleaved sydney dataset
        load_model_path = cwd + '/' + network_name + '/' + network_name + '_'+ model_name + '/' + network_type + '/' + str(which_epoch)
        # print(load_model_path)
        saver = tf.train.import_meta_graph(load_model_path + "/model.ckpt.meta")
        load_model_path += '/model.ckpt'
        saver.restore(sess, load_model_path)
        print("   Model loaded from: %s" % load_model_path)
        print('load model...FINISHED')

        print('testing...')

        print('   compute global descriptors')
        input_data.reset_scan()
        sat_global_descriptor = np.zeros(
            [input_data.get_test_dataset_size(), 4096])
        grd_global_descriptor = np.zeros(
            [input_data.get_test_dataset_size(), 4096])
        val_i = 0
        # this is for train
        while True:
            print('progress %d' % val_i)
            batch_sat, batch_grd = input_data.next_batch_scan(batch_size)
            if batch_sat is None:
                break  # break when all the batches are evaluated
            feed_dict = {sat_x: batch_sat, grd_x: batch_grd, keep_prob: 1.0}
            # works fine until here
            # forward pass
            sat_global_val, grd_global_val = \
                sess.run([sat_global, grd_global], feed_dict=feed_dict)  # feed in the batch input here

            sat_global_descriptor[val_i: val_i + sat_global_val.shape[0], :] = sat_global_val
            grd_global_descriptor[val_i: val_i + grd_global_val.shape[0], :] = grd_global_val
            val_i += sat_global_val.shape[0]

        cwd = os.getcwd()
        os.chdir('../Model/CVM-Net-I/CVM-Net-I_'+ model_name +'/')
        cwd = os.getcwd()
        os.chdir('../../../CVM-Net/')

        model_dir = cwd + '/' + network_type + '/'
        # print('compute accuracy')
        # # This would be the train and test accuracy
        # val_accuracy, val_accuracy_ = validate(grd_global_descriptor, sat_global_descriptor)
        # print("10_percent ",val_accuracy)
        # print("1_percent ", val_accuracy_)
        sio.savemat(model_dir + save_file,
                    {'sat_global_descriptor': sat_global_descriptor, 'grd_global_descriptor': grd_global_descriptor})
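The .mat file written here is consumed by code like find_knn in the next example. As a rough, self-contained sketch (the dictionary keys match the sio.savemat call above; the function name and everything else are illustrative assumptions), the k closest satellite descriptors to a single ground-view descriptor could be retrieved like this:

import numpy as np
import scipy.io as sio

def find_topk_matches(mat_file, query_grd_desc, k=5):
    """Return indices of the k satellite descriptors closest to one ground
    descriptor, using a .mat file with the keys saved above."""
    data = sio.loadmat(mat_file)
    sat_desc = data['sat_global_descriptor']            # shape (N, 4096)
    # squared Euclidean distance from the query to every satellite descriptor
    dists = np.sum((sat_desc - query_grd_desc.reshape(1, -1)) ** 2, axis=1)
    return np.argsort(dists)[:k]

# hypothetical usage:
# topk = find_topk_matches('all_nyc_descriptors.mat', grd_global_descriptor[0], k=10)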
Code Example #6
    def find_knn(self):
        tf.reset_default_graph()
        # where is the trained model stored?
        model_name = 'nyc/'
        # which trained epoch model do you want to use?
        which_epoch = 119
        # name of the file where satellite descriptors are stored
        sat_descriptor_file = 'all_nyc_descriptors.mat'
        # what is the name of the folder where you will find the images?
        Data_folder = 'nyc'

        # import data (get the test data) in the format,
        # uncomment the other TestData class in input_data.py if you are using this
        input_data = TestData(Data_folder, self.image_path)
        # define placeholders to feed actual training examples
        # size of the actual images satellite and ground
        # satellite (512, 512) image shape
        sat_x = tf.placeholder(tf.float32, [None, 512, 512, 3], name='sat_x')
        # ground (224, 1232) image shape
        grd_x = tf.placeholder(tf.float32, [None, 224, 1232, 3], name='grd_x')
        keep_prob = tf.placeholder(tf.float32)  # dropout

        # just BUILDING MODEL, satellite and ground image will be given later
        if network_type == 'CVM-NET-I':
            sat_global, grd_global = cvm_net_I(sat_x, grd_x, keep_prob, is_training)
        elif network_type == 'CVM-NET-II':
            sat_global, grd_global = cvm_net_II(sat_x, grd_x, keep_prob, is_training)
        else:
            print('CONFIG ERROR: wrong network type, only CVM-NET-I and CVM-NET-II are valid')

        # This variable downgrades the accuracy, but why?
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=None)
        # run model
        print('run model...')
        config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = 0.9

        with tf.Session(config=config) as sess:

            sess.run(tf.global_variables_initializer())

            print('load model...')

            os.chdir('../Data/')
            image_store = os.getcwd()

            os.chdir('../Model/')
            cwd = os.getcwd()

            load_model_path = cwd + '/' + network_name + '/' + network_name + '_' + model_name + '/' + network_type + '/' + str(which_epoch)
            sat_descriptor_file = '/scratch1/crossview2/descriptors/' + sat_descriptor_file

            saver = tf.train.import_meta_graph(load_model_path + "/model.ckpt.meta")
            load_model_path += '/model.ckpt'
            saver.restore(sess, load_model_path)
            print("   Model loaded from: %s" % load_model_path)
            print('load model...FINISHED')

            # ---------------------- Testing ----------------------
            print('Test...')
            print('compute global descriptors')
            input_data.reset_scan()
            sat_global_descriptor = np.zeros([input_data.get_test_dataset_size(), 4096])
            grd_global_descriptor = np.zeros([input_data.get_test_dataset_size(), 4096])
            # grd_global_descriptor = np.zeros([1, 4096])
            val_i = 0
            while True:
                print('progress %d' % val_i)
                # get the sat and grd batch; this is just the input images
                batch_sat, batch_grd = input_data.next_batch_scan(batch_size)
                if batch_sat is None:
                    break  # break once all batches are over
                # create a dictionary
                feed_dict = {sat_x: batch_sat, grd_x: batch_grd, keep_prob: 1.0}

                # this dictionary stores all the global descriptors
                sat_global_val, grd_global_val = \
                    sess.run([sat_global, grd_global], feed_dict=feed_dict)

                sat_global_descriptor[val_i: val_i + sat_global_val.shape[0], :] = sat_global_val
                grd_global_descriptor[val_i: val_i + grd_global_val.shape[0], :] = grd_global_val
                val_i += sat_global_val.shape[0]  # is this 64*512?

            print('compute accuracy')
            topk_images = validate(grd_global_descriptor, sat_descriptor_file, self.k)
            # print(np.shape(topk_images))

            gnd_image_paths = []
            sat_image_paths = []
            gps_coordinates = []
            for im in topk_images:
                gps_coordinates.append(self.points[im])
                gnd_image_path = image_store + '/streetview/' + str(im) + '.jpg'
                sat_image_path = image_store + '/satellite/' + str(im) + '.jpg'
                gnd_image_paths.append(gnd_image_path)
                sat_image_paths.append(sat_image_path)
            # print(np.shape(gnd_image_paths))
            # print(np.shape(sat_image_paths))
            return sat_image_paths, gnd_image_paths, gps_coordinates
Code Example #7
File: validate.py  Project: raaslab/CVM-Net
def test():
    # model_name = 'sydney_dense'
    # which_epoch = 77

    model_name = 'syd_orig_90'
    which_epoch = 119

    # import data (get the test data) in the format
    input_data = InputData()

    # define placeholders to feed actual training examples
    # size of the actual images satellite and ground
    # satellite (512, 512) image shape
    sat_x = tf.placeholder(tf.float32, [None, 512, 512, 3], name='sat_x')
    # ground (224, 1232) image shape
    grd_x = tf.placeholder(tf.float32, [None, 224, 1232, 3], name='grd_x')
    keep_prob = tf.placeholder(tf.float32)  # dropout

    # just BUILDING MODEL, satellite and ground image will be given later
    if network_type == 'CVM-NET-I':
        sat_global, grd_global = cvm_net_I(sat_x, grd_x, keep_prob,
                                           is_training)
    elif network_type == 'CVM-NET-II':
        sat_global, grd_global = cvm_net_II(sat_x, grd_x, keep_prob,
                                            is_training)
    else:
        print(
            'CONFIG ERROR: wrong network type, only CVM-NET-I and CVM-NET-II are valid'
        )

    # This variable downgrades the accuracy, but why?
    saver = tf.train.Saver(tf.global_variables(), max_to_keep=None)
    # run model
    print('run model...')
    config = tf.ConfigProto(log_device_placement=False,
                            allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 0.9

    with tf.Session(config=config) as sess:

        sess.run(tf.global_variables_initializer())

        print('load model...')

        os.chdir('../Model/')
        cwd = os.getcwd()

        load_model_path = cwd + '/' + network_name + '/' + network_name + '_' + model_name + '/' + network_type + '/' + str(
            which_epoch)

        saver = tf.train.import_meta_graph(load_model_path +
                                           "/model.ckpt.meta")
        load_model_path += '/model.ckpt'
        saver.restore(sess, load_model_path)
        print("   Model loaded from: %s" % load_model_path)
        print('load model...FINISHED')

        # ---------------------- Testing ----------------------
        print('Test...')
        print('compute global descriptors')
        input_data.reset_scan()
        sat_global_descriptor = np.zeros(
            [input_data.get_tt_dataset_size(), 4096])
        grd_global_descriptor = np.zeros(
            [input_data.get_tt_dataset_size(), 4096])
        # grd_global_descriptor = np.zeros([1, 4096])
        val_i = 0
        while True:
            print('progress %d' % val_i)
            # get the sat and grd batch; this is just the input images
            batch_sat, batch_grd = input_data.next_tt_scan(batch_size)
            if batch_sat is None:
                break  # break once all batches are over
            # create a dictionary
            feed_dict = {sat_x: batch_sat, grd_x: batch_grd, keep_prob: 1.0}

            # this dictionary stores all the global descriptors
            sat_global_val, grd_global_val = \
                sess.run([sat_global, grd_global], feed_dict=feed_dict)

            sat_global_descriptor[val_i:val_i +
                                  sat_global_val.shape[0], :] = sat_global_val
            grd_global_descriptor[val_i:val_i +
                                  grd_global_val.shape[0], :] = grd_global_val
            val_i += sat_global_val.shape[0]  # is this 64*512?

        print('   compute accuracy')
        val_accuracy, val_accuracy_ = validate(grd_global_descriptor,
                                               sat_global_descriptor)

        # with open('../Result/' + str(network_type) + '_accuracy.txt', 'a') as file:
        #     file.write(str(epoch) + ' ' + str(iter) + ' : ' + str(val_accuracy) + '\n')
        print('accuracy_10percent = %.1f%%' % (val_accuracy * 100.0))
        print('accuracy_1percent = %.1f%%' % (val_accuracy_ * 100.0))
Code Example #8
File: validate.py  Project: raaslab/CVM-Net
def train(start_epoch=1):
    '''
    Train the network and run the test.
    :param start_epoch: the epoch id to start training from. The first epoch is 1.
    '''

    # import data (get the train and validation data) in the format
    # satellite filename, streetview filename, pano_id
    # its job is just to create a Python version of the list that's already
    # there in the test file

    ### NOTE: JUST CHANGE THE TEST FILE ###
    input_data = InputData()

    # define placeholders to feed actual training examples
    # size of the actual images, satellite and ground
    # satellite (512, 512) image shape
    sat_x = tf.placeholder(tf.float32, [None, 512, 512, 3], name='sat_x')
    # ground (224, 1232) image shape
    grd_x = tf.placeholder(tf.float32, [None, 224, 1232, 3], name='grd_x')

    # sat_anchor = tf.placeholder(tf.float32, [None, 512, 512, 3], name='sat_anchor')
    # sat_positive = tf.placeholder(tf.float32, [None, 512, 512, 3], name='sat_positive')

    keep_prob = tf.placeholder(tf.float32)  # dropout
    learning_rate = tf.placeholder(tf.float32)

    geo_coords = tf.placeholder(tf.float32, [None, 2], name='geo_coords')

    # just BUILDING MODEL, satellite and ground image will be given later
    if network_type == 'CVM-NET-I':
        # sat_global, grd_global = cvm_net_I(sat_x, grd_x, keep_prob, is_training)
        sat_global, grd_global = cvm_net_I(sat_x, grd_x, geo_coords, keep_prob,
                                           is_training)
        # sat_a_global, sat_p_global = cvm_net_I(sat_anchor, sat_positive, keep_prob, is_training)
    elif network_type == 'CVM-NET-II':
        sat_global, grd_global = cvm_net_II(sat_x, grd_x, keep_prob,
                                            is_training)
    else:
        print(
            'CONFIG ERROR: wrong network type, only CVM-NET-I and CVM-NET-II are valid'
        )

    W = tf.get_variable('W',
                        shape=[4098, 4096],
                        initializer=tf.random_normal_initializer(stddev=1e-1))
    b = tf.get_variable('b',
                        shape=[4096],
                        initializer=tf.constant_initializer(0.1))

    # define loss
    loss = compute_loss(sat_global, grd_global, 0)
    # loss = compute_loss(sat_global, grd_global, sat_a_global, sat_p_global, 0)

    # set training
    global_step = tf.Variable(0, trainable=False)
    with tf.device('/gpu:0'):
        with tf.name_scope('train'):
            train_step = tf.train.AdamOptimizer(
                learning_rate, 0.9, 0.999).minimize(loss,
                                                    global_step=global_step)

    # save the graph, global variables

    os.chdir('../Model/')
    cwd = os.getcwd()

    model_name = 'sydney_dense'
    load_model_path = cwd + '/' + network_name + '/' + network_name + '_model'
    if (start_epoch > 1):
        load_model_path = cwd + '/' + network_name + '/' + network_name + '_' + model_name + '/' + network_type + '/' + str(
            start_epoch)
        print('start epoch is ', start_epoch)
        print(load_model_path)
    full_var_list = list()
    old_list = []
    checkpoint_path = os.path.join(load_model_path, "model.ckpt")

    reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)

    var_to_shape_map = reader.get_variable_to_shape_map()
    for key in var_to_shape_map:
        # print(key)
        # key_a =
        # key_ = key.split(',')
        # key_1 = key_[0]
        old_list.append(key)
        # print(key,key_1)

    # print(var_list)
    # print(type(tf.train.list_variables(checkpoint_path)))

    for vl in old_list:
        print(vl)
        try:

            tensor_aux = tf.get_default_graph().get_tensor_by_name(vl + ":0")
            print(tensor_aux.name)
            print('##########################')
            print(vl)
            print('done')

        except:
            wh = 1
            # print('Not found: ' + vl)
        # key_ = tensor_aux.name
        # key_1 = key_[0]
        # print(key_)
        # print(tensor_aux)
        # full_var_list.append(key_)
    # import collections
    # print([item for item, count in collections.Counter(full_var_list).items() if count > 1])

    exit()
    # for li in full_var_list:
    #     print(li)

    # saver = tf.train.Saver(tf.global_variables(), max_to_keep=None)
    saver = tf.train.Saver(full_var_list)
    # run model
    print('run model...')

    config = tf.ConfigProto(log_device_placement=False,
                            allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 0.9

    plt.ion()
    plt.xlabel('epochs')
    plt.ylabel('loss/accuracy')
    # plt.show()
    p = []
    p_val = []
    p_acc = []
    p_acc_ = []

    with tf.Session(config=config) as sess:
        # initialize global variables
        sess.run(tf.global_variables_initializer())
        #
        # for va in tf.global_variables():
        #     # print(va)
        #     print(va.name)
        # exit()
        # for i in tf.get_default_graph().get_operations():
        #     # print(i.name)

        #
        # try:
        #     tensor_aux = tf.get_default_graph().get_tensor_by_name(str(va))
        # except:
        #     print('Not found: '+va)

        # all_vars.append(va)
        print('load model...')

        ### don't uncomment ###
        # load_model_path = '../Model/' + network_type + '/' + str(start_epoch - 1) + '/model.ckpt'
        # saver.restore(sess, load_model_path)

        saver = tf.train.import_meta_graph(load_model_path +
                                           "/model.ckpt.meta")
        load_model_path += '/model.ckpt'
        # We can restore the parameters of the network by calling restore on this saver
        # which is an instance of tf.train.Saver() class.
        saver.restore(sess, load_model_path)
        sat_global = tf.concat([sat_global, geo_coords], 1)
        sat_global = tf.nn.l2_normalize(sat_global, dim=1)

        fc_sat = tf.matmul(sat_global, W) + b
        # fc_sat = self.fc_layer(sat_global, 4098, 4096, 0.005, 0.1, trainable, 'fc2', activation_fn=None)
        sat_global = tf.nn.l2_normalize(fc_sat, dim=1)

        print("   Model loaded from: %s" % load_model_path)
        print('load model...FINISHED')
        print('training...')

        for epoch in range(start_epoch, start_epoch + number_of_epoch):
            iter = 0
            train_loss = []
            val_loss = []
            print('epoch ', epoch)
            while True:
                # these are batch images
                # batch_sat_anchor, batch_sat_positive = input_data.next_sat_batch(batch_size)

                # if batch_sat_anchor is None:

                #     break

                # if batch_sat_positive is None:
                #     break
                # train
                batch_sat, batch_grd, batch_coords = input_data.next_pair_batch(
                    batch_size)
                if batch_sat is None:
                    break
                global_step_val = tf.train.global_step(sess, global_step)

                # feed_dict = {sat_x: batch_sat, grd_x: batch_grd, sat_anchor: batch_sat_anchor, sat_positive: batch_sat_positive,
                #              learning_rate: learning_rate_val, keep_prob: keep_prob_val}
                feed_dict = {
                    sat_x: batch_sat,
                    grd_x: batch_grd,
                    geo_coords: batch_coords,
                    learning_rate: learning_rate_val,
                    keep_prob: keep_prob_val
                }

                _, loss_val = sess.run([train_step, loss], feed_dict=feed_dict)

                print('######################################')
                print('loss_val ', loss_val, epoch)
                train_loss.append(loss_val)
                # print("run model")
                # print('global %d, epoch %d, iter %d: loss : %.4f' %
                #       (global_step_val, epoch, iter, loss_val))
                iter += 1
                # break

            plt.legend()
            p += [np.mean(train_loss)]
            plt.plot(p, 'b-')
            plt.pause(0.05)

            # ---------------------- validation ----------------------
            print('validate...')
            print('   compute global descriptors')
            input_data.reset_scan()
            sat_global_descriptor = np.zeros(
                [input_data.get_test_dataset_size(), 4096])
            grd_global_descriptor = np.zeros(
                [input_data.get_test_dataset_size(), 4096])
            val_i = 0
            while True:
                print('      progress %d' % val_i)
                # get the sat and grd batch; this is just the input images
                batch_sat, batch_grd, batch_coords = input_data.next_batch_scan(
                    batch_size)
                if batch_sat is None:
                    break  # break once all batches are over
                # create a dictionary
                feed_dict = {
                    sat_x: batch_sat,
                    grd_x: batch_grd,
                    geo_coords: batch_coords,
                    keep_prob: 1.0
                }

                # this dictionary stores all the global descriptors
                sat_global_val, grd_global_val = \
                    sess.run([sat_global, grd_global], feed_dict=feed_dict)
                # print('sat_global_val ', sat_global_val)

                val_loss.append(sess.run(loss, feed_dict=feed_dict))

                sat_global_descriptor[
                    val_i:val_i + sat_global_val.shape[0], :] = sat_global_val
                grd_global_descriptor[
                    val_i:val_i + grd_global_val.shape[0], :] = grd_global_val
                val_i += sat_global_val.shape[0]  # batch_size

            # print('val_loss ', val_loss)
            p_val += [np.mean(val_loss)]
            plt.plot(p_val, 'r-')
            plt.pause(0.05)

            print('   compute accuracy')
            val_accuracy, val_accuracy_ = validate(grd_global_descriptor,
                                                   sat_global_descriptor)
            p_acc += [val_accuracy]
            p_acc_ += [val_accuracy_]
            plt.plot(p_acc, 'k-')
            plt.pause(0.05)
            plt.plot(p_acc_, 'g-')
            plt.pause(0.05)

            # with open('../Result/' + str(network_type) + '_accuracy.txt', 'a') as file:
            #     file.write(str(epoch) + ' ' + str(iter) + ' : ' + str(val_accuracy) + '\n')
            print('   %d: accuracy_10percent = %.1f%%' %
                  (epoch, val_accuracy * 100.0))
            print('   %d: accuracy_1percent = %.1f%%' %
                  (epoch, val_accuracy_ * 100.0))
            cwd = os.getcwd()

            if (epoch == 119):
                plt.savefig('loss_vs_epoch.png')
                print('validate all...')
                print('   compute global descriptors')
                input_data.reset_scan()
                sat_global_descriptor = np.zeros(
                    [input_data.get_tt_dataset_size(), 4096])
                grd_global_descriptor = np.zeros(
                    [input_data.get_tt_dataset_size(), 4096])
                val_i = 0
                val_loss = []
                while True:
                    print('      progress %d' % val_i)
                    # get the sat and grd batch; this is just the input images
                    batch_sat, batch_grd = input_data.next_tt_scan(batch_size)
                    if batch_sat is None:
                        break  # break once all batches are over
                    # create a dictionary
                    feed_dict = {
                        sat_x: batch_sat,
                        grd_x: batch_grd,
                        keep_prob: 1.0
                    }

                    # this dictionary stores all the global descriptors
                    sat_global_val, grd_global_val = \
                        sess.run([sat_global, grd_global], feed_dict=feed_dict)
                    # print('sat_global_val ', sat_global_val)

                    val_loss.append(sess.run(loss, feed_dict=feed_dict))

                    sat_global_descriptor[
                        val_i:val_i +
                        sat_global_val.shape[0], :] = sat_global_val
                    grd_global_descriptor[
                        val_i:val_i +
                        grd_global_val.shape[0], :] = grd_global_val
                    val_i += sat_global_val.shape[0]  # is this 64*512?

                print('   compute accuracy')
                total_val_accuracy, total_val_accuracy_ = validate(
                    grd_global_descriptor, sat_global_descriptor)

                # with open('../Result/' + str(network_type) + '_accuracy.txt', 'a') as file:
                #     file.write(str(epoch) + ' ' + str(iter) + ' : ' + str(val_accuracy) + '\n')
                print('   %d: accuracy_10percent = %.1f%%' %
                      (epoch, val_accuracy * 100.0))
                print('   %d: accuracy_1percent = %.1f%%' %
                      (epoch, val_accuracy_ * 100.0))
                print('   %d: accuracy_tt_10percent = %.1f%%' %
                      (epoch, total_val_accuracy * 100.0))
                print('   %d: accuracy_tt_1percent = %.1f%%' %
                      (epoch, total_val_accuracy_ * 100.0))
                cwd = os.getcwd()

            os.chdir('../Model/CVM-Net-I/CVM-Net-I_' + model_name + '/')
            cwd = os.getcwd()
            os.chdir('../../../CVM-Net/')

            model_dir = cwd + '/' + network_type + '/' + str(epoch) + '/'
            if not os.path.exists(model_dir):
                os.makedirs(model_dir)
                # if (epoch > 70 or epoch % 5 == 0):
                # model is saved via saver.sess
                save_path = saver.save(sess, model_dir + 'model.ckpt')
                # sio.savemat(model_dir + 'np_vector_CVM_Net.mat', {'sat_global_descriptor': sat_global_descriptor,
                #                                                   'grd_global_descriptor': grd_global_descriptor})
                print("Model saved in file: %s" % save_path)