Example #1
def main(_):

    # Let the GPU allocator grow on demand instead of reserving all memory.
    gpu_options = tf.GPUOptions(allow_growth=True)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        # Build the network, attach the data queue, then train/evaluate.
        network = crossnet.CrossNet(sess)
        network.load_data(data_list, data_dir)

        # Print the parsed configuration flags.
        misc.pprint(network.config.__flags)

        network.train_test()
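For context, the `main(_)` signature follows the TF1 `tf.app.run()` convention, which parses command-line flags and then calls `main` with the leftover argv. A minimal sketch of the surrounding entry point, assuming `data_list` and `data_dir` come from flags (the flag names and defaults here are assumptions, not from the source):

import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_string('data_list', 'train_list.txt', 'file listing data samples')
flags.DEFINE_string('data_dir', './data', 'root directory of the images')
FLAGS = flags.FLAGS

if __name__ == '__main__':
    tf.app.run()  # parses flags, then invokes main() with remaining argv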
Example #2
def main(_):

    # Let the GPU allocator grow on demand instead of reserving all memory.
    gpu_options = tf.GPUOptions(allow_growth=True)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:

        # Alternative: cap GPU memory at a fixed fraction instead.
        # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
        # config = tf.ConfigProto(gpu_options=gpu_options)
        # with tf.Session(config=config) as sess:

        network = nasnet.CrossNet(sess)
        network.load_data(data_list, data_dir)

        # Print the parsed configuration flags.
        misc.pprint(network.config.__flags)

        network.train_test()
Example #3
    def load_data(self, image_list, image_dir=""):
        """Create a queue that outputs batches of images and labels.

        label 0~3: [sky, bldg, road, tree]
        """
        self.data_names = []
        with open(image_list, 'r') as fid:
            for line in fid.readlines():
                names = [
                    os.path.join(image_dir, name.strip())
                    for name in line.split(',')
                ]
                keys = ['im_a', 'im_g', 'lb_g']
                self.data_names.append(dict(zip(keys, names)))
        shuffle(self.data_names)  # random.shuffle: randomize sample order once

        self.num_samples = len(self.data_names)
        misc.pprint('[*] load %d samples from "%s"' %
                    (self.num_samples, image_list))
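load_data therefore expects each line of image_list to hold three comma-separated paths (aerial image, ground image, ground-truth label). A small standalone sketch of the parsing step, with a hypothetical input line (the file names are made up for illustration):

import os

line = 'aerial/0001.png,ground/0001.png,label/0001.png'  # hypothetical line
names = [os.path.join('data', name.strip()) for name in line.split(',')]
print(dict(zip(['im_a', 'im_g', 'lb_g'], names)))
# {'im_a': 'data/aerial/0001.png', 'im_g': 'data/ground/0001.png',
#  'lb_g': 'data/label/0001.png'}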
Example #4
    def build_model(self, data, is_training=True):
        raw_aerial, raw_ground, label_ground = data
        self.image_aerial = misc.preprocess_image(raw_aerial,
                                                  self.szs.image_aerial)
        self.image_ground = misc.preprocess_image(raw_ground,
                                                  self.szs.image_ground)
        self.prob_ground = misc.preprocess_label(label_ground,
                                                 self.num_classes,
                                                 self.szs.after_transf)
        # NOTE: the original reads misc.proprocess_image, which looks like a
        # typo; postprocess_image (undoing the normalization for display) is
        # an assumption about the intended name in misc.
        self.im_aerial = misc.postprocess_image(self.image_aerial)
        self.im_ground = misc.postprocess_image(self.image_ground)

        self.feat_aerial = models.pixelnet(self.image_aerial,
                                           self.num_classes,
                                           is_training=is_training,
                                           batch_norm=self.batch_norm)
        misc.pprint(
            self.feat_aerial.get_shape().as_list())  # print the feature size

        if is_training:
            feat_aerial_small = self.feat_aerial
        else:
            feat_aerial_small = tf.image.resize_bilinear(
                self.feat_aerial, self.szs.before_transf)

        weights = models.compute_transfweights(self.szs.before_transf,
                                               self.szs.after_transf,
                                               self.conditioned,
                                               is_training=is_training,
                                               batch_norm=self.batch_norm)
        self.feat_aerial2ground = models.transfnet(feat_aerial_small, weights,
                                                   self.szs.after_transf)

        if is_training:
            self.merged = tf.summary.merge_all()
            self.summarizer = tf.summary.FileWriter(self.log_dir,
                                                    self.sess.graph)

            with tf.name_scope("Loss"):
                self.loss_class = tf.reduce_mean(
                    tf.nn.softmax_cross_entropy_with_logits(
                        labels=self.prob_ground,
                        logits=self.feat_aerial2ground))
                self.loss_reg = tf.add_n(
                    tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
                self.loss = self.loss_class + self.loss_reg

            with tf.name_scope("Optimizer"):
                with tf.control_dependencies(
                        tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                    self.step = tf.Variable(0,
                                            name='global_step',
                                            trainable=False)
                    self.optim = tf.train.AdamOptimizer(
                        tf.train.exponential_decay(
                            0.001, self.step, 5000, .7, staircase=True
                        )  # not sure if this is necessary for Adam optimizer
                    ).minimize(self.loss, global_step=self.step)

        self.saver = tf.train.Saver(max_to_keep=10,
                                    write_version=tf.train.SaverDef.V2)
        misc.pprint("[*] build model.")

        self.transfweights, self.transfbiases = tf.get_collection(
            'transformer_weights')
        self.prob_aerial = tf.nn.softmax(self.feat_aerial)
        self.prob_aerial2ground = tf.nn.softmax(self.feat_aerial2ground)

        with tf.name_scope('Vis'):
            self.visual = [
                self.image_aerial, self.image_ground,
                tf.cast(self.prob_aerial, tf.float32) / self.num_classes,
                tf.cast(self.prob_ground, tf.float32) / self.num_classes,
                tf.cast(self.prob_aerial2ground, tf.float32) / self.num_classes,
                self.transfweights]
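The optimizer above pairs Adam with a staircase schedule; with staircase=True, tf.train.exponential_decay floors the exponent, so the effective learning rate is 0.001 * 0.7 ** (step // 5000). A quick sketch of the resulting values:

for step in (0, 4999, 5000, 10000, 15000):
    print(step, round(0.001 * 0.7 ** (step // 5000), 6))
# -> 0 0.001 | 4999 0.001 | 5000 0.0007 | 10000 0.00049 | 15000 0.000343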
Example #5
    def restore(self):
        # Assumes a checkpoint exists; get_checkpoint_state returns None
        # when ckpt_dir holds no checkpoint.
        ckpt = tf.train.get_checkpoint_state(self.ckpt_dir)
        self.saver.restore(self.sess, ckpt.model_checkpoint_path)
        misc.pprint("[*] restore checkpoint from '%s'." % self.ckpt_dir)
Example #6
    parser.add_argument('--weight_decay', type=float, default=1e-4)
    parser.add_argument('--meta_lr', type=float, default=0.01)
    parser.add_argument('--base_lr', type=float,
                        default=0.01)  # Learning rate for the inner loop
    parser.add_argument(
        '--update_step', type=int,
        default=100)  # The number of updates for the inner loop
    parser.add_argument(
        '--eval_weights', type=str,
        default=None)  # The meta-trained weights for meta-eval phase
    parser.add_argument('--meta_label', type=str,
                        default='exp1')  # Additional label for meta-train

    # Set and print the parameters
    args = parser.parse_args()
    pprint(vars(args))

    # Set manual seed for PyTorch
    if args.seed == 0:
        print('Using random seed.')
        torch.backends.cudnn.benchmark = True
    else:
        print('Using manual seed:', args.seed)
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed(args.seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    # Start trainer for pre-train, meta-train or meta-eval
    if args.phase == 'meta_train':
        trainer = MetaTrainer(args)