Example 1
    def makeGrammar(self, rules):
        """
        Produce a class from a collection of rules.

        @param rules: A mapping of names to rule bodies.
        """
        lines = list(itertools.chain(*[
            self._function("def rule_%s(self):" % (name,),
                           ["_locals = {'self': self}",
                            "self.locals[%r] = _locals" % (name,)] +
                           list(body)) + ['\n\n'] for (name, body) in rules]))
        source = '\n'.join(self._suite(
            "class %s(%s):" % (self.name, self.superclass.__name__),
            lines))
        modname = "mymeta_grammar__" + self.name
        save_source(source, modname)
        filename = "/mymeta_generated_code/" + modname + ".py"
        mod = module(modname)
        mod.__dict__.update(self.globals)
        mod.__name__ = modname
        mod.__dict__[self.superclass.__name__] = self.superclass
        mod.__loader__ = GeneratedCodeLoader(source)
        try:
            code = compile(source, filename, "exec")
        except SyntaxError:
            # A bare string cannot be raised; wrap the generated source in an
            # exception so the failing code is still visible to the caller.
            raise ValueError(source)
        eval(code, mod.__dict__)  # pylint: disable=eval-used
        mod.__dict__[self.name].globals = self.globals
        sys.modules[modname] = mod
        linecache.getlines(filename, mod.__dict__)
        return mod.__dict__[self.name]
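The heart of makeGrammar is compiling generated source into a fresh module object and registering it with sys.modules and linecache so the generated code stays importable and traceable. Below is a minimal, self-contained sketch of that pattern using only the standard library; make_module, Greeter, and the pseudo-filename are illustrative names, not part of the code above.

import linecache
import sys
import types


def make_module(source, modname, filename):
    # Create an empty module, execute the generated code inside it, and
    # register it so later lookups and tracebacks can find it.
    mod = types.ModuleType(modname)
    mod.__file__ = filename
    code = compile(source, filename, "exec")
    exec(code, mod.__dict__)
    sys.modules[modname] = mod
    # Prime linecache so tracebacks into the generated code show real lines.
    linecache.cache[filename] = (len(source), None,
                                 source.splitlines(True), filename)
    return mod


# Usage: build a trivial class dynamically and call a method on it.
generated = "class Greeter:\n    def hello(self):\n        return 'hi'\n"
mod = make_module(generated, "generated_greeter", "<generated_greeter>")
print(mod.Greeter().hello())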
Example 2
def generate_images_from_folder(model,
                                sess,
                                test_data_dir=None,
                                train_data_dir=None):
    if test_data_dir:
        source, paths = val_generator.load_imgs(test_data_dir, 128)
    else:
        source, paths = val_generator.next_source_imgs(0, 128, batch_size=256)

    if train_data_dir:
        train_imgs, _ = generator.load_imgs(train_data_dir, 128)
    else:
        train_imgs, _ = generator.next_source_imgs(
            0, 128, batch_size=FLAGS.batch_size - 1)

    assert train_imgs.shape[0] == (FLAGS.batch_size - 1)

    for i in range(len(paths)):
        print(i)
        temp = np.reshape(source[i], (1, 128, 128, 3))
        save_source(temp, [1, 1], os.path.join(FLAGS.save_dir, paths[i]))
        images = np.concatenate((temp, train_imgs), axis=0)
        for j in range(1, generator.n_classes):
            true_label_fea = generator.label_features_128[j]
            dict = {
                model.imgs: images,
                model.true_label_features_128: true_label_fea,
            }
            samples = sess.run(model.ge_samples, feed_dict=dict)
            image = np.reshape(samples[0, :, :, :], (1, 128, 128, 3))
            # generator.save_batch(samples, paths, FLAGS.save_dir, index=j, if_target=True)
            save_images(
                image, [1, 1],
                os.path.join(FLAGS.save_dir, paths[i] + '_' + str(j) + '.jpg'))
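For reference, the batch assembly above is just a reshape to a one-image batch followed by a concatenation along the batch axis. A minimal numpy illustration follows; the shapes and random contents are made up, assuming FLAGS.batch_size = 32 so that 31 training images are appended.

import numpy as np

source_img = np.random.rand(128, 128, 3).astype(np.float32)      # one test image
train_imgs = np.random.rand(31, 128, 128, 3).astype(np.float32)  # batch_size - 1 images

temp = np.reshape(source_img, (1, 128, 128, 3))      # promote to a 1-image batch
images = np.concatenate((temp, train_imgs), axis=0)  # stack along the batch axis
print(images.shape)                                  # (32, 128, 128, 3)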
Example 3
def my_train():
    with tf.Graph().as_default():
        sess = tf.Session(config=config)
        model = FaceAging(sess=sess,
                          lr=FLAGS.learning_rate,
                          keep_prob=1.,
                          model_num=FLAGS.model_index,
                          batch_size=FLAGS.batch_size,
                          age_loss_weight=FLAGS.age_loss_weight,
                          gan_loss_weight=FLAGS.gan_loss_weight,
                          fea_loss_weight=FLAGS.fea_loss_weight,
                          tv_loss_weight=FLAGS.tv_loss_weight)

        imgs = tf.placeholder(
            tf.float32,
            [FLAGS.batch_size, FLAGS.image_size, FLAGS.image_size, 3])
        true_label_features_128 = tf.placeholder(
            tf.float32, [FLAGS.batch_size, 128, 128, FLAGS.age_groups])
        true_label_features_64 = tf.placeholder(
            tf.float32, [FLAGS.batch_size, 64, 64, FLAGS.age_groups])
        false_label_features_64 = tf.placeholder(
            tf.float32, [FLAGS.batch_size, 64, 64, FLAGS.age_groups])
        age_label = tf.placeholder(tf.int32, [FLAGS.batch_size])

        source_img_227, source_img_128, face_label = load_source_batch3(
            FLAGS.source_file, FLAGS.root_folder, FLAGS.batch_size)

        model.train_age_lsgan_transfer(source_img_227, source_img_128, imgs,
                                       true_label_features_128,
                                       true_label_features_64,
                                       false_label_features_64,
                                       FLAGS.fea_layer_name, age_label)

        ge_samples = model.generate_images(imgs,
                                           true_label_features_128,
                                           reuse=True,
                                           mode='train')

        # Create a saver.
        model.saver = tf.train.Saver(model.save_d_vars + model.save_g_vars,
                                     max_to_keep=200)
        model.alexnet_saver = tf.train.Saver(model.alexnet_vars)
        model.age_saver = tf.train.Saver(model.age_vars)

        d_error = model.d_loss / model.gan_loss_weight
        g_error = model.g_loss / model.gan_loss_weight
        fea_error = model.fea_loss / model.fea_loss_weight
        age_error = model.age_loss / model.age_loss_weight

        # Start running operations on the Graph.
        sess.run(tf.global_variables_initializer())
        tf.train.start_queue_runners(sess)

        model.alexnet_saver.restore(sess, FLAGS.alexnet_pretrained_model)
        model.age_saver.restore(sess, FLAGS.age_pretrained_model)

        if model.load(FLAGS.checkpoint_dir, model.saver):
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")

        print("{} Start training...")

        # Loop over max_steps
        for step in range(FLAGS.max_steps):
            images, t_label_features_128, t_label_features_64, f_label_features_64, age_labels = \
                train_generator.next_target_batch_transfer2()
            dict = {
                imgs: images,
                true_label_features_128: t_label_features_128,
                true_label_features_64: t_label_features_64,
                false_label_features_64: f_label_features_64,
                age_label: age_labels
            }
            for i in range(d_iter):
                _, d_loss = sess.run([model.d_optim, d_error], feed_dict=dict)

            for i in range(g_iter):
                _, g_loss, fea_loss, age_loss = sess.run(
                    [model.g_optim, g_error, fea_error, age_error],
                    feed_dict=dict)

            format_str = (
                '%s: step %d, d_loss = %.3f, g_loss = %.3f, fea_loss=%.3f, age_loss=%.3f'
            )
            print(format_str %
                  (datetime.now(), step, d_loss, g_loss, fea_loss, age_loss))

            # Save the model checkpoint periodically.
            if step % SAVE_INTERVAL == SAVE_INTERVAL - 1 or (
                    step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(FLAGS.checkpoint_dir)
                model.save(checkpoint_path, step, 'acgan')

            if step % VAL_INTERVAL == VAL_INTERVAL - 1:
                if not os.path.exists(FLAGS.sample_dir):
                    os.makedirs(FLAGS.sample_dir)
                path = os.path.join(FLAGS.sample_dir, str(step))
                if not os.path.exists(path):
                    os.makedirs(path)

                source = sess.run(source_img_128)
                save_source(source, [4, 8], os.path.join(path, 'source.jpg'))
                for j in range(train_generator.n_classes):
                    true_label_fea = train_generator.label_features_128[j]
                    dict = {
                        imgs: source,
                        true_label_features_128: true_label_fea
                    }
                    samples = sess.run(ge_samples, feed_dict=dict)
                    save_images(samples, [4, 8],
                                './{}/test_{:01d}.jpg'.format(path, j))
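The loop above follows the standard TensorFlow 1.x placeholder/feed_dict pattern: build the graph once, then repeatedly feed numpy batches through sess.run. The sketch below shows that pattern on a toy linear model; the shapes, learning rate, and the tensorflow.compat.v1 import are assumptions for illustration, not part of the training script above.

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# Graph construction: placeholders stand in for the data fed at run time.
x = tf.placeholder(tf.float32, [None, 4])
y = tf.placeholder(tf.float32, [None, 1])
w = tf.Variable(tf.zeros([4, 1]))
loss = tf.reduce_mean(tf.square(tf.matmul(x, w) - y))
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(5):
        # One feed_dict maps every placeholder to a numpy batch.
        feed = {x: np.random.rand(8, 4).astype(np.float32),
                y: np.random.rand(8, 1).astype(np.float32)}
        _, cur_loss = sess.run([train_op, loss], feed_dict=feed)
        print(step, cur_loss)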
Example 4
def my_train():
    with tf.Graph().as_default():
        # Basic setup: declare the placeholders and pass the hyperparameters.
        sess = tf.Session(config=config)
        model = FaceAging(sess=sess,
                          lr=FLAGS.learning_rate,
                          keep_prob=1.,
                          model_num=FLAGS.model_index,
                          batch_size=FLAGS.batch_size,
                          age_loss_weight=FLAGS.age_loss_weight,
                          gan_loss_weight=FLAGS.gan_loss_weight,
                          fea_loss_weight=FLAGS.fea_loss_weight,
                          tv_loss_weight=FLAGS.tv_loss_weight)

        imgs = tf.placeholder(
            tf.float32,
            [FLAGS.batch_size, FLAGS.image_size, FLAGS.image_size, 3
             ])  # (_,128,128,3)
        true_label_features_128 = tf.placeholder(
            tf.float32, [FLAGS.batch_size, 128, 128, FLAGS.age_groups])
        true_label_features_64 = tf.placeholder(
            tf.float32, [FLAGS.batch_size, 64, 64, FLAGS.age_groups])
        false_label_features_64 = tf.placeholder(
            tf.float32, [FLAGS.batch_size, 64, 64, FLAGS.age_groups])
        age_label = tf.placeholder(tf.int32, [FLAGS.batch_size])

        # Load one source batch (see source_input.py) ------------------------------------------------------
        source_img_227, source_img_128, face_label = load_source_batch3(
            FLAGS.source_file, FLAGS.root_folder, FLAGS.batch_size)
        # --------------------------------------------------------------------------------------------------
        #        with tf.Session():
        #           print(face_label.eval())

        # Pass source_img_227, source_img_128, and the placeholders
        model.train_age_lsgan_transfer(source_img_227, source_img_128, imgs,
                                       true_label_features_128,
                                       true_label_features_64,
                                       false_label_features_64,
                                       FLAGS.fea_layer_name, age_label)
        # Shouldn't this use face_label rather than age_label?

        # Pass the placeholders; ge_samples generates images for intermediate checks
        ge_samples = model.generate_images(imgs,
                                           true_label_features_128,
                                           reuse=True,
                                           mode='train')

        # Create a saver.
        model.saver = tf.train.Saver(model.save_d_vars + model.save_g_vars,
                                     max_to_keep=200)
        model.alexnet_saver = tf.train.Saver(model.alexnet_vars)
        model.age_saver = tf.train.Saver(model.age_vars)

        d_error = model.d_loss / model.gan_loss_weight
        g_error = model.g_loss / model.gan_loss_weight
        fea_error = model.fea_loss / model.fea_loss_weight
        age_error = model.age_loss / model.age_loss_weight

        # Start running operations on the Graph.
        sess.run(tf.global_variables_initializer())
        tf.train.start_queue_runners(sess)

        model.alexnet_saver.restore(sess, FLAGS.alexnet_pretrained_model)
        model.age_saver.restore(sess, FLAGS.age_pretrained_model)

        if model.load(FLAGS.checkpoint_dir, model.saver):
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")

        print("{} Start training...")

        # tensorboard --logdir=./logs
        writer = tf.summary.FileWriter("./logs", sess.graph)
        # imgs: placeholder that is fed with the current image batch below.

        # Loop over max_steps
        for step in range(FLAGS.max_steps):  #200000
            #-------------------------------------------------------------------------------------------------------
            # data_generator.py yields a fresh target batch at each of the 200000 steps: images, the 128/64 label feature maps, and age labels for this training iteration.
            images, t_label_features_128, t_label_features_64, f_label_features_64, age_labels = \
                train_generator.next_target_batch_transfer2()
            dict = {
                imgs: images,
                true_label_features_128:
                t_label_features_128,  #condition for resnet generator
                true_label_features_64:
                t_label_features_64,  #condition for discriminator
                false_label_features_64:
                f_label_features_64,  #condition for discriminator
                age_label: age_labels
            }

            print(len(images))
            print("train discriminator------------------------")
            #          import ipdb
            #         ipdb.set_trace()
            for i in range(d_iter):  # 1
                _, d_loss = sess.run(
                    [model.d_optim, d_error],
                    feed_dict=dict)  # does this dict feed every placeholder above?
            print("train generator--------------------------")
            for i in range(g_iter):  # 1
                _, g_loss, fea_loss, age_loss = sess.run(
                    [model.g_optim, g_error, fea_error, age_error],
                    feed_dict=dict)

            format_str = (
                '%s: step %d, d_loss = %.3f, g_loss = %.3f, fea_loss=%.3f, age_loss=%.3f'
            )
            print(format_str %
                  (datetime.now(), step, d_loss, g_loss, fea_loss, age_loss))

            #---------------------------------------------------------------------------------------------------

            # Save the model checkpoint periodically
            if step % SAVE_INTERVAL == SAVE_INTERVAL - 1 or (
                    step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(FLAGS.checkpoint_dir)
                model.save(checkpoint_path, step, 'acgan')

            if step % VAL_INTERVAL == VAL_INTERVAL - 1:
                if not os.path.exists(FLAGS.sample_dir):
                    os.makedirs(FLAGS.sample_dir)
                path = os.path.join(FLAGS.sample_dir, str(step))
                if not os.path.exists(path):
                    os.makedirs(path)

                source = sess.run(source_img_128)
                save_source(source, [4, 8], os.path.join(path, 'source.jpg'))
                for j in range(train_generator.n_classes):
                    true_label_fea = train_generator.label_features_128[j]
                    dict = {
                        imgs: source,
                        true_label_features_128: true_label_fea
                    }
                    samples = sess.run(ge_samples, feed_dict=dict)
                    save_images(samples, [4, 8],
                                './{}/test_{:01d}.jpg'.format(path, j))
Example 5
def main(_run, _config, _log):
    '''
    _config: dictionary; its keys and values are the variables set in the cfg function
    _run: run object defined by Sacred; can be used to record hashable values and to get information about a run, e.g. its run id
    _log: logger object provided by Sacred, but it is not very flexible, so we define loggers ourselves
    '''

    # Sacred does not allow the _config object to be modified, but we sometimes
    # need to add key-value pairs, so work on a deep copy instead.
    config = dcopy(_config)
    torch.cuda.set_device(config['gpu_id'])

    save_source(_run)  # The source code is saved by this call
    init_seed(config['seed'])
    logger = init_logger(log_root=_run.observers[0].dir, file_name='log.txt')

    output_folder_path = opjoin(_run.observers[0].dir,
                                config['path']['output_folder_name'])
    os.makedirs(output_folder_path, exist_ok=True)

    best_acc_list = []
    last_acc_list = []
    train_best_list = []
    train_last_list = []

    best_epoch = []

    data = load_data(config=config)
    split_iterator = range(config['data']['random_split']['num_splits']) \
                     if config['data']['random_split']['use'] \
                    else range(1)

    config['adj'] = data[0]

    for i in split_iterator:
        output_folder = opjoin(output_folder_path, str(i))
        os.makedirs(output_folder, exist_ok=True)

        if config['data']['random_split']['use']:
            data = resplit(
                dataset=config['data']['dataset'],
                data=data,
                full_sup=config['data']['full_sup'],
                num_classes=torch.unique(data[2]).shape[0],
                num_nodes=data[1].shape[0],
                num_per_class=config['data']['label_per_class'],
            )
            print(torch.sum(data[3]))

        model = GNN(config=config)

        if i == 0:
            logger.info(model)

        if config['use_gpu']:
            model.cuda()
            data = [
                each.cuda() if hasattr(each, 'cuda') else each for each in data
            ]

        optimizer = init_optimizer(
            params=model.parameters(),
            optim_type=config['optim']['type'],
            lr=config['optim']['lr'],
            weight_decay=config['optim']['weight_decay'],
            momentum=config['optim']['momemtum'])

        criterion = nn.NLLLoss()

        best_model_path = opjoin(output_folder, 'best_model.pth')
        last_model_path = opjoin(output_folder, 'last_model.pth')
        best_dict_path = opjoin(output_folder, 'best_pred_dict.pkl')
        last_dict_path = opjoin(output_folder, 'last_pred_dict.pkl')
        losses_curve_path = opjoin(output_folder, 'losses.pkl')
        accs_curve_path = opjoin(output_folder, 'accs.pkl')
        best_state_path = opjoin(output_folder, 'best_state.pkl')
        grads_path = opjoin(output_folder, 'grads.pkl')

        best_pred_dict, last_pred_dict, train_losses, train_accs, \
        val_losses, val_accs, best_state, grads, model_state = train(best_model_path,
                                                       last_model_path,
                                                       config,
                                                       criterion,
                                                       data,
                                                       logger,
                                                       model,
                                                       optimizer
                                                       )
        last_model_state, best_model_state = model_state

        losses_dict = {'train': train_losses, 'val': val_losses}

        accs_dict = {'train': train_accs, 'val': val_accs}
        logger.info(f'split_seed: {i: 04d}')
        logger.info(f'Test set results on the last model:')
        last_pred_dict = test(
            criterion,
            data,
            last_model_path,
            last_pred_dict,
            logger,
            model,
            last_model_state,
        )

        logger.info(f'Test set results on the best model:')
        if config['fastmode']:
            best_pred_dict = last_pred_dict
        else:
            best_pred_dict = test(
                criterion,
                data,
                best_model_path,
                best_pred_dict,
                logger,
                model,
                best_model_state,
            )

        logger.info('\n')

        check_before_pkl(best_pred_dict)
        with open(best_dict_path, 'wb') as f:
            pkl.dump(best_pred_dict, f)

        check_before_pkl(last_pred_dict)
        with open(last_dict_path, 'wb') as f:
            pkl.dump(last_pred_dict, f)

        check_before_pkl(losses_dict)
        with open(losses_curve_path, 'wb') as f:
            pkl.dump(losses_dict, f)

        check_before_pkl(accs_dict)
        with open(accs_curve_path, 'wb') as f:
            pkl.dump(accs_dict, f)

        check_before_pkl(best_state)
        with open(best_state_path, 'wb') as f:
            pkl.dump(best_state, f)

        check_before_pkl(grads)
        with open(grads_path, 'wb') as f:
            pkl.dump(grads, f)

        best_acc_list.append(best_pred_dict['test acc'].item())
        last_acc_list.append(last_pred_dict['test acc'].item())
        train_best_list.append(best_state['train acc'].item())
        train_last_list.append(train_accs[-1].item())
        best_epoch.append(best_state['epoch'])

    logger.info('********************* STATISTICS *********************')
    np.set_printoptions(precision=4, suppress=True)
    logger.info(f"\n"
                f"Best test acc: {best_acc_list}\n"
                f"Mean: {np.mean(best_acc_list)}\t"
                f"Std: {np.std(best_acc_list)}\n"
                f"Last test acc: {last_acc_list}\n"
                f"Mean: {np.mean(last_acc_list)}\t"
                f"Std: {np.std(last_acc_list)}\n")

    logger.info(f"best epoch: {best_epoch}")