Example #1
def analyseData():
    TTW, distances, rent = utils.loadData(year, model_number, bootstrap_id)
    ttwml = TTWML.TTWML(distances, TTW,
                        logForm=model_number in [2, 5])
    getEntropy(TTW, print_=True)
    getAverageTimeToWork(TTW, ttwml, distances, print_=True)
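
analyseData relies on the module-level globals year, model_number, and bootstrap_id rather than taking them as arguments. A minimal driver sketch, with values chosen to match the ranges used in Example #2 (the specific values here are illustrative only):

year = 2016            # census year, as in Example #2
model_number = 2       # models 2 and 5 use the log form
bootstrap_id = "00"    # zero-padded bootstrap index, as in Example #2

analyseData()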
Example #2
def generateAllTTWs():
    for year in [2011, 2016]:
        for model_number in range(1, 6):
            for i in range(bootstrap_repeats):
                bootstrap_id = "{:02d}".format(i)
                trial_name = 'Sydney{}_{}-{}'.format(year, model_number,
                                                     bootstrap_id)
                savePath = 'Results/{}'.format(trial_name)
                print("Generating for:", model_number, bootstrap_id, year)
                TTW, distances, _ = utils.loadData(year, model_number,
                                                   bootstrap_id)
                ttwml = TTWML.TTWML(distances, TTW,
                                    logForm=model_number in [2, 5])

                generateTTWs(ttwml, savePath, repeats=100)
Example #3
def analyseShuffleNull(repeats=10):
    from utils.generate_bootstrap import getCurrentHouseholds, getTTW

    TTW, distances, _ = utils.loadData(year, model_number, bootstrap_id)
    ttwml = TTWML.TTWML(distances, TTW, logForm=False)
    householdsHome, householdsWork = getCurrentHouseholds(TTW)
    np.random.seed(10)

    comparisons_with_data = []
    entropy = []
    time_to_work = []

    for _ in range(repeats):
        np.random.shuffle(householdsWork)  # Modifies in-place
        shuffledHouseholds = np.vstack([householdsHome, householdsWork]).T
        newTTW = getTTW(shuffledHouseholds, len(TTW))

        comparisons_with_data.append(getComparisonWithData(ttwml, newTTW, TTW))
        entropy.append(getEntropy(newTTW))
        time_to_work.append(getAverageTimeToWork(newTTW, ttwml, distances))
    return (np.array(comparisons_with_data).mean(axis=0),
            np.array(entropy).mean(axis=0),
            np.array(time_to_work).mean(axis=0))
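
The shuffle null keeps each household's home location fixed and permutes the work locations, so the averaged statistics describe commuting with the same marginals but no home–work correlation. A minimal usage sketch, assuming the same module-level globals as Example #1:

comparisons, entropy, time_to_work = analyseShuffleNull(repeats=50)
print("Null-model entropy:", entropy)
print("Null-model mean time to work:", time_to_work)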
Example #4
def val(net,
        data_loader,
        criterion,
        labelConverter,
        batchSize=64,
        max_iter=0,
        n_display=10):
    """
    :param net:
    :param dataset:
    :param criterion: loss function
    :param labelConverter: label和文字转换器
    :param batchSize: 批次大小
    :param max_iter: 验证多少个iteration, max_iter=0时测试完整个数据集
    :param n_display: 打印多少条结果
    :return:
    """
    print('Start val...')
    device = next(net.parameters()).device  # get model device
    net.eval()

    val_iter = iter(data_loader)

    with torch.no_grad():
        # Reusable buffers; utils.loadData resizes them and copies each batch in
        image = torch.empty(0, dtype=torch.float32, device=device)
        text = torch.empty(0, dtype=torch.int32, device=device)
        length = torch.empty(0, dtype=torch.int32, device=device)
        n_correct = 0
        total_img = 0
        loss_avg = utils.Averager()
        max_iter = len(data_loader) if max_iter == 0 else min(max_iter, len(data_loader))
        for _ in tqdm(range(max_iter)):
            data = next(val_iter)
            cpu_images, cpu_texts = data
            batch_size = cpu_images.size(0)
            utils.loadData(image, cpu_images)
            t, l = labelConverter.encode(cpu_texts)
            utils.loadData(text, t)
            utils.loadData(length, l)
            preds = net(image)
            preds_size = torch.IntTensor([preds.size(0)] * batch_size)
            cost = criterion(preds, text, preds_size, length)
            loss_avg.add(cost)

            _, preds = preds.max(2)
            # preds = preds.squeeze(2)
            preds = preds.transpose(1, 0).contiguous().view(-1)
            sim_preds = labelConverter.decode(preds.data,
                                              preds_size.data,
                                              raw=False)
            total_img += batch_size
            for pred, target in zip(sim_preds, cpu_texts):
                if pred == target.lower():
                    n_correct += 1

        # Show raw vs. decoded predictions for the last processed batch
        raw_preds = labelConverter.decode(preds.data, preds_size.data,
                                          raw=True)[:n_display]
        for raw_pred, pred, gt in zip(raw_preds, sim_preds, cpu_texts):
            print(f'{raw_pred:<20} => {pred:<20}, gt: {gt}')

    precision = n_correct / float(total_img)
    prt_msg = (
        f'Test loss: {loss_avg.val():.3f}  n_correct:{n_correct}  total_img: {total_img}  '
        f'precision: {precision:.3f}')
    vals = {
        'loss': loss_avg.val(),
        'n_correct': n_correct,
        'total_img': total_img,
        'precision': precision
    }
    return vals, prt_msg
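
A sketch of how val might be called between training epochs; net_crnn, ctc_loss, str2label, and best_precision reuse the names from Example #7, while val_loader is an assumed validation DataLoader:

vals, prt_msg = val(net_crnn, val_loader, ctc_loss, str2label,
                    max_iter=0, n_display=10)
print(prt_msg)
if vals['precision'] > best_precision:
    best_precision = vals['precision']  # track the best model so far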
Example #5
            maxidx = np.argmax(fisherHistOutput)
            c_at_max = paramSteps[maxidx + 1]
    plt.axvline(params['c_w'],
                color='k',
                linestyle='--',
                label='Maximum Likelihood c')
    plt.legend()
    plt.xlabel('c')
    plt.ylabel('Fisher Information')
    plt.show()

    return c_at_max


if __name__ == "__main__":
    TTW, distances, rent = utils.loadData(year, model_number, bootstrap_id)
    ttwml = TTWML.TTWML(distances, TTW,
                        logForm=model_number in [2, 5])

    params = loadParameters(savePath)

    plotDistanceHistograms(params, ttwml, distances, 'c_w', [0.5, 1, 1.5])

    c_at_max = plotFisher(params, ttwml, distances)
    new_params = params.copy()
    new_params['c_w'] = c_at_max
    max_loglikelihood = getLogLikelihood(ttwml, params, TTW, print_=True)
    loglikelihood_at_peak = getLogLikelihood(ttwml, new_params, TTW,
                                             print_=True)
Example #6
def main(_):

  # Environment Setting
  os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
  os.environ["CUDA_VISIBLE_DEVICES"] = cfg.device_id
  
  # Compute the timestamp once so all three directories share the same name
  timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
  cfg.results = '{}/{}'.format(cfg.results, timestamp)
  cfg.checkpoint = '{}/{}'.format(cfg.checkpoint, timestamp)
  cfg.summary_dir = '{}/{}'.format(cfg.summary_dir, timestamp)
  
  os.makedirs(cfg.results, exist_ok=True)
  os.makedirs(cfg.checkpoint, exist_ok=True)
  os.makedirs(cfg.summary_dir, exist_ok=True)
  shutil.copy('./main.py', './{}/main.py'.format(cfg.checkpoint))
  shutil.copy('./config.py', './{}/config.py'.format(cfg.checkpoint))
  shutil.copy('./Layer1_Generator.py', './{}/Layer1_Generator.py'.format(cfg.checkpoint))
  shutil.copy('./Layer2_Generator.py', './{}/Layer2_Generator.py'.format(cfg.checkpoint))
  
  # Construct Networks
  data_feed = loadData(batch_size=cfg.batch_size, train_shuffle=True)  # set train_shuffle=False to keep data order
  source, normal_f, normal_s, num_batch = data_feed.get_train()

  src_set = tf.compat.v1.placeholder(tf.float32, [cfg.batch_size, 224, 224, 3], name='source')
  nml_f_set = tf.compat.v1.placeholder(tf.float32, [cfg.batch_size, 224, 224, 3], name='normal_f')
  nml_s_set = tf.compat.v1.placeholder(tf.float32, [cfg.batch_size, 224, 224, 3], name='normal_s')
  src_f_l2_set = tf.compat.v1.placeholder(tf.float32, [cfg.batch_size, 224, 224, 3], name='source_f_l2')
  src_s_l2_set = tf.compat.v1.placeholder(tf.float32, [cfg.batch_size, 224, 224, 3], name='source_s_l2')
  nml_ff_l2_set = tf.compat.v1.placeholder(tf.float32, [cfg.batch_size, 224, 224, 3], name='normal_ff_l2')
  nml_sf_l2_set = tf.compat.v1.placeholder(tf.float32, [cfg.batch_size, 224, 224, 3], name='normal_sf_l2')
  nml_fs_l2_set = tf.compat.v1.placeholder(tf.float32, [cfg.batch_size, 224, 224, 3], name='normal_fs_l2')
  nml_ss_l2_set = tf.compat.v1.placeholder(tf.float32, [cfg.batch_size, 224, 224, 3], name='normal_ss_l2')
  is_train = tf.compat.v1.placeholder(tf.bool, name='is_train')

  net1 = Layer1_Generator()
  train_op, gen, loss = net1.build_up(source, normal_f, normal_s, is_train)
  net2 = Layer2_Generator()
  train_op_ref, gen_ref, loss_ref = net2.build_up(src_set, nml_f_set, nml_s_set, src_f_l2_set,
                                                  src_s_l2_set, nml_ff_l2_set, nml_sf_l2_set,
                                                  nml_fs_l2_set, nml_ss_l2_set, is_train)

  # Train or Test
  config = tf.compat.v1.ConfigProto()
  config.gpu_options.allow_growth = True

  with tf.compat.v1.Session(config=config) as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    # Start input-queue threads
    coord = tf.compat.v1.train.Coordinator()
    threads = tf.compat.v1.train.start_queue_runners(sess=sess, coord=coord)

    saver = tf.compat.v1.train.Saver(max_to_keep=0)
    if cfg.is_pretrained:
      var = tf.compat.v1.global_variables()
      var_to_restore = [v for v in var
                        if any(k in v.name for k in ('decoder_f', 'decoder_s',
                                                     'discriminator_f', 'discriminator_s'))]
      saver = tf.compat.v1.train.Saver(var_to_restore)
      saver.restore(sess, cfg.checkpoint_ft)
      print('Loaded pretrained model successfully!')

    if cfg.is_resume:
      sess.run(tf.compat.v1.global_variables_initializer())
      saver.restore(sess, cfg.checkpoint_ft)
      print('Resumed model successfully!')

    writer = tf.compat.v1.summary.FileWriter(cfg.summary_dir, sess.graph)

    for epoch in range(cfg.epoch):
      for step in range(num_batch):


        ## Train Layer-1 generators
        if step < 25 and epoch == 0:
          critic = 25
        else:
          critic = cfg.critic
        for i in range(critic):
            _ = sess.run(train_op[2], {is_train: True})  # Train front-view discriminator
            _ = sess.run(train_op[3], {is_train: True})  # Train side-view discriminator
        _ = sess.run(train_op[0], {is_train: True})  # Train front-view generator
        _, fl, sl, ftl, dl, gl, summary = sess.run(
            [train_op[1], loss[0], loss[1], loss[2], loss[3], loss[4], loss[5]],
            {is_train: True})  # Train side-view generator
        print('Layer-1, epoch {}- step {}, Fea Loss:{:.4f}, Sym Loss:{:.4f}, Reconst Loss:{:.4f}, D Loss:{:.4f}, G Loss:{:.4f}'
              .format(epoch, step, fl*cfg.lambda_fea, sl*cfg.lambda_sym, ftl*cfg.lambda_l1, dl, gl))

        ## Train Layer-2 generators
        def layer2_feed():
          # Run the Layer-1 generators to produce the inputs for Layer-2
          (src_f_l2, src_s_l2, nml_ff_l2, nml_ss_l2, nml_sf_l2, nml_fs_l2,
           src, nml_f, nml_s) = sess.run(
              [gen[0], gen[1], gen[2], gen[3], gen[4],
               gen[5], gen[6], gen[7], gen[8]], {is_train: False})
          return {src_set: src, nml_f_set: nml_f, nml_s_set: nml_s,
                  src_f_l2_set: src_f_l2, src_s_l2_set: src_s_l2,
                  nml_ff_l2_set: nml_ff_l2, nml_sf_l2_set: nml_sf_l2,
                  nml_fs_l2_set: nml_fs_l2, nml_ss_l2_set: nml_ss_l2,
                  is_train: True}
        if step < 25 and epoch == 0:
          critic = 25
        else:
          critic = cfg.critic
        for i in range(critic):
            _ = sess.run(train_op_ref[2], layer2_feed())  # Train front-view discriminator
            _ = sess.run(train_op_ref[3], layer2_feed())  # Train side-view discriminator
        _ = sess.run(train_op_ref[0], layer2_feed())  # Train front-view generator
        _, fl, sl, ftl, dl, gl, summary = sess.run(
            [train_op_ref[1], loss_ref[0], loss_ref[1], loss_ref[2],
             loss_ref[3], loss_ref[4], loss_ref[5]], layer2_feed())  # Train side-view generator
        print('Layer-2, epoch {}- step {}, Fea Loss:{:.4f}, Sym Loss:{:.4f}, Reconst Loss:{:.4f}, D Loss:{:.4f}, G Loss:{:.4f}'
              .format(epoch, step, fl*cfg.lambda_fea, sl*cfg.lambda_sym, ftl*cfg.lambda_l1, dl, gl))


        # Save Model and Summary and Test
        if step % cfg.save_freq == 0:
          writer.add_summary(summary, epoch*num_batch + step)
          print("Saving Model....")
          if cfg.is_pretrained:
            # The restore saver above covers only a subset of variables; save the full graph here
            saver_pre = tf.compat.v1.train.Saver()
            saver_pre.save(sess, os.path.join(cfg.checkpoint, 'ck-epoch{}-step{}'.format(epoch, step)))
          else:
            saver.save(sess, os.path.join(cfg.checkpoint, 'ck-epoch{}-step{}'.format(epoch, step)))

          for i in range(int(800/cfg.batch_size)):
            te_profile, te_front = data_feed.get_test_batch(cfg.batch_size)
            images_f, images_s = sess.run([gen[0], gen[1]], {source: te_profile, is_train: False})
            data_feed.save_images(images_f, 'f', epoch, step)
            data_feed.save_images(images_s, 's', epoch, step)
            images_f_ref, images_s_ref = sess.run([gen_ref[0], gen_ref[1]], {src_set: te_profile, is_train: False})
            data_feed.save_images(images_f_ref, '2nd_f', epoch, step)
            data_feed.save_images(images_s_ref, '2nd_s', epoch, step)

    
    # Close Threads
    coord.request_stop()
    coord.join(threads)
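
The script expects a config.py exposing a cfg object; a hypothetical stub showing the fields referenced above (field names come from the code, values are illustrative only):

class cfg:
    device_id = '0'
    batch_size = 8
    epoch = 20
    critic = 5                    # discriminator updates per generator update
    save_freq = 500
    lambda_fea = 1.0              # feature-loss weight
    lambda_sym = 1.0              # symmetry-loss weight
    lambda_l1 = 10.0              # L1 reconstruction-loss weight
    results = 'results'
    checkpoint = 'checkpoint'
    summary_dir = 'summary'
    is_pretrained = False
    is_resume = False
    checkpoint_ft = 'checkpoint/pretrained/ck'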
Example #7
    # loss Averager
    loss_avg = utils.Averager()
    # ### begin training
    iteration, total_iter = 0, len(train_loader) * opt.nepoch
    best_precision = 0
    for epoch in range(opt.nepoch):
        for i, data in enumerate(train_loader, start=1):
            iteration += 1
            # Re-enable gradients and training mode (validation may have changed them)
            for p in net_crnn.parameters():
                p.requires_grad = True
            net_crnn.train()

            # ### train one batch ################################
            cpu_images, cpu_texts = data
            batch_size = cpu_images.size(0)
            utils.loadData(image, cpu_images)
            t, l = str2label.encode(cpu_texts)
            utils.loadData(text, t)
            utils.loadData(length, l)

            preds = net_crnn(image)
            preds_size = torch.LongTensor([preds.size(0)] * batch_size)
            preds = preds.to(torch.float32)
            cost = ctc_loss(preds, text, preds_size, length)
            optimizer.zero_grad()
            cost.backward()
            optimizer.step()
            scheduler.step(cost)  # assumes a metric-driven scheduler such as ReduceLROnPlateau
            ###########################################
            loss_avg.add(cost)
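
            # Hypothetical periodic validation, reusing val() from Example #4;
            # val_loader and opt.val_freq are assumed names not shown in this snippet
            if iteration % opt.val_freq == 0:
                vals, prt_msg = val(net_crnn, val_loader, ctc_loss, str2label)
                print(prt_msg)
                if vals['precision'] > best_precision:
                    best_precision = vals['precision']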