Example #1
correct = 0
total = 0
eval_loss = 0
eval_loss_v = 0
board_loss_every = len(train_loader) // 100  # 32686//100
print("board_loss_every " + str(board_loss_every) + "...")
board_eval_every = len(train_loader) // 10  # 32686//10
print("board_eval_every " + str(board_eval_every) + "...")

writer = SummaryWriter(
    "../tmp_log/train_stage1_" + args.net + "_" + args.loss + "_" +
    str(args.lr) + "_" +
    time.strftime("%m-%d:%H-%M-%S", time.localtime(time.time())) + "/")

eval_tool = EvalTool(batch_size=args.batch_size * num_gpus,
                     transform=transform_eval,
                     tb_writer=writer)


#######################################################################################################################
def train(epoch):
    global lr, iter_num, total, correct, train_loss, eval_loss, eval_loss_v, board_loss_every
    sys.stdout.write("\n")
    print("--->Training... Epoch = %d" % epoch)
    model.train()

    for batch_idx, (data, target) in tqdm(enumerate(train_loader),
                                          total=len(train_loader)):

        x_step = batch_idx + (epoch - 1) * len(train_loader)
        if x_step % board_eval_every == 0:
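            # Hedged sketch, not from the source: a typical pattern here is to
            # run the evaluation tool and log the result to TensorBoard, e.g.
            #   eval_tool.evaluate(model)                   # hypothetical call
            #   writer.add_scalar("eval_loss", eval_loss, x_step)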
Example #2
iter_num = 0
train_loss = 0
correct = 0
total = 0
eval_loss = 0
eval_loss_v = 0
# board_loss_every = len(train_loader)//100
# print('board_loss_every '+ str(board_loss_every)+'...')

writer = SummaryWriter(
    "../tmp_log/train_stage2_" + args.net + "_" + args.loss + "_" +
    str(args.lr) + "_" +
    time.strftime("%m-%d:%H-%M-%S", time.localtime(time.time())) + "/")

eval_tool = EvalTool(transform=transform_eval, tb_writer=writer)

lr_change1 = int(1 * len(train_loader))
lr_change2 = int(2 * len(train_loader))
lr_change3 = int(3 * len(train_loader))
scheduler = optim.lr_scheduler.MultiStepLR(
    optimizer4nn, milestones=[lr_change1, lr_change2, lr_change3], gamma=0.1)
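
# Note: MultiStepLR counts scheduler.step() calls, so milestones expressed as
# multiples of len(train_loader) only take effect if step() is called once per
# batch; under that assumption the LR is decayed by 10x after epochs 1, 2 and 3.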

board_loss_every = len(train_loader) // 100  # 32686//100
print("board_loss_every " + str(board_loss_every) + "...")
board_eval_every = len(train_loader) // 10  # 32686//10
print("board_eval_every " + str(board_eval_every) + "...")


def train(epoch):
    global lr, iter_num, total, correct, train_loss, eval_loss, eval_loss_v
    global board_loss_every, board_eval_every
    if config.use_mix_data:
        # use finer logging/eval/save intervals when training on the mixed dataset
        board_loss_every = len(train_loader) // 600
        board_eval_every = len(train_loader) // 60
        board_save_every = len(train_loader) // 6

    print('board_loss_every ' + str(board_loss_every) + '...')
    print('board_eval_every ' + str(board_eval_every) + '...')

    writer = SummaryWriter(
        config.result_dir + 'tmp_log3/train_recog_' + config.prefix + '_' +
        str(config.tik_shape_weight) + '_' + str(config.weight_edge_lm) + '_' +
        time.strftime('%m-%d:%H-%M-%S', time.localtime(time.time())) + '/')

    eval_tool = EvalTool(transform=transform_eval,
                         criterion=criterion,
                         tb_writer=writer,
                         batch_size=16 * num_gpus)

    #scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer4nn, patience=board_loss_every*40, verbose=True)
    loss_mean = 10000
    reject_num = 0
    feat_norm = 0
    norm_shape = 0
    norm_exp = 0

    if config.start_from_warm3d:
        pass
        #load_model(model,config.checkpoint_warm3d+'.pkl')
        #load_model(optimizer4nn,config.checkpoint_warm3d+'_n.pkl')
    # if config.start_from_warmpixel:
    #     load_model_m(optimizer4nn,config.checkpoint_warm_pixel+'_n.pkl')
Example #4
                                                   batch_size=40 * num_gpus,
                                                   shuffle=False,
                                                   num_workers=1)
    # evalset_micc =

iter_num = 0
train_loss = 0
correct = 0
total = 0
eval_loss = 0
eval_loss_v = 0

writer = SummaryWriter(
    "../tmp_log/train_all_" + args.net + "_" + str(args.lr) + "_" +
    time.strftime("%m-%d:%H-%M-%S", time.localtime(time.time())) + "/")
eval_tool = EvalTool(batch_size=20, transform=transform_eval, tb_writer=writer)
load_model(model, dict_file)
reject_num = 0
loss_mean = 10000
loss_max = 0
norm_shape = 0
norm_feat = 0


def get_distribution_sampler():
    return lambda n, m, b: torch.Tensor(
        np.concatenate(
            (
                np.random.randn(b, n) * config.shape_ev.reshape((1, -1)),
                np.random.randn(b, m) * config.exp_ev.reshape((1, -1)),
            ),
            axis=1,  # assumption: concatenate shape and expression coefficients column-wise
        ))
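

# Usage sketch (dimensions are illustrative, not from the source): draw a batch
# of 3DMM coefficients, the first n columns for shape and the next m for
# expression, each scaled by the corresponding eigenvalues.
#   sampler = get_distribution_sampler()
#   params = sampler(199, 29, args.batch_size)  # hypothetical dimensions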
Example #5
total = 0
eval_loss = 0
eval_loss_v = 0

board_loss_every = len(train_loader) // 100  # 32686//100
print("board_loss_every " + str(board_loss_every) + "...")
board_eval_every = len(train_loader) // 10  # 32686//10
print("board_eval_every " + str(board_eval_every) + "...")

writer = SummaryWriter(
    config.result_dir + "tmp_log/train_stage3_" + config.prefix + "_" +
    args.loss + "_" + str(args.lr) + "_" +
    time.strftime("%m-%d:%H-%M-%S", time.localtime(time.time())) + "/")

eval_tool = EvalTool(transform=transform_eval,
                     criterion=criterion,
                     tb_writer=writer)

# scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer4nn, patience=board_loss_every*40, verbose=True)
loss_mean = 10000
reject_num = 0
feat_norm = 0
norm_shape = 0
norm_exp = 0


def get_distribution_sampler():
    # print(config.x)
    return lambda b: torch.Tensor(config.gmm_data[np.random.randint(
        0, 6000000, size=b)])
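

# Usage sketch (hypothetical): draw a random batch of parameter vectors from
# the pre-computed sample pool stored in config.gmm_data.
#   sampler = get_distribution_sampler()
#   params = sampler(args.batch_size)  # random rows out of the 6,000,000-sample pool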