num_img_tr = len(trainloader)
    num_img_ts = len(testloader)
    running_loss_tr = 0.0
    running_loss_ts = 0.0
    aveGrad = 0
    global_step = 0
    print("Training Network")
    min_mean_error = 1000.0

    # Main Training and Testing Loop
    for epoch in range(resume_epoch, nEpochs):
        # print(epoch)
        start_time = timeit.default_timer()

        if epoch % p['epoch_size'] == p['epoch_size'] - 1:
            lr_ = utils.lr_poly(p['lr'], epoch, nEpochs, 0.9)
            print('(poly lr policy) learning rate: ', lr_)

            optimizer = optim.Adam(net.parameters(),
                                   lr=p['lr'],
                                   weight_decay=p['wd'])
        net.train()
        for ii, sample_batched in enumerate(trainloader):

            # inputs, labels = sample_batched['image'], sample_batched['label']
            inputs, labels = sample_batched[0], sample_batched[1]
            # Forward-Backward of the mini-batch
            inputs, labels = Variable(inputs,
                                      requires_grad=True), Variable(labels)
            global_step += inputs.data.shape[0]
# ===== Example #2 (second snippet from the same scraped collection) =====
    # Dump the hyper-parameter dict `p` to a text report alongside the run outputs.
    utils.generate_param_report(os.path.join(save_dir, exp_name + '.txt'), p)

    # Number of mini-batches per epoch for the train / test loaders.
    num_img_tr = len(trainloader)
    num_img_ts = len(testloader)
    running_loss_tr = 0.0
    running_loss_ts = 0.0
    aveGrad = 0  # presumably counts accumulated gradient steps — TODO confirm in the truncated remainder
    print("Training Network")

    # Main Training and Testing Loop
    for epoch in range(resume_epoch, nEpochs):
        start_time = timeit.default_timer()

        # Every p['epoch_size'] epochs: poly-decay the learning rate and
        # rebuild the SGD optimizer so the new rate takes effect.
        if epoch % p['epoch_size'] == p['epoch_size'] - 1:
            lr_ = utils.lr_poly(p['lr'], epoch, nEpochs, 0.9)
            print('(poly lr policy) learning rate: ', lr_)
            optimizer = optim.SGD(net.parameters(), lr=lr_, momentum=p['momentum'], weight_decay=p['wd'])

        net.train()
        for ii, sample_batched in enumerate(trainloader):

            # Dataloader yields dict batches keyed 'image' / 'gt' here.
            inputs, gts = sample_batched['image'], sample_batched['gt']
            # Forward-Backward of the mini-batch
            # NOTE(review): Variable is deprecated in modern PyTorch; kept as-is.
            inputs, gts = Variable(inputs, requires_grad=True), Variable(gts)

            # Move batch to GPU when one is configured (gpu_id < 0 means CPU).
            if gpu_id >= 0:
                inputs, gts = inputs.cuda(), gts.cuda()

            # Forward pass (fragment is truncated after this line).
            output = net.forward(inputs)