Example #1
# Dump every model parameter to one flat binary file
# (raw float32 buffers, written in parameter order, no header).
with open('parameters', 'wb') as fid:
    for param in module.parameters():
        fid.write(param.data.numpy().tobytes())
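
The dump is an ordered, headerless stream of raw buffers, so reading it back means walking the parameters in the same order. A minimal sketch of a matching loader, assuming every parameter is float32 (the filename and layout follow the dump above; the loader itself is not part of the original listing):

import numpy as np
import torch

with open('parameters', 'rb') as fid:
    for param in module.parameters():
        # Read exactly as many floats as this parameter holds, in dump order.
        flat = np.fromfile(fid, dtype=np.float32, count=param.numel())
        param.data.copy_(torch.from_numpy(flat).view_as(param.data))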

if use_gpu:
    module.cuda()
    # `gpu` is the list of device ids to replicate the model across
    module = nn.DataParallel(module, device_ids=gpu)

for stage in [0]:
    for phase in ["valid"]:
        print("Testing...")
        # Evaluation mode: disable dropout/batch-norm updates and freeze
        # every parameter so no gradients are tracked.
        module.eval()
        for param in module.parameters():
            param.requires_grad_(False)

        running_dist = 0.
        for batch, data in enumerate(dataloader[phase], 1):
            x, t, idx = data
            if use_gpu:
                x = x.cuda()
                t = t.cuda()
            batch_size = 32  # assumed size of each loaded batch (hardcoded rather than x.size(0))
            bs = 4  # sub-batch size processed per forward pass

            for i in range(0, batch_size, bs):
                # Process the batch in sub-batches of `bs` samples to bound memory use;
                # pad each sub-batch before the forward pass.
                # (Author's commented-out alternative: crop instead of padding.)
                # xm = x[i:i + bs, :, 3:(height - 3), 3:(width - 3)]
                xm = mirror_padding(x[i:i + bs], 21, True)
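
The listing ends here, and `mirror_padding` itself is not shown in the excerpt. A minimal sketch of what such a helper could look like, assuming (this signature is a guess, not the author's definition) that the second argument is the pad width and the third selects reflection padding:

import torch.nn.functional as F

def mirror_padding(x, pad, reflect=True):
    # Hypothetical reconstruction: pad all four spatial borders of an
    # NCHW tensor by `pad` pixels, reflecting (or replicating) the edges.
    mode = 'reflect' if reflect else 'replicate'
    return F.pad(x, (pad, pad, pad, pad), mode=mode)
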
Example #2
iterations = 0
for stage in [0]:
    if stage == 0:
        rg = range(0, 30000)  # 700
        lr = 0.0003  # * pow(0.9997, 2100)
    else:
        rg = range(2)
        lr = 0.0003
    for epoch in rg:
        # Exponential learning-rate decay, applied once per epoch.
        lr *= 0.9997
        print("\nEpoch {:d}".format(epoch))
        for phase in ["train", "valid"]:

            if phase == "train":
                print("Training...")
                # Training mode: enable gradients and push the decayed
                # learning rate into every optimizer parameter group.
                module.train(True)
                for param in module.parameters():
                    param.requires_grad_(True)
                for param_g in optimizer.param_groups:
                    param_g['lr'] = lr
            else:
                print("Validating...")
                module.train(False)
                for param in module.parameters():
                    param.requires_grad_(False)

            running_dist = 0.
            for batch, data in enumerate(dataloader[phase], 1):
                x, t, idx = data
                if use_gpu:
                    x = x.cuda()
                    t = t.cuda()  # targets need the same device transfer (cf. Example #1)
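
The manual `lr *= 0.9997` plus the per-group assignment above reimplements an exponential schedule by hand. A minimal sketch of the same decay using PyTorch's built-in scheduler (assuming `optimizer` is already constructed, as in the original):

from torch.optim.lr_scheduler import ExponentialLR

scheduler = ExponentialLR(optimizer, gamma=0.9997)
for epoch in rg:
    # ... run the train/valid phases as above ...
    scheduler.step()  # multiplies every group's lr by 0.9997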