Example no. 1
def validate(epoch, model, test_loader):
    test_acc = 0
    test_loss = 0
    test_num = 0
    with torch.no_grad():  # replaces the removed Variable(..., volatile=True) idiom
        for it, (img_tensor, label, img_mask_tensor) in enumerate(test_loader):
            image_ = img_tensor.cuda()
            label_ = img_mask_tensor.cuda()  # the mask is the segmentation target

            logits = model(image_)
            probs = torch.sigmoid(logits)  # F.sigmoid is deprecated
            masks = (probs > 0.5).float()  # binarize the predicted masks

            loss = criterion(logits, label_)
            acc = dice_loss(masks, label_)
            batch_size = len(img_mask_tensor)
            test_num += batch_size
            test_loss += batch_size * loss.item()  # .data[0] no longer works on 0-dim tensors
            test_acc += batch_size * acc.item()

    valid_loss = test_loss / test_num
    valid_acc = test_acc / test_num
    if epoch % EPOCH_VALID == 0 or epoch == 0 or epoch == NUM_EPOCHES - 1:
        logger.info("Validate=>\n"
                    "Epoch: {epoch}\t"
                    "Valid_loss: {valid_loss:.3f}\t"
                    "Valid_acc: {valid_acc:.3f}%".format(
                        epoch=epoch,
                        valid_loss=round(valid_loss, 4),
                        valid_acc=round(valid_acc, 4) * 100))

    return valid_loss, valid_acc
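
The validate helper above (like the training loop in Example no. 4) calls a dice_loss function and a global criterion that are defined elsewhere in the source repository. As a rough sketch only, and purely as an assumption about what that helper computes, a soft Dice coefficient used as an accuracy measure might look like:

import torch


def dice_loss(pred, target, eps=1e-7):
    # Hypothetical stand-in for the repository's dice_loss: mean soft Dice
    # coefficient over the batch, where 1.0 means perfect overlap between
    # the binarized prediction and the target mask.
    pred = pred.contiguous().view(pred.size(0), -1)
    target = target.contiguous().view(target.size(0), -1)
    intersection = (pred * target).sum(dim=1)
    union = pred.sum(dim=1) + target.sum(dim=1)
    return ((2.0 * intersection + eps) / (union + eps)).mean()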
Example no. 2
print("Data from: {}".format(load_dir))

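# resolve the input directory for this group size and an output directory for this run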
load_path = load_dir + '/{}/train/0'.format(group_size)
save_path = main_path + '/{}'.format(args.name)

if not os.path.exists(save_path):
    os.makedirs(save_path)

######################
# initialization
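# wrap the model, loss and regularizer in DataParallel when more than one GPU is available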
if gpu_num > 1:
    model = nn.DataParallel(
        net.load_net(model_name, 1, out_channels, start_filts, depth, img_side,
                     num_cn, split, kernel_size,
                     num_global_control)).to(args.device)
    criterion = nn.DataParallel(net.criterion(degree=degree)).to(args.device)
    regularizer = nn.DataParallel(net.regularizer()).to(args.device)
    print("Assigned {} GPUs".format(gpu_num))
else:
    model = net.load_net(model_name, 1, out_channels, start_filts, depth,
                         img_side, num_cn, split, kernel_size,
                         num_global_control).to(args.device)
    criterion = net.criterion(degree=degree).to(args.device)
    regularizer = net.regularizer().to(args.device)
    print("Assigned on {}".format(args.device))

print('network contains {} parameters'.format(
    net.count_parameters(model)))  # parameter number
time.sleep(2)

displayer = kv.displayer()
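
Both this example and the next one report the model size via net.count_parameters, which is not shown here. Assuming it follows the usual convention of counting only trainable parameters, a minimal stand-in could be:

def count_parameters(model):
    # hypothetical replacement for net.count_parameters: total number of
    # elements across all trainable parameter tensors
    return sum(p.numel() for p in model.parameters() if p.requires_grad)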
Example no. 3
    "kura_update_rate = {}\n".format(kura_update_rate),
    "episodes = {}\n".format(episodes),
    "learning_rate = {}\n".format(learning_rate),
    "sparsity_weight = {}\n".format(sparsity_weight),
    "shuffle = {}\n".format(shuffle), "network=net.simple_conv\n",
    "num_cn={}\n".format(num_cn), "critic_dist={}\n".format(critic_dist),
    "anneal={}\n".format(anneal), "loss:exinp_integrate_torch"
]
file.writelines(L)
file.close()

######################
# initialization
if tc.cuda.device_count() > 1:
    model = nn.DataParallel(net.simple_conv()).to(args.device)
    criterion = nn.DataParallel(net.criterion()).to(args.device)
else:
    model = net.simple_conv().to(args.device)
    criterion = net.criterion().to(args.device)
print('network contains {} parameters'.format(
    net.count_parameters(model)))  # parameter number
time.sleep(2)

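# draw one random set of oscillator phases in [0, 2*pi) and tile it across the training and validation batches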
initial_phase = np.random.rand(1, img_side**2) * 2 * np.pi
rand_phase = tc.tensor(initial_phase).to('cpu')
batch_initial_phase = rand_phase.repeat(train_batch_size, 1).to(args.device)
cv_initial_phase = rand_phase.repeat(cv_batch_size, 1).detach().to(args.device)

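# running records of the training loss, validation loss and learned couplings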
loss_history = []
loss_cv_history = []
coupling_history = []
Example no. 4
def train(epoch, net, optimizer, train_data_loader):
    """
    :param epoch:
    :param net:
    :param optimizer:
    :param train_data_loader:
    :return:
    """
    smooth_loss = 0.0
    smooth_acc = 0.0
    sum_smooth_loss = 0
    sum_smooth_acc = 0
    sum_iter = 0
    it_smooth = 100

    for it, (img_tensor, label,
             img_mask_tensor) in enumerate(train_data_loader):
        image_ = img_tensor.cuda()  # Variable wrappers are deprecated
        label_ = img_mask_tensor.cuda()  # the mask is the training target

        # compute output
        logits = net(image_)
        probs = torch.sigmoid(logits)  # F.sigmoid is deprecated
        masks = (probs > 0.5).float()  # binarize the predicted masks
        loss = criterion(logits, label_)

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        acc = dice_loss(masks, label_)

        sum_smooth_loss += loss.item()  # .data[0] no longer works on 0-dim tensors
        sum_smooth_acc += acc.item()
        sum_iter += 1
        if it % it_smooth == 0:
            smooth_loss = sum_smooth_loss / sum_iter
            smooth_acc = sum_smooth_acc / sum_iter
            sum_smooth_loss = 0.0
            sum_smooth_acc = 0.0
            sum_iter = 0

        if it % it_smooth == 0 or it == len(train_data_loader) - 1:
            train_acc = acc.item()
            train_loss = loss.item()

            logger.info("Train=>\n"
                        "Epoch: {epoch}\t"
                        "Batch_num: {iter_num}\t"
                        "lr: {lr:.3f}\t"
                        "loss: {smooth_loss:.3f}\t"
                        "acc: {smooth_acc:.3f}%\t"
                        "train_loss: {train_loss}\t"
                        "train_acc: {train_acc}%".format(
                            epoch=epoch,
                            iter_num=it,
                            lr=0,  # learning rate is not tracked in this snippet
                            smooth_loss=round(smooth_loss, 4),
                            smooth_acc=round(smooth_acc, 4) * 100,
                            train_loss=round(train_loss, 4),
                            train_acc=round(train_acc, 4) * 100))
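
Examples no. 1 and no. 4 form the two halves of a typical epoch loop. An outer driver, with names such as model, optimizer, train_loader and test_loader assumed to be constructed elsewhere in the repository, would presumably look like:

for epoch in range(NUM_EPOCHES):
    train(epoch, model, optimizer, train_loader)
    valid_loss, valid_acc = validate(epoch, model, test_loader)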
Example no. 5
degree = int(experiment['degree'])
kura_update_rate = float(experiment['kura_update_rate'])
anneal = float(experiment['anneal'])
rp_field = experiment['rp_field']
main_path = experiment['main_path']
save_path = os.path.join(main_path, args.name)
checkpoint = tc.load(os.path.join(save_path, args.model),
                     map_location=tc.device('cuda'))

test_save_path = os.path.join(save_path, 'test-{}'.format(args.model))
if not os.path.exists(test_save_path):
    os.makedirs(test_save_path)

model = net.load_net(model_name, 1, out_channels, start_filts, depth, img_side,
                     num_cn, split, kernel_size, num_global_control).to('cuda')
criterion = net.criterion(degree=degree)
regularizer = net.regularizer()

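# restore the trained weights and switch the network to evaluation mode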
model.load_state_dict(checkpoint['model_state_dict'])
model.eval()

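# reuse the phase initialization and connectivity stored in the checkpoint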
rand_phase = checkpoint['initial_phase'].squeeze(
    0)  # change this to a random initialization when testing a random walk
connectivity = checkpoint['connectivity'].squeeze(0)
global_connectivity = checkpoint['gconnectivity']

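# tile the connectivity and initial phases across the test batch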
batch_connectivity = connectivity.repeat(batch_size, 1, 1).to('cuda')
batch_initial_phase = rand_phase.repeat(batch_size, 1).to('cuda')

displayer = kv.displayer()