def validate(epoch,
             val_loader,
             model,
             crit_cls,
             crit_reg,
             opt,
             ctx,
             gen_shape=False):
    """
    One validation
    """
    generated_shapes = []
    original_shapes = []
    sample_prob = opt.inner_sample_prob
    loss_cls_sum, loss_reg_sum, n = 0.0, 0.0, 0

    for idx, data in enumerate(val_loader):
        start = time.time()

        shapes, labels, masks, params, param_masks = (
            data[0], data[1], data[2], data[3], data[4])
        gt = shapes
        shapes = nd.expand_dims(shapes, axis=1)

        shapes = shapes.as_in_context(ctx)
        labels = labels.as_in_context(ctx)
        masks = masks.as_in_context(ctx)
        params = params.as_in_context(ctx)
        param_masks = param_masks.as_in_context(ctx)
        with autograd.train_mode():
            out = model.decode(shapes)
        # alternative: out = model(shapes, labels, sample_prob)
        bsz, n_block, n_step = labels.shape
        labels = labels.reshape(bsz, n_block * n_step)
        masks = masks.reshape(bsz, n_block * n_step)
        out_pgm = out[0].reshape(bsz, n_block * n_step, opt.program_size + 1)

        bsz, n_block, n_step, n_param = params.shape
        params = params.reshape(bsz, n_block * n_step, n_param)
        param_masks = param_masks.reshape(bsz, n_block * n_step, n_param)
        out_param = out[1].reshape(bsz, n_block * n_step, n_param)
        loss_cls, acc = crit_cls(out_pgm, labels, masks)
        loss_reg = crit_reg(out_param, params, param_masks)

        end = time.time()

        loss_cls = loss_cls.mean().asscalar()
        loss_reg = loss_reg.mean().asscalar()
        loss_cls_sum += loss_cls
        loss_reg_sum += loss_reg
        n += 1

        if idx % opt.info_interval == 0:
            out_1 = nd.round(out[0]).astype('int64')
            out_2 = nd.round(out[1]).astype('int64')
            pred = nd.from_numpy(decode_multiple_block(
                out_1, out_2)).astype("float32").as_in_context(mx.cpu())
            IoU = BatchIoU(pred, gt)
            print(
                "Test: epoch {} batch {}/{}, loss_cls = {:.3f}, loss_reg = {:.3f}, acc = {:.3f}, IoU = {:.3f} time = {:.3f}"
                .format(epoch, idx, len(val_loader), loss_cls, loss_reg,
                        acc[0].asscalar(), IoU.mean(), end - start))
            sys.stdout.flush()
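
BatchIoU is imported from elsewhere in the repo and never shown in these examples. As a rough illustration only, here is a minimal numpy sketch of the idea, assuming both inputs are batches of (32, 32, 32) occupancy grids; the name and threshold are assumptions, not the repo's actual implementation:

import numpy as np

def batch_iou_sketch(s1, s2, threshold=0.5):
    """Hypothetical stand-in for BatchIoU: per-sample IoU of voxel grids."""
    v1 = (np.asarray(s1) > threshold).reshape(len(s1), -1)
    v2 = (np.asarray(s2) > threshold).reshape(len(s2), -1)
    inter = np.logical_and(v1, v2).sum(axis=1)
    union = np.logical_or(v1, v2).sum(axis=1)
    return inter / np.maximum(union, 1)  # guard against empty unions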
Example #2
def run():
    opt = parse_argument()

    if not os.path.isdir(opt.prog_save_path):
        os.makedirs(opt.prog_save_path)
    if not os.path.isdir(opt.imgs_save_path):
        os.makedirs(opt.imgs_save_path)

    print('========= arguments =========')
    for key, val in vars(opt).items():
        print("{:20} {}".format(key, val))
    print('========= arguments =========')

    # data loader
    test_set = ShapeNet3D(opt.data)
    test_loader = DataLoader(
        dataset=test_set,
        batch_size=opt.batch_size,
        shuffle=False,
        num_workers=opt.num_workers,
    )

    # model
    ckpt = torch.load(opt.model)
    model = BlockOuterNet(ckpt['opt'])
    model.load_state_dict(ckpt['model'])
    if opt.is_cuda:
        model = model.cuda()
        cudnn.benchmark = True

    # test the model and evaluate the IoU
    ori_shapes, gen_shapes, pgms, params = test_on_shapenet_data(
        epoch=0, test_loader=test_loader, model=model, opt=opt, gen_shape=True)
    IoU = BatchIoU(ori_shapes, gen_shapes)
    print("Mean IoU: {:.3f}".format(IoU.mean()))

    # execute the generated programs to re-render the shapes as a
    # double check; this block can be disabled
    num_shapes = gen_shapes.shape[0]
    res = []
    for i in range(num_shapes):
        data = execute_shape_program(pgms[i], params[i])
        res.append(data.reshape((1, 32, 32, 32)))
    res = np.concatenate(res, axis=0)
    IoU_2 = BatchIoU(ori_shapes, res)

    assert abs(IoU.mean() - IoU_2.mean()) < 0.1, 'IoUs do not match'

    # save results
    save_file = os.path.join(opt.save_path, 'shapes.h5')
    f = h5py.File(save_file, 'w')
    f['data'] = gen_shapes
    f['pgms'] = pgms
    f['params'] = params
    f.close()

    # Interpret the programs into human-readable program strings
    if opt.save_prog:
        interpreter = Interpreter(translate, rotate, end)
        num_programs = gen_shapes.shape[0]
        for i in range(min(num_programs, opt.num_render)):
            program = interpreter.interpret(pgms[i], params[i])
            save_file = os.path.join(opt.prog_save_path, '{}.txt'.format(i))
            with open(save_file, 'w') as out:
                out.write(program)

    # Visualization
    if opt.save_img:
        data = gen_shapes.transpose((0, 3, 2, 1))
        data = np.flip(data, axis=2)
        num_shapes = data.shape[0]
        for i in range(min(num_shapes, opt.num_render)):
            voxels = data[i]
            save_name = os.path.join(opt.imgs_save_path, '{}.png'.format(i))
            visualization(voxels,
                          threshold=0.1,
                          save_name=save_name,
                          uniform_size=0.9)
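
For reference, the results file written above can be read back with standard h5py calls; a short sketch (the 'shapes.h5' path mirrors the save_file above):

import h5py

# read back the datasets written by run() above
with h5py.File('shapes.h5', 'r') as f:
    gen_shapes = f['data'][:]
    pgms = f['pgms'][:]
    params = f['params'][:]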
Example #3
def train(epoch, train_loader, model, logsoft, soft, criterion, optimizer,
          opt):
    """
    one epoch training for program executor
    """
    model.train()
    criterion.train()

    for idx, data in enumerate(train_loader):
        start_t = time.time()

        optimizer.zero_grad()

        shape, label, param = data[0], data[1], data[2]

        bsz = shape.size(0)
        n_step = label.size(1)

        # every sequence runs the full n_step steps, so the index of the
        # last step is n_step - 1 for every sample in the batch
        index = np.full(bsz, n_step, dtype=np.int64) - 1

        # add noise during training, making the executor accept
        # continuous output from program generator

        label = label.view(-1, 1)
        pgm_vector = 0.1 * torch.rand(bsz * n_step, stop_id)
        pgm_noise = 0.1 * torch.rand(bsz * n_step, 1)
        pgm_value = torch.ones(bsz * n_step, 1) - pgm_noise
        pgm_vector.scatter_(1, label, pgm_value)
        pgm_vector = pgm_vector.view(bsz, n_step, stop_id)

        param_noise = torch.rand(param.size())
        param_vector = param + 0.6 * (param_noise - 0.5)

        gt = shape
        index = torch.from_numpy(index).long()
        pgm_vector = pgm_vector.float()
        param_vector = param_vector.float()

        if opt.is_cuda:
            gt = gt.cuda()
            index = index.cuda()
            pgm_vector = pgm_vector.cuda()
            param_vector = param_vector.cuda()

        pred = model(pgm_vector, param_vector, index)
        scores = logsoft(pred)
        loss = criterion(scores, gt)

        loss.backward()
        clip_gradient(optimizer, opt.grad_clip)
        optimizer.step()
        loss = loss.item()  # loss.data[0] only works on pre-0.4 PyTorch

        pred = soft(pred)
        pred = pred[:, 1, :, :, :]
        s1 = gt.view(-1, 32, 32, 32).data.cpu().numpy()
        s2 = pred.squeeze().data.cpu().numpy()
        s2 = (s2 > 0.5)

        batch_iou = BatchIoU(s1, s2)
        iou = batch_iou.sum() / s1.shape[0]

        end_t = time.time()

        if idx % (opt.info_interval * 10) == 0:
            print(
                "Train: epoch {} batch {}/{}, loss13 = {:.3f}, iou = {:.3f}, time = {:.3f}"
                .format(epoch, idx, len(train_loader), loss, iou,
                        end_t - start_t))
            sys.stdout.flush()
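
clip_gradient is not defined in these examples. A minimal sketch of an element-wise clamp that matches how it is called above; this is an assumption about the helper, not its confirmed implementation:

def clip_gradient(optimizer, grad_clip):
    """Hypothetical helper: clamp each gradient to [-grad_clip, grad_clip]."""
    for group in optimizer.param_groups:
        for param in group['params']:
            if param.grad is not None:
                param.grad.data.clamp_(-grad_clip, grad_clip)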
Example #4
def run():

    opt = options_train_executor.parse()

    print('===== arguments: program executor =====')
    for key, val in vars(opt).items():
        print("{:20} {}".format(key, val))
    print('===== arguments: program executor =====')

    if not os.path.isdir(opt.save_folder):
        os.makedirs(opt.save_folder)

    # build dataloader
    train_set = PartPrimitive(opt.train_file)
    train_loader = DataLoader(
        dataset=train_set,
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.num_workers,
    )
    val_set = PartPrimitive(opt.val_file)
    val_loader = DataLoader(
        dataset=val_set,
        batch_size=opt.batch_size,
        shuffle=False,
        num_workers=opt.num_workers,
    )

    # build the model
    model = RenderNet(opt)
    logsoft = nn.LogSoftmax(dim=1)
    soft = nn.Softmax(dim=1)
    criterion = nn.NLLLoss(weight=torch.Tensor([opt.n_weight, opt.p_weight]))

    if opt.is_cuda:
        if opt.num_gpu > 1:
            gpu_ids = [i for i in range(opt.num_gpu)]
            model = torch.nn.DataParallel(model, device_ids=gpu_ids)
        model = model.cuda()
        logsoft = logsoft.cuda()
        soft = soft.cuda()
        criterion = criterion.cuda()
        cudnn.benchmark = True

    optimizer = optim.Adam(model.parameters(),
                           lr=opt.learning_rate,
                           betas=(opt.beta1, opt.beta2),
                           weight_decay=opt.weight_decay)

    for epoch in range(1, opt.epochs + 1):
        adjust_learning_rate(epoch, opt, optimizer)

        print("###################")
        print("training")
        train(epoch, train_loader, model, logsoft, soft, criterion, optimizer,
              opt)

        print("###################")
        print("testing")
        gen_shapes, ori_shapes = validate(epoch,
                                          val_loader,
                                          model,
                                          logsoft,
                                          soft,
                                          criterion,
                                          opt,
                                          gen_shape=True)
        gen_shapes = (gen_shapes > 0.5)
        gen_shapes = gen_shapes.astype(np.float32)
        iou = BatchIoU(ori_shapes, gen_shapes)
        print("Mean IoU: {:.3f}".format(iou.mean()))

        if epoch % opt.save_interval == 0:
            print('Saving...')
            state = {
                'opt': opt,
                'model': (model.module.state_dict()
                          if opt.num_gpu > 1 else model.state_dict()),
                'optimizer': optimizer.state_dict(),
                'epoch': epoch,
            }
            save_file = os.path.join(
                opt.save_folder, 'ckpt_epoch_{epoch}.t7'.format(epoch=epoch))
            torch.save(state, save_file)

    state = {
        'opt': opt,
        'model': (model.module.state_dict()
                  if opt.num_gpu > 1 else model.state_dict()),
        'optimizer': optimizer.state_dict(),
        'epoch': opt.epochs,
    }
    save_file = os.path.join(opt.save_folder, 'program_executor.t7')
    torch.save(state, save_file)
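
adjust_learning_rate is likewise imported from elsewhere. A plausible step-decay sketch; the opt.lr_decay_epochs and opt.lr_decay_rate fields are assumptions for illustration, not confirmed options:

def adjust_learning_rate(epoch, opt, optimizer):
    """Hypothetical step decay; opt.lr_decay_epochs/_rate are assumed fields."""
    steps = epoch // opt.lr_decay_epochs
    if steps > 0:
        new_lr = opt.learning_rate * (opt.lr_decay_rate ** steps)
        for group in optimizer.param_groups:
            group['lr'] = new_lr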
Example #5
def validate(epoch,
             val_loader,
             model,
             logsoft,
             soft,
             criterion,
             opt,
             gen_shape=False):

    # load the pre-fixed randomization; generate and cache it on first run
    try:
        rand1 = np.load(opt.rand1)
        rand2 = np.load(opt.rand2)
        rand3 = np.load(opt.rand3)
    except IOError:
        rand1 = np.random.rand(opt.batch_size * opt.seq_length,
                               stop_id).astype(np.float32)
        rand2 = np.random.rand(opt.batch_size * opt.seq_length,
                               1).astype(np.float32)
        rand3 = np.random.rand(opt.batch_size, opt.seq_length,
                               max_param - 1).astype(np.float32)
        np.save(opt.rand1, rand1)
        np.save(opt.rand2, rand2)
        np.save(opt.rand3, rand3)

    model.eval()
    criterion.eval()

    generated_shapes = []
    original_shapes = []

    for idx, data in enumerate(val_loader):
        start_t = time.time()

        shape, label, param = data[0], data[1], data[2]

        bsz = shape.size(0)
        n_step = label.size(1)

        # every sequence runs the full n_step steps; last-step index is n_step - 1
        index = np.full(bsz, n_step, dtype=np.int64) - 1

        label = label.view(-1, 1)
        pgm_vector = 0.1 * torch.from_numpy(rand1)
        pgm_noise = 0.1 * torch.from_numpy(rand2)
        pgm_value = torch.ones(bsz * n_step, 1) - pgm_noise
        pgm_vector.scatter_(1, label, pgm_value)
        pgm_vector = pgm_vector.view(bsz, n_step, stop_id)

        param_noise = torch.from_numpy(rand3)
        param_vector = param + 0.6 * (param_noise - 0.5)

        gt = shape
        index = torch.from_numpy(index).long()
        pgm_vector = pgm_vector.float()
        param_vector = param_vector.float()

        if opt.is_cuda:
            gt = gt.cuda()
            index = index.cuda()
            pgm_vector = pgm_vector.cuda()
            param_vector = param_vector.cuda()

        pred = model(pgm_vector, param_vector, index)
        scores = logsoft(pred)
        loss = criterion(scores, gt)

        loss = loss.item()  # loss.data[0] only works on pre-0.4 PyTorch

        pred = soft(pred)
        pred = pred[:, 1, :, :, :]
        s1 = gt.view(-1, 32, 32, 32).data.cpu().numpy()
        s2 = pred.squeeze().data.cpu().numpy()
        s2 = (s2 > 0.5)

        batch_iou = BatchIoU(s1, s2)
        iou = batch_iou.sum() / s1.shape[0]

        original_shapes.append(s1)
        generated_shapes.append(s2)

        end_t = time.time()

        if (idx + 1) % opt.info_interval == 0:
            print(
                "Test: epoch {} batch {}/{}, loss13 = {:.3f}, iou = {:.3f}, time = {:.3f}"
                .format(epoch, idx + 1, len(val_loader), loss, iou,
                        end_t - start_t))
            sys.stdout.flush()

    if gen_shape:
        # stack the per-batch arrays into (N, 32, 32, 32) volumes
        generated_shapes = np.concatenate(generated_shapes, axis=0)
        original_shapes = np.concatenate(original_shapes, axis=0)

    return generated_shapes, original_shapes
Example #6
def train(epoch, train_loader, model, loss, optimizer, opt, ctx, train_loss,
          train_iou):
    """
    one epoch training for program executor
    """
    loss_sum, iou_sum, n = 0.0, 0.0, 0
    for idx, data in enumerate(train_loader):
        start_t = time.time()

        shape, label, param = data
        bsz = shape.shape[0]
        n_step = label.shape[1]
        #print("label.shape:",label)
        #print("n_step:",n_step,"bsz:",bsz,"stop_id:",stop_id)
        
        index = np.array(list(map(lambda x: n_step, label)))-1
        #index = label
        
        # add noise during training, making the executor accept
        # continuous output from program generator
        label = label.reshape(-1, 1).asnumpy()
        pgm_vector = 0.2 * np.random.uniform(0, 1, (bsz * n_step, stop_id))
        pgm_noise = 0.2 * np.random.uniform(0, 1, label.shape)
        pgm_value = 1 - pgm_noise
        pgm_vector = scatter_numpy(pgm_vector, 1, label,
                                   pgm_value).reshape(bsz, n_step, stop_id)

        param_noise = nd.random_uniform(0, 1, shape=param.shape)
        param_vector = param + 0.6 * (param_noise - 0.5)

        gt = shape.as_in_context(ctx)
        index = nd.from_numpy(index).astype('int64').as_in_context(ctx)
        pgm_vector = nd.from_numpy(pgm_vector).astype(
            'float32').as_in_context(ctx)
        param_vector = param_vector.as_in_context(ctx)

        with autograd.record():
            pred = model(pgm_vector, param_vector, index)
            scores = nd.log_softmax(pred, axis=1)
            # class-weighted voxel-wise NLL: occupied voxels (gt == 1) take
            # p_weight * log p1, empty voxels take n_weight * log p0
            pred0 = scores[:, 0].squeeze() * opt.n_weight
            pred1 = scores[:, 1].squeeze() * opt.p_weight
            l = -nd.where(gt, pred1, pred0).mean((1, 2, 3))
        l.backward()

        optimizer.step(l.shape[0], ignore_stale_grad=True)
        
        l = l.mean().asscalar()

        pred = nd.softmax(pred, axis=1)
        pred = pred[:, 1, :, :, :]
        s1 = gt.reshape(-1, 32, 32, 32).astype('float32').as_in_context(
            mx.cpu())
        s2 = pred.squeeze().as_in_context(mx.cpu())
        s2 = (s2 > 0.5)

        batch_iou = BatchIoU(s1, s2)
        iou = batch_iou.mean()
        end_t = time.time()
        loss_sum += l
        n += 1
        iou_sum += iou

        if idx % (opt.info_interval * 10) == 0:
            print("Train: epoch {} batch {}/{}, loss13 = {:.3f}, iou = {:.3f}, time = {:.3f}"
                  .format(epoch, idx, len(train_loader), l, iou, end_t - start_t))
            sys.stdout.flush()
        
    train_loss.append(loss_sum / n)
    train_iou.append(iou_sum / n)
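
scatter_numpy, used above and in the next example, is a numpy stand-in for torch.Tensor.scatter_. A minimal sketch covering the only call pattern in these examples (dim=1 with a column of indices); an assumption about the helper, not the repo's exact code:

import numpy as np

def scatter_numpy(target, dim, index, value):
    """Hypothetical scatter_: write value[r, 0] into target[r, index[r, 0]]."""
    assert dim == 1, "sketch only handles the dim=1 case used above"
    rows = np.arange(target.shape[0])
    cols = np.asarray(index).reshape(-1).astype(np.int64)
    target[rows, cols] = np.asarray(value).reshape(-1)
    return target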
Example #7
def validate(epoch, val_loader, model, loss, opt, ctx, val_loss, val_iou,
             gen_shape=False):

    # load the pre-fixed randomization; generate and cache it on first run
    try:
        rand1 = np.load(opt.rand1)
        rand2 = np.load(opt.rand2)
        rand3 = np.load(opt.rand3)
    except IOError:
        rand1 = np.random.rand(opt.batch_size * opt.seq_length,
                               stop_id).astype(np.float32)
        rand2 = np.random.rand(opt.batch_size * opt.seq_length,
                               1).astype(np.float32)
        rand3 = np.random.rand(opt.batch_size, opt.seq_length,
                               max_param - 1).astype(np.float32)
        np.save(opt.rand1, rand1)
        np.save(opt.rand2, rand2)
        np.save(opt.rand3, rand3)

    generated_shapes = None
    original_shapes = None
    
    loss_sum, iou_sum, n = 0.0, 0.0, 0
    for idx, data in enumerate(val_loader):
        start_t = time.time()

        shape, label, param = data

        bsz = shape.shape[0]
        n_step = label.shape[1]
        # every sequence runs the full n_step steps; last-step index is n_step - 1
        index = np.full(bsz, n_step, dtype=np.int64) - 1

        # add the same style of noise as in training, drawn from the pre-fixed
        # randomization so validation is reproducible across runs
        label = label.reshape(-1, 1).asnumpy()
        pgm_vector = 0.1 * rand1
        pgm_noise = 0.1 * rand2
        pgm_value = np.ones(label.shape) - pgm_noise
        pgm_vector = scatter_numpy(pgm_vector, 1, label,
                                   pgm_value).reshape(bsz, n_step, stop_id)

        param_noise = nd.from_numpy(rand3)
        param_vector = param + 0.6 * (param_noise - 0.5)
        
        
        gt = shape.astype('float32').as_in_context(ctx)
        index = nd.from_numpy(index).astype('int64').as_in_context(ctx)
        pgm_vector = nd.from_numpy(pgm_vector).as_in_context(ctx)
        param_vector = param_vector.as_in_context(ctx)

        # prediction; the weighting mirrors train(): p_weight on occupied
        # voxels, n_weight on empty ones
        pred = model(pgm_vector, param_vector, index)
        scores = nd.log_softmax(pred, axis=1)
        pred0 = scores[:, 0].squeeze() * opt.n_weight
        pred1 = scores[:, 1].squeeze() * opt.p_weight
        l = -nd.where(gt, pred1, pred0).mean((1, 2, 3))

        l = l.mean().asscalar()
        pred = nd.softmax(pred, axis=1)
        pred = pred[:, 1, :, :, :]
        s1 = gt.reshape(-1, 32, 32, 32).as_in_context(mx.cpu())
        s2 = pred.squeeze().as_in_context(mx.cpu())
        s2 = (s2 > 0.5)

        batch_iou = BatchIoU(s1, s2)
        iou = batch_iou.mean()
        loss_sum += l
        n += 1
        iou_sum += iou
        
        if (idx + 1) % 5 == 0 and gen_shape:
            if original_shapes is None:
                original_shapes = s1.expand_dims(axis=0)
                generated_shapes = s2.expand_dims(axis=0)
            else:
                original_shapes = nd.concat(original_shapes,
                                            s1.expand_dims(axis=0), dim=0)
                generated_shapes = nd.concat(generated_shapes,
                                             s2.expand_dims(axis=0), dim=0)
        end_t = time.time()

        if (idx + 1) % opt.info_interval == 0:
            print("Test: epoch {} batch {}/{}, loss13 = {:.3f}, iou = {:.3f}, time = {:.3f}"
                  .format(epoch, idx + 1, len(val_loader), l, iou, end_t - start_t))
            sys.stdout.flush()
        # evaluate on roughly the first tenth of the validation set
        if idx + 1 > len(val_loader) / 10:
            break
    val_loss.append(loss_sum / n)
    val_iou.append(iou_sum / n)

    return generated_shapes, original_shapes
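
The nd.where construction in train() and validate() above is a per-voxel, class-weighted negative log-likelihood: occupied voxels contribute p_weight * log p(occupied), empty ones n_weight * log p(empty). A small numpy restatement of the same computation, for illustration only:

import numpy as np

def weighted_voxel_nll(log_p0, log_p1, gt, n_weight, p_weight):
    """Per-sample weighted NLL over (bsz, 32, 32, 32) log-probability grids."""
    picked = np.where(gt.astype(bool), p_weight * log_p1, n_weight * log_p0)
    return -picked.mean(axis=(1, 2, 3))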
Example #8
def train(epoch, train_loader, generator, executor, soft, criterion, optimizer,
          opt):
    """
    one epoch guided adaptation
    """
    generator.train()

    # keep the executor in train mode but do not update its parameters;
    # otherwise we could not backpropagate through the LSTM
    executor.train()

    def set_bn_eval(m):
        classname = m.__class__.__name__
        if classname.find('BatchNorm') != -1:
            m.eval()

    executor.apply(set_bn_eval)

    for idx, data in enumerate(train_loader):
        start = time.time()

        optimizer.zero_grad()
        generator.zero_grad()
        executor.zero_grad()

        shapes = data
        raw_shapes = data

        shapes = torch.unsqueeze(shapes, 1)
        if opt.is_cuda:
            shapes = shapes.cuda()

        pgms, params = generator.decode(shapes)

        # truly rendered shapes
        rendered_shapes = decode_multiple_block(pgms, params)
        IoU2 = BatchIoU(rendered_shapes, raw_shapes.clone().numpy())

        # neurally rendered shapes
        pgms = torch.exp(pgms)
        bsz, n_block, n_step, n_vocab = pgms.shape
        pgm_vector = pgms.view(bsz * n_block, n_step, n_vocab)
        bsz, n_block, n_step, n_param = params.shape
        param_vector = params.view(bsz * n_block, n_step, n_param)
        index = (n_step - 1) * torch.ones(bsz * n_block).long()
        if opt.is_cuda:
            index = index.cuda()

        pred = executor(pgm_vector, param_vector, index)
        pred = soft(pred)
        pred = pred[:, 1, :, :, :]
        pred = pred.contiguous().view(bsz, n_block, 32, 32, 32)

        # per-voxel max over blocks: a soft union of the block occupancies
        rec, _ = torch.max(pred, dim=1)
        rec1 = rec
        rec1.unsqueeze_(1)
        rec0 = 1 - rec1
        rec_all = torch.cat((rec0, rec1), dim=1)
        rec_all = torch.log(rec_all + 1e-10)
        loss = criterion(rec_all, shapes.detach().squeeze_(1).long())

        loss.backward()
        clip_gradient(optimizer, opt.grad_clip)
        optimizer.step()

        reconstruction = rec.data.cpu().numpy()
        reconstruction = np.squeeze(reconstruction, 1)
        reconstruction = reconstruction > 0.5
        reconstruction = reconstruction.astype(np.uint8)
        raw_shapes = raw_shapes.clone().numpy()
        IoU1 = BatchIoU(reconstruction, raw_shapes)

        if opt.is_cuda:
            torch.cuda.synchronize()

        end = time.time()

        if idx % opt.info_interval == 0:
            print(
                "Train: epoch {} batch {}/{}, loss = {:.3f}, IoU1 = {:.3f}, IoU2 = {:.3f}, time = {:.3f}"
                .format(epoch, idx, len(train_loader), loss.item(),
                        IoU1.mean(), IoU2.mean(), end - start))
            sys.stdout.flush()
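
The set_bn_eval trick above keeps the executor trainable for backprop while freezing its BatchNorm statistics. A self-contained check of the pattern with standard PyTorch modules (toy network, purely illustrative):

import torch.nn as nn

net = nn.Sequential(nn.Conv3d(1, 8, 3), nn.BatchNorm3d(8), nn.ReLU())
net.train()  # train mode overall, so gradients flow end to end

def set_bn_eval(m):
    if 'BatchNorm' in m.__class__.__name__:
        m.eval()  # freeze running mean/var updates during adaptation

net.apply(set_bn_eval)
print(net[1].training)  # False: only the BN layer sits in eval mode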
Example #9
def run():
    # get options
    opt = options_guided_adaptation.parse()

    print('===== arguments: guided adaptation =====')
    for key, val in vars(opt).items():
        print("{:20} {}".format(key, val))
    print('===== arguments: guided adaptation =====')

    if not os.path.isdir(opt.save_folder):
        os.makedirs(opt.save_folder)

    # build loaders
    train_set = ShapeNet3D(opt.train_file)
    train_loader = DataLoader(
        dataset=train_set,
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.num_workers,
    )
    val_set = ShapeNet3D(opt.val_file)
    val_loader = DataLoader(
        dataset=val_set,
        batch_size=opt.batch_size,
        shuffle=False,
        num_workers=opt.num_workers,
    )

    # load program generator
    ckpt_p_gen = torch.load(opt.p_gen_path)
    generator = BlockOuterNet(ckpt_p_gen['opt'])
    generator.load_state_dict(ckpt_p_gen['model'])

    # load program executor
    ckpt_p_exe = torch.load(opt.p_exe_path)
    executor = RenderNet(ckpt_p_exe['opt'])
    executor.load_state_dict(ckpt_p_exe['model'])

    # build loss functions
    soft = nn.Softmax(dim=1)
    criterion = nn.NLLLoss(weight=torch.Tensor([1, 1]))

    if opt.is_cuda:
        generator = generator.cuda()
        executor = executor.cuda()
        soft = soft.cuda()
        criterion = criterion.cuda()
        cudnn.benchmark = True

    optimizer = optim.Adam(generator.parameters(),
                           lr=opt.learning_rate,
                           betas=(opt.beta1, opt.beta2),
                           weight_decay=opt.weight_decay)

    print("###################")
    print("testing")
    gen_shapes, ori_shapes = validate(0,
                                      val_loader,
                                      generator,
                                      opt,
                                      gen_shape=True)
    IoU = BatchIoU(ori_shapes, gen_shapes)
    print("iou: ", IoU.mean())

    best_iou = 0

    for epoch in range(1, opt.epochs + 1):
        print("###################")
        print("adaptation")
        train(epoch, train_loader, generator, executor, soft, criterion,
              optimizer, opt)
        print("###################")
        print("testing")
        gen_shapes, ori_shapes = validate(epoch,
                                          val_loader,
                                          generator,
                                          opt,
                                          gen_shape=True)
        IoU = BatchIoU(ori_shapes, gen_shapes)
        print("iou: ", IoU.mean())

        if epoch % opt.save_interval == 0:
            print('Saving...')
            state = {
                'opt': ckpt_p_gen['opt'],
                'model': generator.state_dict(),
                'optimizer': optimizer.state_dict(),
                'epoch': epoch,
            }
            save_file = os.path.join(
                opt.save_folder, 'ckpt_epoch_{epoch}.t7'.format(epoch=epoch))
            torch.save(state, save_file)

        if IoU.mean() >= best_iou:
            print('Saving best model')
            state = {
                'opt': ckpt_p_gen['opt'],
                'model': generator.state_dict(),
                'optimizer': optimizer.state_dict(),
                'epoch': epoch,
            }
            save_file = os.path.join(
                opt.save_folder, 'program_generator_GA_{}.t7'.format(opt.cls))
            torch.save(state, save_file)
            best_iou = IoU.mean()
Example #10
def train(epoch, train_loader, generator, executor, criterion, optimizer, opt,
          ctx):
    """
    one epoch guided adaptation
    """
    def set_bn_eval(m):
        if m.prefix[:9] == 'batchnorm':
            m._kwargs['use_global_stats'] = True
            m.grad_req = 'null'

    executor.apply(set_bn_eval)
    for idx, data in enumerate(train_loader):
        start = time.time()
        shapes = data.as_in_context(ctx)
        raw_shapes = data
        shapes = shapes.expand_dims(axis=1)
        with autograd.record():
            pgms, params = generator.decode(shapes)

            # truly rendered shapes
            pgms_int = nd.round(pgms).astype('int64')
            params_int = nd.round(params).astype('int64')

            # neurally rendered shapes
            pgms = nd.exp(pgms)
            bsz, n_block, n_step, n_vocab = pgms.shape
            pgm_vector = pgms.reshape(bsz * n_block, n_step, n_vocab)
            bsz, n_block, n_step, n_param = params.shape
            param_vector = params.reshape(bsz * n_block, n_step, n_param)
            index = (n_step - 1) * nd.ones(bsz * n_block).astype('int64')
            index = index.as_in_context(ctx)

            pred = executor(pgm_vector, param_vector, index)
            pred = nd.softmax(pred, axis=1)
            pred = pred[:, 1]
            pred = pred.reshape(bsz, n_block, 32, 32, 32)

            # per-voxel max over blocks: a soft union of the block occupancies
            rec = nd.max(pred, axis=1)

            # binary cross-entropy against the target occupancy grid
            rec1 = nd.log(rec + 1e-11)
            rec0 = nd.log(1 - rec + 1e-11)
            gt = shapes.squeeze().astype('int64')
            loss = -nd.where(gt, rec1, rec0).mean(axis=(1, 2, 3))
        loss.backward()
        optimizer.step(loss.shape[0], ignore_stale_grad=True)
        l = loss.mean().asscalar()

        rendered_shapes = decode_multiple_block(pgms_int, params_int)
        rendered_shapes = nd.from_numpy(rendered_shapes).astype(
            'float32').as_in_context(mx.cpu())
        IoU2 = BatchIoU(raw_shapes, rendered_shapes)
        reconstruction = (rec.as_in_context(mx.cpu()) > 0.5).astype('float32')
        IoU1 = BatchIoU(reconstruction, raw_shapes)
        #print("IoU1:",IoU1,IoU2)
        IoU1 = IoU1.mean()
        IoU2 = IoU2.mean()

        end = time.time()

        if idx % opt.info_interval == 0:
            print(
                "Train: epoch {} batch {}/{}, loss = {:.3f}, IoU1 = {:.3f}, IoU2 = {:.3f}, time = {:.3f}"
                .format(epoch, idx, len(train_loader), l, IoU1, IoU2,
                        end - start))
            sys.stdout.flush()
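
The nd.max over the block axis treats each block's occupancy probability as a soft set and takes their union voxel-wise. A two-block toy example of that reduction:

import numpy as np

blocks = np.array([[0.1, 0.9, 0.2],   # block 1 occupancy probabilities
                   [0.8, 0.2, 0.1]])  # block 2 occupancy probabilities
union = blocks.max(axis=0)            # soft union: [0.8, 0.9, 0.2]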
Example #11
def run():
    # get options
    opt = options_guided_adaptation.parse()
    opt_gen = options_train_generator.parse()
    opt_exe = options_train_executor.parse()
    print('===== arguments: guided adaptation =====')
    for key, val in vars(opt).items():
        print("{:20} {}".format(key, val))
    print('===== arguments: guided adaptation =====')

    if not os.path.isdir(opt.save_folder):
        os.makedirs(opt.save_folder)

    # build loaders
    train_set = ShapeNet3D(opt.train_file)
    train_loader = gdata.DataLoader(dataset=train_set,
                                    batch_size=opt.batch_size,
                                    shuffle=True,
                                    num_workers=opt.num_workers)

    val_set = ShapeNet3D(opt.val_file)
    val_loader = gdata.DataLoader(dataset=val_set,
                                  batch_size=opt.batch_size,
                                  shuffle=False,
                                  num_workers=opt.num_workers)

    def visual(path, epoch, gen_shapes, file_name, nums_samples):
        data = gen_shapes.transpose((0, 3, 2, 1))
        data = np.flip(data, axis=2)
        num_shapes = data.shape[0]
        for i in range(min(nums_samples, num_shapes)):
            voxels = data[i]
            save_name = os.path.join(path, file_name.format(epoch, i))
            visualization(voxels,
                          threshold=0.1,
                          save_name=save_name,
                          uniform_size=0.9)

    ctx = d2l.try_gpu()

    # load program generator
    generator = BlockOuterNet(opt_gen)
    generator.init_blocks(ctx)
    generator.load_parameters("model of blockouternet")

    # load program executor
    executor = RenderNet(opt_exe)
    executor.initialize(init=init.Xavier(), ctx=ctx)
    executor.load_parameters("model of executor")

    # build loss functions
    criterion = gloss.SoftmaxCrossEntropyLoss(axis=1, from_logits=True)

    optimizer = Trainer(
        generator.collect_params(), "adam", {
            "learning_rate": opt.learning_rate,
            "wd": opt.weight_decay,
            'beta1': opt.beta1,
            'beta2': opt.beta2,
            'clip_gradient': opt.grad_clip
        })

    print("###################")
    print("testing")
    gen_shapes, ori_shapes = validate(0,
                                      val_loader,
                                      generator,
                                      opt,
                                      ctx,
                                      gen_shape=True)
    # optionally render snapshots with the visual() helper defined above, e.g.
    # visual('imgs of chairs/adaption/chair/', 0, gen_shapes, 'epoch{}-{}.png', 8)

    gen_shapes = nd.from_numpy(gen_shapes)
    ori_shapes = nd.from_numpy(ori_shapes)

    IoU = BatchIoU(gen_shapes, ori_shapes)
    print("iou: ", IoU.mean())

    best_iou = 0
    for epoch in range(1, opt.epochs + 1):
        print("###################")
        print("adaptation")
        train(epoch, train_loader, generator, executor, criterion, optimizer,
              opt, ctx)
        print("###################")
        print("testing")
        gen_shapes, ori_shapes = validate(epoch,
                                          val_loader,
                                          generator,
                                          opt,
                                          ctx,
                                          gen_shape=True)
        gen_shapes = nd.from_numpy(gen_shapes)
        ori_shapes = nd.from_numpy(ori_shapes)
        IoU = BatchIoU(gen_shapes, ori_shapes)

        print("iou: ", IoU.mean())

        if epoch % opt.save_interval == 0:
            print('Saving...')
            generator.save_parameters("generator of GA on shapenet")
            optimizer.save_states("optimizer of generator of GA on shapenet")

        if IoU.mean() >= best_iou:
            print('Saving best model')
            generator.save_parameters("generator of GA on shapenet")
            optimizer.save_states("optimizer of generator of GA on shapenet")
            best_iou = IoU.mean()
def train(epoch, train_loader, model, crit_cls, crit_reg, optimizer, opt, ctx):
    """
    One epoch training
    """
    cls_w = opt.cls_weight
    reg_w = opt.reg_weight
    # if sample_prob > 1, the input of step t is always sampled from the
    # output of step t-1
    sample_prob = opt.inner_sample_prob

    for idx, data in enumerate(train_loader):
        start = time.time()
        # data holds: shape, pgm, pgm_mask, param, param_mask
        shapes, labels, masks, params, param_masks = (
            data[0], data[1], data[2], data[3], data[4])
        gt = shapes
        shapes = nd.expand_dims(shapes, axis=1)
        shapes = shapes.as_in_context(ctx)

        labels = labels.as_in_context(ctx)

        masks = masks.as_in_context(ctx)
        params = params.as_in_context(ctx)
        param_masks = param_masks.as_in_context(ctx)

        with autograd.record():
            out = model(shapes, labels, sample_prob)

            # reshape
            bsz, n_block, n_step = labels.shape
            labels = labels.reshape(bsz, -1)
            masks = masks.reshape(bsz, -1)
            out_pgm = out[0].reshape(bsz, n_block * n_step,
                                     opt.program_size + 1)

            bsz, n_block, n_step, n_param = params.shape
            params = params.reshape(bsz, n_block * n_step, n_param)
            param_masks = param_masks.reshape(bsz, n_block * n_step, n_param)
            out_param = out[1].reshape(bsz, n_block * n_step, n_param)

            loss_cls, acc = crit_cls(out_pgm, labels, masks)
            loss_reg = crit_reg(out_param, params, param_masks)
            loss = cls_w * loss_cls + reg_w * loss_reg
        loss.backward()

        optimizer.step(bsz, ignore_stale_grad=True)

        loss_cls = loss_cls.mean().asscalar()
        loss_reg = loss_reg.mean().asscalar()

        end = time.time()

        if idx % (opt.info_interval * 10) == 0:
            out_1 = nd.round(out[0]).astype('int64')
            out_2 = nd.round(out[1]).astype('int64')
            pred = nd.from_numpy(decode_multiple_block(
                out_1, out_2)).astype("float32").as_in_context(mx.cpu())
            IoU = BatchIoU(pred, gt)
            print(
                "Train: epoch {} batch {}/{},loss_cls = {:.3f},loss_reg = {:.3f},acc = {:.3f},IoU = {:.3f},time = {:.2f}"
                .format(epoch, idx, len(train_loader), loss_cls, loss_reg,
                        acc[0].asscalar(), IoU.mean(), end - start))
            sys.stdout.flush()
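
sample_prob controls scheduled sampling inside the generator: at each step, the model's own previous output replaces the ground truth with this probability, and a value above 1 forces self-feeding everywhere. A sketch of the per-step choice; the helper name and signature are hypothetical, not the model's actual code:

import random

def next_input(prev_gt, prev_pred, sample_prob):
    """Feed back the model's own prediction with probability sample_prob."""
    return prev_pred if random.random() < sample_prob else prev_gt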