Example #1
def validate(epoch, val_loader, generator, opt, ctx, gen_shape=False):
    """
    evaluate program generator, in terms of IoU
    """
    generated_shapes = []
    original_shapes = []
    for idx, data in enumerate(val_loader):
        start = time.time()
        shapes = data.as_in_context(ctx)
        shapes = nd.expand_dims(shapes, axis=1)
        with autograd.train_mode():
            out = generator.decode(shapes)

        end = time.time()

        if gen_shape:
            out_1 = nd.round(out[0]).astype('int64')
            out_2 = nd.round(out[1]).astype('int64')
            generated_shapes.append(
                decode_multiple_block(out_1, out_2).astype("float32"))
            original_shapes.append(data.asnumpy())

        if idx % opt.info_interval == 0:
            print("Test: epoch {} batch {}/{}, time={:.3f}".format(
                epoch, idx, len(val_loader), end - start))

    if gen_shape:
        generated_shapes = np.concatenate(generated_shapes, axis=0)
        original_shapes = np.concatenate(original_shapes, axis=0)

    return generated_shapes, original_shapes
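A typical call site runs this with gen_shape=True and scores the returned voxel batches; a minimal sketch, assuming the project's BatchIoU helper accepts the NumPy arrays returned here and that epoch, val_loader, generator, opt and ctx already exist:

gen_shapes, ori_shapes = validate(epoch, val_loader, generator, opt, ctx,
                                  gen_shape=True)
iou = BatchIoU(gen_shapes, ori_shapes)  # per-sample voxel IoU
print("mean IoU = {:.3f}".format(iou.mean()))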
Example #2
def validate(epoch,
             val_loader,
             model,
             crit_cls,
             crit_reg,
             opt,
             ctx,
             gen_shape=False):
    """
    One validation
    """
    generated_shapes = []
    original_shapes = []
    sample_prob = opt.inner_sample_prob
    loss_cls_sum, loss_reg_sum, n = 0.0, 0.0, 0

    for idx, data in enumerate(val_loader):
        start = time.time()

        shapes, labels, masks, params, param_masks = \
            data[0], data[1], data[2], data[3], data[4]
        gt = shapes
        shapes = nd.expand_dims(shapes, axis=1)

        shapes = shapes.as_in_context(ctx)
        labels = labels.as_in_context(ctx)
        masks = masks.as_in_context(ctx)
        params = params.as_in_context(ctx)
        param_masks = param_masks.as_in_context(ctx)
        with autograd.train_mode():
            out = model.decode(shapes)
        #out = model(shapes, labels, sample_prob)
        bsz, n_block, n_step = labels.shape
        labels = labels.reshape(bsz, n_block * n_step)
        masks = masks.reshape(bsz, n_block * n_step)
        out_pgm = out[0].reshape(bsz, n_block * n_step, opt.program_size + 1)

        bsz, n_block, n_step, n_param = params.shape
        params = params.reshape(bsz, n_block * n_step, n_param)
        param_masks = param_masks.reshape(bsz, n_block * n_step, n_param)
        out_param = out[1].reshape(bsz, n_block * n_step, n_param)
        loss_cls, acc = crit_cls(out_pgm, labels, masks)
        loss_reg = crit_reg(out_param, params, param_masks)

        end = time.time()

        loss_cls = loss_cls.mean().asscalar()
        loss_reg = loss_reg.mean().asscalar()

        if idx % opt.info_interval == 0:
            out_1 = nd.round(out[0]).astype('int64')
            out_2 = nd.round(out[1]).astype('int64')
            pred = nd.from_numpy(decode_multiple_block(
                out_1, out_2)).astype("float32").as_in_context(mx.cpu())
            IoU = BatchIoU(pred, gt)
            print(
                "Test: epoch {} batch {}/{}, loss_cls = {:.3f}, loss_reg = {:.3f}, acc = {:.3f}, IoU = {:.3f} time = {:.3f}"
                .format(epoch, idx, len(val_loader), loss_cls, loss_reg,
                        acc[0].asscalar(), IoU.mean(), end - start))
            sys.stdout.flush()
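crit_cls and crit_reg are not defined in this snippet. Consistent with how they are called here (crit_cls returns a per-sample loss plus an accuracy array, crit_reg a per-sample loss), a minimal masked cross-entropy / masked squared-error sketch in MXNet could look like the following; the function names and the mean-over-valid-steps reduction are assumptions:

from mxnet import nd

def masked_cross_entropy(out_pgm, labels, masks):
    # out_pgm: (bsz, T, n_classes); labels, masks: (bsz, T)
    logp = nd.log_softmax(out_pgm, axis=-1)
    nll = -nd.pick(logp, labels, axis=-1)
    loss = (nll * masks).sum(axis=1) / (masks.sum(axis=1) + 1e-8)
    pred = nd.argmax(out_pgm, axis=-1)
    acc = ((pred == labels.astype(pred.dtype)) * masks).sum() / (masks.sum() + 1e-8)
    return loss, acc

def masked_l2(out_param, params, param_masks):
    # out_param, params, param_masks: (bsz, T, n_param)
    err = nd.square(out_param - params) * param_masks
    return err.sum(axis=(1, 2)) / (param_masks.sum(axis=(1, 2)) + 1e-8)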
Example #3
def test_on_shapenet_data(epoch,
                          test_loader,
                          model,
                          opt,
                          ctx,
                          gen_shape=False):

    generated_shapes = []
    original_shapes = []
    gen_pgms = []
    gen_params = []

    for idx, data in enumerate(test_loader):
        start = time.time()

        shapes = nd.expand_dims(data, axis=1).as_in_context(ctx)
        with autograd.train_mode():
            out = model.decode(shapes)

        end = time.time()

        if gen_shape:
            generated_shapes.append(decode_multiple_block(out[0], out[1]))
            original_shapes.append(data.asnumpy())
            save_pgms = nd.argmax(out[0], axis=3).asnumpy()
            save_params = out[1].asnumpy()
            gen_pgms.append(save_pgms)
            gen_params.append(save_params)

        if idx % opt.info_interval == 0:
            print("Test: epoch {} batch {}/{}, time={:.3f}".format(
                epoch, idx, len(test_loader), end - start))

    if gen_shape:
        generated_shapes = np.concatenate(generated_shapes, axis=0)
        original_shapes = np.concatenate(original_shapes, axis=0)
        gen_pgms = np.concatenate(gen_pgms, axis=0)
        gen_params = np.concatenate(gen_params, axis=0)

    return original_shapes, generated_shapes, gen_pgms, gen_params
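The arrays returned here are plain NumPy occupancy grids, so they can be scored directly. A small helper in the spirit of the BatchIoU used elsewhere in these examples (the project's real BatchIoU may have a different signature):

import numpy as np

def batch_voxel_iou(pred, gt, thresh=0.5):
    # pred, gt: (B, D, H, W) occupancy grids; returns one IoU per sample
    p = pred.reshape(pred.shape[0], -1) > thresh
    g = gt.reshape(gt.shape[0], -1) > thresh
    inter = np.logical_and(p, g).sum(axis=1).astype(np.float64)
    union = np.logical_or(p, g).sum(axis=1).astype(np.float64)
    return inter / np.maximum(union, 1.0)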
Example #4
def test_on_shapenet_data(epoch, test_loader, model, opt, gen_shape=False):

    model.eval()
    generated_shapes = []
    original_shapes = []
    gen_pgms = []
    gen_params = []

    for idx, data in enumerate(test_loader):
        start = time.time()

        shapes = data
        shapes = Variable(torch.unsqueeze(shapes, 1),
                          requires_grad=False).cuda()

        out = model.decode(shapes)

        if opt.is_cuda:
            torch.cuda.synchronize()
        end = time.time()

        if gen_shape:
            generated_shapes.append(decode_multiple_block(out[0], out[1]))
            original_shapes.append(data.clone().numpy())
            _, save_pgms = torch.max(out[0].data, dim=3)
            save_pgms = save_pgms.cpu().numpy()
            save_params = out[1].data.cpu().numpy()
            gen_pgms.append(save_pgms)
            gen_params.append(save_params)

        if idx % opt.info_interval == 0:
            print("Test: epoch {} batch {}/{}, time={:.3f}".format(
                epoch, idx, len(test_loader), end - start))

    if gen_shape:
        generated_shapes = np.concatenate(generated_shapes, axis=0)
        original_shapes = np.concatenate(original_shapes, axis=0)
        gen_pgms = np.concatenate(gen_pgms, axis=0)
        gen_params = np.concatenate(gen_params, axis=0)

    return original_shapes, generated_shapes, gen_pgms, gen_params
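Because this variant also returns the predicted program tokens and parameters, a common follow-up is to dump everything for offline inspection; a minimal sketch (the file name is illustrative):

ori, gen, pgms, prms = test_on_shapenet_data(epoch, test_loader, model, opt,
                                             gen_shape=True)
np.savez("shapenet_pred_epoch{}.npz".format(epoch),
         original=ori, generated=gen, programs=pgms, params=prms)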
Example #5
def validate(epoch, val_loader, generator, opt, gen_shape=False):
    """
    evaluate program generator, in terms of IoU
    """
    generator.eval()
    generated_shapes = []
    original_shapes = []

    for idx, data in enumerate(val_loader):
        start = time.time()

        shapes = data
        shapes = torch.unsqueeze(shapes, 1)

        if opt.is_cuda:
            shapes = shapes.cuda()

        out = generator.decode(shapes)

        if opt.is_cuda:
            torch.cuda.synchronize()

        end = time.time()

        if gen_shape:
            generated_shapes.append(decode_multiple_block(out[0], out[1]))
            original_shapes.append(data.clone().numpy())

        if idx % opt.info_interval == 0:
            print("Test: epoch {} batch {}/{}, time={:.3f}".format(
                epoch, idx, len(val_loader), end - start))

    if gen_shape:
        generated_shapes = np.concatenate(generated_shapes, axis=0)
        original_shapes = np.concatenate(original_shapes, axis=0)

    return generated_shapes, original_shapes
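The decoder is run without disabling autograd here; on recent PyTorch versions the whole evaluation can be wrapped in torch.no_grad() to avoid storing intermediate activations, assuming the same call signature as above:

with torch.no_grad():
    gen_shapes, ori_shapes = validate(epoch, val_loader, generator, opt,
                                      gen_shape=True)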
Example #6
def train(epoch, train_loader, generator, executor, soft, criterion, optimizer,
          opt):
    """
    one epoch guided adaptation
    """
    generator.train()

    # keep the executor in train mode (otherwise we cannot backprop through its LSTM),
    # but its parameters are not updated; its BatchNorm layers are frozen below
    executor.train()

    def set_bn_eval(m):
        classname = m.__class__.__name__
        if classname.find('BatchNorm') != -1:
            m.eval()

    executor.apply(set_bn_eval)

    for idx, data in enumerate(train_loader):
        start = time.time()

        optimizer.zero_grad()
        generator.zero_grad()
        executor.zero_grad()

        shapes = data
        raw_shapes = data

        shapes = torch.unsqueeze(shapes, 1)
        if opt.is_cuda:
            shapes = shapes.cuda()

        pgms, params = generator.decode(shapes)

        # truly rendered shapes
        rendered_shapes = decode_multiple_block(pgms, params)
        IoU2 = BatchIoU(rendered_shapes, raw_shapes.clone().numpy())

        # neurally rendered shapes
        pgms = torch.exp(pgms)
        bsz, n_block, n_step, n_vocab = pgms.shape
        pgm_vector = pgms.view(bsz * n_block, n_step, n_vocab)
        bsz, n_block, n_step, n_param = params.shape
        param_vector = params.view(bsz * n_block, n_step, n_param)
        index = (n_step - 1) * torch.ones(bsz * n_block).long()
        if opt.is_cuda:
            index = index.cuda()

        pred = executor(pgm_vector, param_vector, index)
        pred = soft(pred)
        pred = pred[:, 1, :, :, :]
        pred = pred.contiguous().view(bsz, n_block, 32, 32, 32)

        rec, _ = torch.max(pred[:, :, :, :, :], dim=1)
        rec1 = rec
        rec1.unsqueeze_(1)
        rec0 = 1 - rec1
        rec_all = torch.cat((rec0, rec1), dim=1)
        rec_all = torch.log(rec_all + 1e-10)
        loss = criterion(rec_all, shapes.detach().squeeze_(1).long())

        loss.backward()
        clip_gradient(optimizer, opt.grad_clip)
        optimizer.step()

        reconstruction = rec.data.cpu().numpy()
        reconstruction = np.squeeze(reconstruction, 1)
        reconstruction = reconstruction > 0.5
        reconstruction = reconstruction.astype(np.uint8)
        raw_shapes = raw_shapes.clone().numpy()
        IoU1 = BatchIoU(reconstruction, raw_shapes)

        if opt.is_cuda:
            torch.cuda.synchronize()

        end = time.time()

        if idx % opt.info_interval == 0:
            print(
                "Train: epoch {} batch {}/{}, loss = {:.3f}, IoU1 = {:.3f}, IoU2 = {:.3f}, time = {:.3f}"
                .format(epoch, idx, len(train_loader), loss.data[0],
                        IoU1.mean(), IoU2.mean(), end - start))
            sys.stdout.flush()
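clip_gradient is not shown in this snippet; a common implementation consistent with the call clip_gradient(optimizer, opt.grad_clip) clamps every gradient elementwise before the optimizer step:

def clip_gradient(optimizer, grad_clip):
    # clamp each parameter gradient to [-grad_clip, grad_clip]
    for group in optimizer.param_groups:
        for param in group['params']:
            if param.grad is not None:
                param.grad.data.clamp_(-grad_clip, grad_clip)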
Example #7
def train(epoch, train_loader, generator, executor, criterion, optimizer, opt,
          ctx):
    """
    one epoch guided adaptation
    """
    def set_bn_eval(m):
        if m.prefix[:9] == 'batchnorm':
            m._kwargs['use_global_stats'] = True
            m.grad_req = 'null'

    executor.apply(set_bn_eval)
    for idx, data in enumerate(train_loader):
        start = time.time()
        shapes = data.as_in_context(ctx)
        raw_shapes = data
        shapes = shapes.expand_dims(axis=1)
        with autograd.record():
            pgms, params = generator.decode(shapes)

            # truly rendered shapes
            pgms_int = nd.round(pgms).astype('int64')
            params_int = nd.round(params).astype('int64')

            # neurally rendered shapes
            pgms = nd.exp(pgms)
            bsz, n_block, n_step, n_vocab = pgms.shape
            pgm_vector = pgms.reshape(bsz * n_block, n_step, n_vocab)
            bsz, n_block, n_step, n_param = params.shape
            param_vector = params.reshape(bsz * n_block, n_step, n_param)
            index = (n_step - 1) * nd.ones(bsz * n_block).astype('int64')
            index = index.as_in_context(ctx)

            pred = executor(pgm_vector, param_vector, index)
            pred = nd.softmax(pred, axis=1)
            pred = pred[:, 1]  # probability of the 'occupied' class
            pred = pred.reshape(bsz, n_block, 32, 32, 32)

            rec = nd.max(pred, axis=1)
            # per-voxel negative log-likelihood: take log p(occupied) where the
            # ground-truth voxel is 1 and log p(empty) where it is 0
            rec1 = nd.log(rec + 1e-11)
            rec0 = nd.log(1 - rec + 1e-11)
            gt = shapes.squeeze().astype('int64')
            loss = -nd.where(gt, rec1, rec0).mean(axis=(1, 2, 3))
        loss.backward()
        optimizer.step(loss.shape[0], ignore_stale_grad=True)
        l = loss.mean().asscalar()

        rendered_shapes = decode_multiple_block(pgms_int, params_int)
        rendered_shapes = nd.from_numpy(rendered_shapes).astype(
            'float32').as_in_context(mx.cpu())
        IoU2 = BatchIoU(raw_shapes, rendered_shapes)
        reconstruction = (rec.as_in_context(mx.cpu()) > 0.5).astype('float32')
        IoU1 = BatchIoU(reconstruction, raw_shapes)
        #print("IoU1:",IoU1,IoU2)
        IoU1 = IoU1.mean()
        IoU2 = IoU2.mean()

        end = time.time()

        if idx % opt.info_interval == 0:
            print(
                "Train: epoch {} batch {}/{}, loss = {:.3f}, IoU1 = {:.3f}, IoU2 = {:.3f}, time = {:.3f}"
                .format(epoch, idx, len(train_loader), l, IoU1, IoU2,
                        end - start))
            sys.stdout.flush()
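In the Gluon API the optimizer argument is a gluon.Trainer, which is why step() receives the batch size explicitly; a minimal setup sketch (the optimizer choice and learning rate are assumptions):

from mxnet import gluon

trainer = gluon.Trainer(generator.collect_params(), 'adam',
                        {'learning_rate': 1e-4})
train(epoch, train_loader, generator, executor, criterion, trainer, opt, ctx)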
Example #8
def train(epoch, train_loader, model, crit_cls, crit_reg, optimizer, opt, ctx):
    """
    One epoch training
    """
    cls_w = opt.cls_weight
    reg_w = opt.reg_weight
    # sample_prob > 1: the input of step t is always sampled from the output of step t-1
    sample_prob = opt.inner_sample_prob

    for idx, data in enumerate(train_loader):
        start = time.time()
        #data, pgm, pgm_mask, param, param_mask
        shapes, labels, masks, params, param_masks = \
            data[0], data[1], data[2], data[3], data[4]
        gt = shapes
        shapes = nd.expand_dims(shapes, axis=1)
        #print(labels[0],params[0])
        shapes = shapes.as_in_context(ctx)

        labels = labels.as_in_context(ctx)
        labels2 = labels.as_in_context(ctx)

        masks = masks.as_in_context(ctx)
        params = params.as_in_context(ctx)
        param_masks = param_masks.as_in_context(ctx)

        #shapes.attach_grad(),labels.attach_grad()
        with autograd.record():
            out = model(shapes, labels, sample_prob)
            #out = model.decode(shapes)

            # reshape
            bsz, n_block, n_step = labels.shape
            labels = labels.reshape(bsz, -1)
            masks = masks.reshape(bsz, -1)
            out_pgm = out[0].reshape(bsz, n_block * n_step,
                                     opt.program_size + 1)

            bsz, n_block, n_step, n_param = params.shape
            params = params.reshape(bsz, n_block * n_step, n_param)
            param_masks = param_masks.reshape(bsz, n_block * n_step, n_param)
            out_param = out[1].reshape(bsz, n_block * n_step, n_param)

            loss_cls, acc = crit_cls(out_pgm, labels, masks)
            loss_reg = crit_reg(out_param, params, param_masks)
            loss = cls_w * loss_cls + reg_w * loss_reg
        loss.backward()

        optimizer.step(bsz, ignore_stale_grad=True)

        loss_cls = loss_cls.mean().asscalar()
        loss_reg = loss_reg.mean().asscalar()

        end = time.time()

        if idx % (opt.info_interval * 10) == 0:
            out_1 = nd.round(out[0]).astype('int64')
            out_2 = nd.round(out[1]).astype('int64')
            pred = nd.from_numpy(decode_multiple_block(
                out_1, out_2)).astype("float32").as_in_context(mx.cpu())
            IoU = BatchIoU(pred, gt)
            print(
                "Train: epoch {} batch {}/{},loss_cls = {:.3f},loss_reg = {:.3f},acc = {:.3f},IoU = {:.3f},time = {:.2f}"
                .format(epoch, idx, len(train_loader), loss_cls, loss_reg,
                        acc[0].asscalar(), IoU.mean(), end - start))
            sys.stdout.flush()
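A typical driver alternates this training function with the validation function from Example #2; a minimal sketch, assuming the loaders, model, criteria, trainer (passed as optimizer), opt and ctx from the snippets above already exist and that opt.epochs holds the epoch count:

for epoch in range(1, opt.epochs + 1):
    train(epoch, train_loader, model, crit_cls, crit_reg, optimizer, opt, ctx)
    validate(epoch, val_loader, model, crit_cls, crit_reg, opt, ctx)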