Code Example #1
File: mx_utils.py  Project: Carryma11/yolov4-gluoncv
def do_detect(model, img, conf_thresh, nms_thresh, use_cuda=1):
    model.eval()
    t0 = time.time()

    if type(img) == np.ndarray and len(img.shape) == 3:  # cv2 image, HWC -> CHW
        # ascontiguousarray: from_numpy's zero-copy path needs a C-contiguous buffer
        img = nd.from_numpy(np.ascontiguousarray(img.transpose(2, 0, 1)))
        img = img.astype('float32') / 255.0
        img = img.expand_dims(axis=0)
    elif type(img) == np.ndarray and len(img.shape) == 4:  # batched NHWC -> NCHW
        img = nd.from_numpy(np.ascontiguousarray(img.transpose(0, 3, 1, 2)))
        img = img.astype('float32') / 255.0
    else:
        print("unknown image type")
        exit(-1)

    if use_cuda:
        img = img.as_in_context(mx.gpu())  # MXNet stand-in for PyTorch's .cuda()
    # (no Variable wrapper needed: MXNet records gradients inside autograd.record())

    t1 = time.time()

    output = model(img)

    t2 = time.time()

    print('-----------------------------------')
    print('           Preprocess : %f' % (t1 - t0))
    print('      Model Inference : %f' % (t2 - t1))
    print('-----------------------------------')

    return utils.post_processing(img, conf_thresh, nms_thresh, output)
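A hedged usage sketch for do_detect: the model object, image file, and thresholds below are assumptions, not values from the project.

# Hypothetical call site (model and file are placeholders).
import cv2

img = cv2.imread("dog.jpg")        # HWC, BGR, uint8
img = cv2.resize(img, (416, 416))  # assumed network input size
boxes = do_detect(model, img, conf_thresh=0.4, nms_thresh=0.6, use_cuda=1)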
Code Example #2
    def forward(self, outputs, targets_heatmaps, targets_scale, targets_offset,
                targets_inds, targets_reg_mask):
        opt = self.opt
        hm_loss, wh_loss, off_loss = 0, 0, 0
        for s in range(opt.num_stacks):
            output = outputs[s]
            if not opt.mse_loss:
                output['hm'] = _sigmoid(output['hm'])

            # Optional: Use ground truth for validation
            if opt.eval_oracle_hm:
                output['hm'] = targets_heatmaps
            if opt.eval_oracle_wh:
                output['wh'] = nd.from_numpy(
                    gen_oracle_map(targets_scale.asnumpy(),
                                   targets_inds.asnumpy(),
                                   output['wh'].shape[3],
                                   output['wh'].shape[2])).as_in_context(
                                       opt.device)
            if opt.eval_oracle_offset:
                output['reg'] = nd.from_numpy(
                    gen_oracle_map(targets_offset.asnumpy(),
                                   targets_inds.asnumpy(),
                                   output['reg'].shape[3],
                                   output['reg'].shape[2])).as_in_context(
                                       opt.device)

            # 1. heatmap loss
            hm_loss = hm_loss + self.crit(output['hm'],
                                          targets_heatmaps) / opt.num_stacks

            # 2. scale loss
            if opt.wh_weight > 0:
                wh_loss = wh_loss + self.crit_reg(
                    output['wh'], targets_reg_mask, targets_inds,
                    targets_scale) / opt.num_stacks

            # 3. offset loss
            if opt.reg_offset and opt.off_weight > 0:
                off_loss = off_loss + self.crit_reg(
                    output['reg'], targets_reg_mask, targets_inds,
                    targets_offset) / opt.num_stacks

        # total loss
        loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \
               opt.off_weight * off_loss
        loss_stats = {
            'loss': loss,
            'hm_loss': hm_loss,
            'wh_loss': wh_loss,
            'off_loss': off_loss
        }
        return loss, loss_stats
Code Example #3
def validate(epoch,
             val_loader,
             model,
             crit_cls,
             crit_reg,
             opt,
             ctx,
             gen_shape=False):
    """
    One validation
    """
    generated_shapes = []
    original_shapes = []
    sample_prob = opt.inner_sample_prob
    loss_cls_sum, loss_reg_sum, n = 0.0, 0.0, 0

    for idx, data in enumerate(val_loader):
        start = time.time()

        shapes, labels, masks, params, param_masks = data[0], data[1], data[
            2], data[3], data[4]
        gt = shapes
        shapes = nd.expand_dims(shapes, axis=1)

        shapes = shapes.as_in_context(ctx)
        labels = labels.as_in_context(ctx)
        masks = masks.as_in_context(ctx)
        params = params.as_in_context(ctx)
        param_masks = param_masks.as_in_context(ctx)
        with autograd.train_mode():
            out = model.decode(shapes)
        #out = model(shapes, labels, sample_prob)
        bsz, n_block, n_step = labels.shape
        labels = labels.reshape(bsz, n_block * n_step)
        masks = masks.reshape(bsz, n_block * n_step)
        out_pgm = out[0].reshape(bsz, n_block * n_step, opt.program_size + 1)

        bsz, n_block, n_step, n_param = params.shape
        params = params.reshape(bsz, n_block * n_step, n_param)
        param_masks = param_masks.reshape(bsz, n_block * n_step, n_param)
        out_param = out[1].reshape(bsz, n_block * n_step, n_param)
        loss_cls, acc = crit_cls(out_pgm, labels, masks)
        loss_reg = crit_reg(out_param, params, param_masks)

        end = time.time()

        loss_cls = loss_cls.mean().asscalar()
        loss_reg = loss_reg.mean().asscalar()

        if idx % opt.info_interval == 0:
            out_1 = nd.round(out[0]).astype('int64')
            out_2 = nd.round(out[1]).astype('int64')
            pred = nd.from_numpy(decode_multiple_block(
                out_1, out_2)).astype("float32").as_in_context(mx.cpu())
            IoU = BatchIoU(pred, gt)
            print(
                "Test: epoch {} batch {}/{}, loss_cls = {:.3f}, loss_reg = {:.3f}, acc = {:.3f}, IoU = {:.3f} time = {:.3f}"
                .format(epoch, idx, len(val_loader), loss_cls, loss_reg,
                        acc[0].asscalar(), IoU.mean(), end - start))
            sys.stdout.flush()
Code Example #4
File: main_mxnet.py  Project: Jungminchae/RecoTour
def train_step(model, optimizer, data, epoch):

    running_loss, update_count = 0.0, 0
    N = data.shape[0]
    idxlist = list(range(N))
    np.random.shuffle(idxlist)
    training_steps = len(range(0, N, args.batch_size))

    with trange(training_steps) as t:
        for batch_idx, start_idx in zip(t, range(0, N, args.batch_size)):
            t.set_description("epoch: {}".format(epoch + 1))

            end_idx = min(start_idx + args.batch_size, N)
            X_inp = data[idxlist[start_idx:end_idx]]
            X_inp = nd.from_numpy(X_inp.toarray()).as_in_context(ctx)

            with autograd.record():
                if model.__class__.__name__ == "MultiVAE":
                    if args.total_anneal_steps > 0:
                        anneal = min(
                            args.anneal_cap, 1.0 * update_count / args.total_anneal_steps
                        )
                    else:
                        anneal = args.anneal_cap
                    update_count += 1
                    loss = model(X_inp, anneal)
                elif model.__class__.__name__ == "MultiDAE":
                    loss = model(X_inp)

            loss.backward()
            trainer.step(X_inp.shape[0])  # trainer: module-level gluon.Trainer (assumed)
            running_loss += nd.mean(loss).asscalar()
            avg_loss = running_loss / (batch_idx + 1)
            t.set_postfix(loss=avg_loss)
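The anneal factor above is the usual linear KL annealing for Mult-VAE: it ramps from 0 to args.anneal_cap over args.total_anneal_steps updates, then stays flat. A standalone sketch with assumed values:

anneal_cap, total_anneal_steps = 0.2, 200000   # assumed, not taken from args
for update_count in (0, 10000, 20000, 40000, 100000):
    anneal = min(anneal_cap, 1.0 * update_count / total_anneal_steps)
    print(update_count, anneal)  # 0.0, 0.05, 0.1, 0.2, 0.2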
Code Example #5
File: main_mxnet.py  Project: Jungminchae/RecoTour
def eval_step(data_tr, data_te, data_type="valid"):

    running_loss, update_count = 0.0, 0
    eval_idxlist = list(range(data_tr.shape[0]))
    eval_N = data_tr.shape[0]
    eval_steps = len(range(0, eval_N, args.batch_size))

    n100_list, r20_list, r50_list = [], [], []

    with trange(eval_steps) as t:
        for batch_idx, start_idx in zip(t, range(0, eval_N, args.batch_size)):
            t.set_description(data_type)

            end_idx = min(start_idx + args.batch_size, eval_N)
            X_tr = data_tr[eval_idxlist[start_idx:end_idx]]
            X_te = data_te[eval_idxlist[start_idx:end_idx]]

            X_tr_inp = nd.from_numpy(X_tr.toarray()).as_in_context(ctx)

            with autograd.predict_mode():
                if model.__class__.__name__ == "MultiVAE":
                    if args.total_anneal_steps > 0:
                        anneal = min(
                            args.anneal_cap, 1.0 * update_count / args.total_anneal_steps
                        )
                    else:
                        anneal = args.anneal_cap
                    # X_out (the reconstruction used below) is assumed to be
                    # returned alongside the loss
                    X_out, loss = model(X_tr_inp, anneal)
                elif model.__class__.__name__ == "MultiDAE":
                    X_out, loss = model(X_tr_inp)

            running_loss += nd.mean(loss).asscalar()
            avg_loss = running_loss / (batch_idx + 1)

            # Exclude examples from training set
            X_out = X_out.asnumpy()
            X_out[X_tr.nonzero()] = -np.inf

            n100 = NDCG_binary_at_k_batch(X_out, X_te, k=100)
            r20 = Recall_at_k_batch(X_out, X_te, k=20)
            r50 = Recall_at_k_batch(X_out, X_te, k=50)
            n100_list.append(n100)
            r20_list.append(r20)
            r50_list.append(r50)

            t.set_postfix(loss=avg_loss)

        n100_list = np.concatenate(n100_list)
        r20_list = np.concatenate(r20_list)
        r50_list = np.concatenate(r50_list)

    return avg_loss, np.mean(n100_list), np.mean(r20_list), np.mean(r50_list)
Code Example #6
    def forward(self, graph, feat, etypes):
        """Compute Gated Graph Convolution layer.

        Parameters
        ----------
        graph : DGLGraph
            The graph.
        feat : mxnet.NDArray
            The input feature of shape :math:`(N, D_{in})` where :math:`N`
            is the number of nodes of the graph and :math:`D_{in}` is the
            input feature size.
        etypes : mxnet.NDArray
            The edge type tensor of shape :math:`(E,)` where :math:`E` is
            the number of edges of the graph.

        Returns
        -------
        mxnet.NDArray
            The output feature of shape :math:`(N, D_{out})` where :math:`D_{out}`
            is the output feature size.
        """
        with graph.local_scope():
            assert graph.is_homogeneous, \
                "not a homogeneous graph; convert it with to_homogeneous " \
                "and pass in the edge type as argument"
            zero_pad = nd.zeros(
                (feat.shape[0], self._out_feats - feat.shape[1]),
                ctx=feat.context)
            feat = nd.concat(feat, zero_pad, dim=-1)

            for _ in range(self._n_steps):
                graph.ndata['h'] = feat
                for i in range(self._n_etypes):
                    eids = (etypes.asnumpy() == i).nonzero()[0]
                    eids = nd.from_numpy(eids, zero_copy=True).as_in_context(
                        feat.context).astype(graph.idtype)
                    if len(eids) > 0:
                        graph.apply_edges(
                            lambda edges:
                            {'W_e*h': self.linears[i](edges.src['h'])}, eids)
                graph.update_all(fn.copy_e('W_e*h', 'm'), fn.sum('m', 'a'))
                a = graph.ndata.pop('a')
                feat = self.gru(a, [feat])[0]
            return feat
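For context, a minimal sketch of driving a layer with this forward, assuming an older DGL release with the MXNet backend; the graph shape, feature sizes, and edge types are all assumptions.

import dgl
import mxnet as mx
from dgl.nn.mxnet.conv import GatedGraphConv

g = dgl.graph(([0, 1, 2], [1, 2, 0]))           # 3 nodes, 3 edges
feat = mx.nd.random.uniform(shape=(3, 5))       # D_in = 5
etypes = mx.nd.array([0, 1, 0], dtype='int64')  # one type id per edge
conv = GatedGraphConv(in_feats=5, out_feats=10, n_steps=2, n_etypes=2)
conv.initialize()
out = conv(g, feat, etypes)                     # shape (3, 10)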
Code Example #7
File: misc.py  Project: huzhouxiang/shape2gram-mxnet
def gather(self, dim, index):
    """
    Gathers values along an axis specified by ``dim``.

    For a 3-D tensor the output is specified by:
        out[i][j][k] = input[index[i][j][k]][j][k]  # if dim == 0
        out[i][j][k] = input[i][index[i][j][k]][k]  # if dim == 1
        out[i][j][k] = input[i][j][index[i][j][k]]  # if dim == 2

    Parameters
    ----------
    dim:
        The axis along which to index
    index:
        A tensor of indices of elements to gather

    Returns
    -------
    Output Tensor
    """
    idx_xsection_shape = index.shape[:dim] + \
        index.shape[dim + 1:]
    self_xsection_shape = self.shape[:dim] + self.shape[dim + 1:]
    if idx_xsection_shape != self_xsection_shape:
        raise ValueError(
            "Except for dimension " + str(dim) +
            ", all dimensions of index and self should be the same size")
    if index.dtype != np.dtype('int_'):
        raise TypeError("The values of index must be integers")
    data_swaped = nd.swapaxes(self, 0, dim).asnumpy()
    index_swaped = nd.swapaxes(index, 0, dim).asnumpy()
    #print(data_swaped,index_swaped)
    #print("index_swaped\n",index_swaped,index_swaped.shape,"data_swaped\n",data_swaped,data_swaped.shape,'\n')
    gathered = nd.from_numpy(np.choose(
        index_swaped, data_swaped)).as_in_context(d2l.try_gpu())
    return nd.swapaxes(gathered, 0, dim)
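A toy check of the semantics documented in the docstring, calling gather as the plain function defined above:

data = nd.array([[1, 2], [3, 4]])
index = nd.array([[0, 0], [1, 0]]).astype('int64')
# dim=1: out[i][j] = data[i][index[i][j]]
out = gather(data, 1, index)
print(out)  # [[1. 1.]
            #  [4. 3.]]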
Code Example #8
    def _init_bias(self, name, data):
        print('Init', name, data.shape)
        data[:] = nd.from_numpy(self.initial_bias)
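_init_bias is the hook that mx.init.Initializer dispatches to for parameters whose names end in 'bias'; a minimal sketch of an enclosing class it could belong to (the class name and the weight scheme are assumptions):

import numpy as np
import mxnet as mx
from mxnet import nd

class FixedBiasInit(mx.init.Initializer):  # hypothetical enclosing class
    def __init__(self, initial_bias):
        super(FixedBiasInit, self).__init__()
        self.initial_bias = np.asarray(initial_bias, dtype=np.float32)

    def _init_weight(self, name, data):
        nd.random.uniform(-0.07, 0.07, out=data)  # assumed weight scheme

    def _init_bias(self, name, data):
        print('Init', name, data.shape)
        data[:] = nd.from_numpy(self.initial_bias)

# usage sketch: net.initialize(FixedBiasInit(np.zeros(10)))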
Code Example #9
    def combine(x, y):
        y.setflags(write=1)
        y = nd.from_numpy(y).as_in_context(self.ctx)
        y = y.expand_dims(axis=1)
        return nd.concat(x, y, dim=1)
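The setflags(write=1) call above is the telling detail: in MXNet 1.x, nd.from_numpy appears to be zero-copy by default, so it wants a writable, C-contiguous buffer it can take over, and it flags the numpy array non-writable once the memory is shared. A minimal sketch under that assumption:

import numpy as np
from mxnet import nd

y = np.arange(6, dtype=np.float32).reshape(2, 3)
x = nd.from_numpy(y)          # shares y's buffer (zero_copy=True by default)
print(y.flags['WRITEABLE'])   # False: numpy can no longer mutate shared memory
z = nd.from_numpy(np.ones(3, dtype=np.float32), zero_copy=False)  # plain copy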
Code Example #10
def train(epoch, train_loader, model, loss, optimizer, opt, ctx, train_loss, train_iou):
    """
    one epoch training for program executor
    """
    loss_sum,iou_sum,n = 0.0,0.0,0
    for idx, data in enumerate(train_loader):
        start_t = time.time()

        shape, label, param = data
        bsz = shape.shape[0]
        n_step = label.shape[1]
        #print("label.shape:",label)
        #print("n_step:",n_step,"bsz:",bsz,"stop_id:",stop_id)
        
        # every sample uses the full program length, so index = n_step - 1 per row
        index = np.array(list(map(lambda x: n_step, label))) - 1
        #index = label
        
        # add noise during training, making the executor accept
        # continuous output from program generator
        label = label.reshape(-1,1).asnumpy()
        pgm_vector = 0.2 * np.random.uniform(0,1,(bsz * n_step, stop_id))
        pgm_noise = 0.2 *np.random.uniform(0,1,label.shape)
        pgm_value = 1 - pgm_noise
        #print('pgm_val.shape:',pgm_value.shape,'label.shape:',label.shape,'label.shape:',label.shape)
        pgm_vector = scatter_numpy(pgm_vector,1,label,pgm_value).reshape(bsz,n_step,stop_id)
        
        
        param_noise = nd.random_uniform(0,1,shape=param.shape)
        param_vector = param + 0.6 * (param_noise - 0.5)
        #print("param_vector.shape:",param_vector.shape)
        gt = shape.as_in_context(ctx)
        #print(pgm_vector.dtype)
        index = nd.from_numpy(index).astype('int64').as_in_context(ctx)
        pgm_vector = nd.from_numpy(pgm_vector).astype('float32').as_in_context(ctx)
        param_vector = param_vector.as_in_context(ctx)


        with autograd.record():
            pred = model(pgm_vector, param_vector, index)
            scores = nd.log_softmax(pred,axis=1)
            pred0 = scores[:,0].squeeze()*opt.n_weight
            pred1 = scores[:,1].squeeze()*opt.p_weight
            l = -nd.where(gt, pred1, pred0).mean((1,2,3))
            #l = -(nd.pick(scores1, gt, axis=1, keepdims=True)*opt.n_weight
            #    +nd.pick(scores2,(1-gt), axis=1, keepdims=True)*opt.p_weight).mean((1,2,3,4))
        l.backward()
                                        
        #clip_gradient(optimizer, opt.grad_clip)
        #optimizer._allreduce_grads();

        optimizer.step(l.shape[0],ignore_stale_grad=True)
        
        l = l.mean().asscalar()
        
        pred = nd.softmax(pred,axis = 1)
        pred = pred[:, 1, :, :, :]
        s1 = gt.reshape(-1, 32, 32, 32).astype('float32').as_in_context(mx.cpu())
        s2 = pred.squeeze().as_in_context(mx.cpu())
        #print(s2.shape)
        s2 = (s2 > 0.5)

        batch_iou = BatchIoU(s1, s2)
        iou = batch_iou.mean()
        end_t = time.time()
        loss_sum+=l
        n+=1
        iou_sum+=iou

        if idx % (opt.info_interval * 10) == 0:
            print("Train: epoch {} batch {}/{}, loss13 = {:.3f}, iou = {:.3f}, time = {:.3f}"
                  .format(epoch, idx, len(train_loader), l, iou, end_t - start_t))
            sys.stdout.flush()
        
    train_loss.append(loss_sum/n)
    train_iou.append(iou_sum/n)
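The scatter_numpy(pgm_vector, 1, label, pgm_value) call above writes a near-one value into each row's label column on top of low background noise, i.e. a noisy one-hot program encoding. A hypothetical standalone illustration; for this dim=1 case np.put_along_axis has the same effect:

import numpy as np

rows, stop_id = 3, 5                                  # assumed sizes
vec = 0.2 * np.random.uniform(0, 1, (rows, stop_id))  # background noise
label = np.array([[1], [4], [0]])                     # token id per row
value = 1 - 0.2 * np.random.uniform(0, 1, label.shape)
np.put_along_axis(vec, label, value, axis=1)          # ~ scatter_numpy(vec, 1, label, value)
print(vec.argmax(axis=1))                             # [1 4 0]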
Code Example #11
def validate(epoch, val_loader, model, loss, opt, ctx, val_loss, val_iou, gen_shape=False):

    # load pre-fixed randomization
    try:
        rand1 = np.load(opt.rand1)
        rand2 = np.load(opt.rand2)
        rand3 = np.load(opt.rand3)
    except IOError:  # fall back to fresh noise and save it for next time
        rand1 = np.random.rand(opt.batch_size * opt.seq_length, stop_id).astype(np.float32)
        rand2 = np.random.rand(opt.batch_size * opt.seq_length, 1).astype(np.float32)
        rand3 = np.random.rand(opt.batch_size, opt.seq_length, max_param - 1).astype(np.float32)
        np.save(opt.rand1, rand1)
        np.save(opt.rand2, rand2)
        np.save(opt.rand3, rand3)

    generated_shapes = None
    original_shapes = None
    
    loss_sum,iou_sum,n = 0.0,0.0,0
    for idx, data in enumerate(val_loader):
        start_t = time.time()

        shape, label, param = data

        bsz = shape.shape[0]
        n_step = label.shape[1]
        index = np.array(list(map(lambda x: n_step, label)))
        index = index - 1

        # add noise during training, making the executor accept
        # continuous output from program generator
        
        label = label.reshape(-1,1).asnumpy()
        pgm_vector = 0.1*rand1
        pgm_noise = 0.1*rand2
        pgm_value = np.ones(label.shape) - pgm_noise
        #print('pgm_val.shape:',pgm_value.shape,'label.shape:',label.shape,'label.shape:',label.shape)
        pgm_vector = scatter_numpy(pgm_vector,1,label,pgm_value).reshape(bsz,n_step,stop_id)

        param_noise = nd.from_numpy(rand3)
        #print(param.shape,param_noise.shape)
        param_vector = param + 0.6 * (param_noise - 0.5)
        
        
        gt = shape.astype('float32').as_in_context(ctx)
        index = nd.from_numpy(index).astype('int64').as_in_context(ctx)
        pgm_vector = nd.from_numpy(pgm_vector).as_in_context(ctx)
        param_vector = param_vector.as_in_context(ctx)
        #prediction
        pred = model(pgm_vector, param_vector, index)
        scores = nd.log_softmax(pred,axis=1)
        pred0 = scores[:,0].squeeze()*opt.p_weight
        pred1 = scores[:,1].squeeze()*opt.n_weight
        l = -nd.where(gt, pred1, pred0).mean((1,2,3))
        #print(pred2.dtype,gt.dtype)
        #l = loss(pred,gt,sample_weight = nd.array([opt.n_weight,opt.p_weight]))

        l = l.mean().asscalar()
        pred = nd.softmax(pred,axis=1)
        pred = pred[:, 1, :, :, :]
        s1 = gt.reshape(-1, 32, 32, 32).as_in_context(mx.cpu())
        s2 = pred.squeeze().as_in_context(mx.cpu())
        s2 = (s2 > 0.5)

        batch_iou = BatchIoU(s1, s2)
        iou = batch_iou.mean()
        loss_sum+=l
        n+=1
        iou_sum+=iou
        
        if (idx + 1) % 5 == 0 and gen_shape:
            if original_shapes is None:
                original_shapes = s1.expand_dims(axis=0)
                generated_shapes = s2.expand_dims(axis=0)
            else:
                original_shapes = nd.concat(original_shapes,s1.expand_dims(axis=0),dim=0)
                generated_shapes = nd.concat(generated_shapes,s2.expand_dims(axis=0),dim=0)
        end_t = time.time()

        if (idx + 1) % opt.info_interval == 0:
            print("Test: epoch {} batch {}/{}, loss13 = {:.3f}, iou = {:.3f}, time = {:.3f}"
                  .format(epoch, idx + 1, len(val_loader), l, iou, end_t - start_t))
            sys.stdout.flush()
        if idx + 1 > len(val_loader) / 10:
            break
    val_loss.append(loss_sum/n)
    val_iou.append(iou_sum/n)

    return generated_shapes, original_shapes
Code Example #12
def train(epoch, train_loader, generator, executor, criterion, optimizer, opt,
          ctx):
    """
    one epoch guided adaptation
    """
    def set_bn_eval(m):
        if m.prefix[:9] == 'batchnorm':
            m._kwargs['use_global_stats'] = True
            m.grad_req = 'null'

    executor.apply(set_bn_eval)
    for idx, data in enumerate(train_loader):
        start = time.time()
        shapes = data.as_in_context(ctx)
        raw_shapes = data
        shapes = shapes.expand_dims(axis=1)
        with autograd.record():
            pgms, params = generator.decode(shapes)

            # truly rendered shapes
            pgms_int = nd.round(pgms).astype('int64')
            params_int = nd.round(params).astype('int64')

            # neurally rendered shapes
            pgms = nd.exp(pgms)
            bsz, n_block, n_step, n_vocab = pgms.shape
            pgm_vector = pgms.reshape(bsz * n_block, n_step, n_vocab)
            bsz, n_block, n_step, n_param = params.shape
            param_vector = params.reshape(bsz * n_block, n_step, n_param)
            index = (n_step - 1) * nd.ones(bsz * n_block).astype('int64')
            index = index.as_in_context(ctx)

            pred = executor(pgm_vector, param_vector, index)
            pred = nd.softmax(pred, axis=1)
            #print(pred.shape)
            pred = pred[:, 1]
            pred = pred.reshape(bsz, n_block, 32, 32, 32)

            rec = nd.max(pred, axis=1)
            #print("rec.shape:",rec.shape,"shapes.shape:",shapes.shape)
            #rec1 = rec.expand_dims(axis=1)
            rec1 = nd.log(rec + 1e-11)
            rec0 = nd.log(1 - rec + 1e-11)
            #rec_all = nd.concat(rec0, rec1, dim=1)
            #rec_all1 = nd.log(rec_all + 1e-10)
            #rec_all2 = nd.log(1-rec_all + 1e-10)
            gt = shapes.squeeze().astype('int64')
            loss = -nd.where(gt, rec1, rec0).mean(axis=(1, 2, 3))
            #loss = -(nd.pick(rec_all1,gt,axis = 1,keepdims=True)).mean(axis = (1,2,3,4))
            #loss = criterion(rec_all, gt)
        loss.backward()
        optimizer.step(loss.shape[0], ignore_stale_grad=True)
        l = loss.mean().asscalar()

        rendered_shapes = decode_multiple_block(pgms_int, params_int)
        rendered_shapes = nd.from_numpy(rendered_shapes).astype(
            'float32').as_in_context(mx.cpu())
        IoU2 = BatchIoU(raw_shapes, rendered_shapes)
        reconstruction = (rec.as_in_context(mx.cpu()) > 0.5).astype('float32')
        IoU1 = BatchIoU(reconstruction, raw_shapes)
        #print("IoU1:",IoU1,IoU2)
        IoU1 = IoU1.mean()
        IoU2 = IoU2.mean()

        end = time.time()

        if idx % opt.info_interval == 0:
            print(
                "Train: epoch {} batch {}/{}, loss = {:.3f}, IoU1 = {:.3f}, IoU2 = {:.3f}, time = {:.3f}"
                .format(epoch, idx, len(train_loader), l, IoU1, IoU2,
                        end - start))
            sys.stdout.flush()
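The -nd.where(gt, rec1, rec0) expression above is a binary cross-entropy written as a select: log(rec) where a voxel is occupied, log(1 - rec) where it is empty. A toy check of the equivalence (values assumed):

import mxnet.ndarray as nd

rec = nd.array([[0.9, 0.2], [0.4, 0.7]])   # predicted occupancy
gt = nd.array([[1, 0], [0, 1]])            # ground-truth voxels
rec1, rec0 = nd.log(rec + 1e-11), nd.log(1 - rec + 1e-11)
a = -nd.where(gt, rec1, rec0).mean()
b = -(gt * rec1 + (1 - gt) * rec0).mean()
print(a.asscalar(), b.asscalar())          # identical values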
Code Example #13
def run():
    # get options
    opt = options_guided_adaptation.parse()
    opt_gen = options_train_generator.parse()
    opt_exe = options_train_executor.parse()
    print('===== arguments: guided adaptation =====')
    for key, val in vars(opt).items():
        print("{:20} {}".format(key, val))
    print('===== arguments: guided adaptation =====')

    if not os.path.isdir(opt.save_folder):
        os.makedirs(opt.save_folder)

    # build loaders
    train_set = ShapeNet3D(opt.train_file)
    train_loader = gdata.DataLoader(dataset=train_set,
                                    batch_size=opt.batch_size,
                                    shuffle=True,
                                    num_workers=opt.num_workers)

    val_set = ShapeNet3D(opt.val_file)
    val_loader = gdata.DataLoader(dataset=val_set,
                                  batch_size=opt.batch_size,
                                  shuffle=False,
                                  num_workers=opt.num_workers)

    def visual(path, epoch, gen_shapes, file_name, nums_samples):
        data = gen_shapes.transpose((0, 3, 2, 1))
        data = np.flip(data, axis=2)
        num_shapes = data.shape[0]
        for i in range(min(nums_samples, num_shapes)):
            voxels = data[i]
            save_name = os.path.join(path, file_name.format(epoch, i))
            visualization(voxels,
                          threshold=0.1,
                          save_name=save_name,
                          uniform_size=0.9)

    ctx = d2l.try_gpu()

    # load program generator
    generator = BlockOuterNet(opt_gen)
    generator.init_blocks(ctx)
    generator.load_parameters("model of blockouternet")

    # load program executor
    executor = RenderNet(opt_exe)
    executor.initialize(init=init.Xavier(), ctx=ctx)
    executor.load_parameters("model of executor")

    # build loss functions
    criterion = gloss.SoftmaxCrossEntropyLoss(axis=1, from_logits=True)

    optimizer = Trainer(
        generator.collect_params(), "adam", {
            "learning_rate": opt.learning_rate,
            "wd": opt.weight_decay,
            'beta1': opt.beta1,
            'beta2': opt.beta2,
            'clip_gradient': opt.grad_clip
        })

    print("###################")
    print("testing")
    gen_shapes, ori_shapes = validate(0,
                                      val_loader,
                                      generator,
                                      opt,
                                      ctx,
                                      gen_shape=True)
    #visual('imgs of chairs/adaption/chair/',0,ori_shapes,'GT {}-{}.png',8)
    #visual('imgs of chairs/adaption/chair/',0,gen_shapes,'epoch{}-{}.png',8)

    gen_shapes = nd.from_numpy(gen_shapes)
    ori_shapes = nd.from_numpy(ori_shapes)
    #print(gen_shapes.dtype,ori_shapes.dtype)
    #print("done",ori_shapes.shape,gen_shapes.shape)

    IoU = BatchIoU(gen_shapes, ori_shapes)
    #print(IoU)
    print("iou: ", IoU.mean())

    best_iou = 0
    print(opt.epochs)
    for epoch in range(1, opt.epochs + 1):
        print("###################")
        print("adaptation")
        train(epoch, train_loader, generator, executor, criterion, optimizer,
              opt, ctx)
        print("###################")
        print("testing")
        gen_shapes, ori_shapes = validate(epoch,
                                          val_loader,
                                          generator,
                                          opt,
                                          ctx,
                                          gen_shape=True)
        #visual('imgs of chairs/adaption/chair/',epoch,gen_shapes,'epoch{}-{}.png',8)
        gen_shapes = nd.from_numpy(gen_shapes)
        ori_shapes = nd.from_numpy(ori_shapes)
        IoU = BatchIoU(gen_shapes, ori_shapes)

        print("iou: ", IoU.mean())

        if epoch % opt.save_interval == 0:

            print('Saving...')
            generator.save_parameters("generator of GA on shapenet")
            optimizer.save_states("optimizer of generator of GA on shapenet")

        if IoU.mean() >= best_iou:
            print('Saving best model')
            generator.save_parameters("generator of GA on shapenet")
            optimizer.save_states("optimizer of generator of GA on shapenet")
            best_iou = IoU.mean()
Code Example #14
File: general.py  Project: troyliu0105/mxdetection
    def do(self, img, target):
        if not isinstance(img, nd.NDArray):
            img = nd.from_numpy(img)
        img = nd.image.to_tensor(img)
        target = nd.array(target)
        return img, target
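A hedged usage sketch for the transform hook above; the image size, target layout, and the aug instance are assumptions:

import numpy as np

img = np.zeros((416, 416, 3), dtype=np.uint8)  # HWC image
target = [[10, 10, 50, 50, 0]]                 # assumed [x1, y1, x2, y2, class]
img_t, target_t = aug.do(img, target)          # aug: instance owning do()
print(img_t.shape)                             # (3, 416, 416), float32 in [0, 1]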
Code Example #15
def train(epoch, train_loader, model, crit_cls, crit_reg, optimizer, opt, ctx):
    """
    One epoch training
    """
    cls_w = opt.cls_weight
    reg_w = opt.reg_weight
    # if the prob is > 1, the input of step t is always sampled
    # from the output of step t-1
    sample_prob = opt.inner_sample_prob

    for idx, data in enumerate(train_loader):
        start = time.time()
        #data, pgm, pgm_mask, param, param_mask
        shapes, labels, masks, params, param_masks = data[0], data[1], data[
            2], data[3], data[4]
        gt = shapes
        shapes = nd.expand_dims(shapes, axis=1)
        #print(labels[0],params[0])
        shapes = shapes.as_in_context(ctx)

        labels = labels.as_in_context(ctx)
        labels2 = labels.as_in_context(ctx)

        masks = masks.as_in_context(ctx)
        params = params.as_in_context(ctx)
        param_masks = param_masks.as_in_context(ctx)

        #shapes.attach_grad(),labels.attach_grad()
        with autograd.record():
            out = model(shapes, labels, sample_prob)
            #out = model.decode(shapes)

            # reshape
            bsz, n_block, n_step = labels.shape
            labels = labels.reshape(bsz, -1)
            masks = masks.reshape(bsz, -1)
            out_pgm = out[0].reshape(bsz, n_block * n_step,
                                     opt.program_size + 1)

            bsz, n_block, n_step, n_param = params.shape
            params = params.reshape(bsz, n_block * n_step, n_param)
            param_masks = param_masks.reshape(bsz, n_block * n_step, n_param)
            out_param = out[1].reshape(bsz, n_block * n_step, n_param)

            loss_cls, acc = crit_cls(out_pgm, labels, masks)
            loss_reg = crit_reg(out_param, params, param_masks)
            loss = cls_w * loss_cls + reg_w * loss_reg
        loss.backward()

        optimizer.step(bsz, ignore_stale_grad=True)

        loss_cls = loss_cls.mean().asscalar()
        loss_reg = loss_reg.mean().asscalar()

        end = time.time()

        if idx % (opt.info_interval * 10) == 0:
            out_1 = nd.round(out[0]).astype('int64')
            out_2 = nd.round(out[1]).astype('int64')
            pred = nd.from_numpy(decode_multiple_block(
                out_1, out_2)).astype("float32").as_in_context(mx.cpu())
            IoU = BatchIoU(pred, gt)
            print(
                "Train: epoch {} batch {}/{},loss_cls = {:.3f},loss_reg = {:.3f},acc = {:.3f},IoU = {:.3f},time = {:.2f}"
                .format(epoch, idx, len(train_loader), loss_cls, loss_reg,
                        acc[0].asscalar(), IoU.mean(), end - start))
            sys.stdout.flush()