Example #1
def validate(opts, val_list, file, val_iter, sw=None, num_plot=None):
    """Validation"""
    if num_plot is None:
        num_plot = len(val_list)
    loss_cumulative = []
    pred_list = []
    lab_list = []
    val_iter.reset()
    count = 0  # count number of pred and lab retained
    for (i, batch) in enumerate(val_iter):
        idx = batch.data[0].asnumpy()
        data = nd.zeros((opts.batch_size, 60, 20, 240, 240), ctx=ctx)
        label = nd.zeros((opts.batch_size, 20, 240, 240), ctx=ctx)
        for (j, jj) in zip(idx, range(opts.batch_size)):
            data[jj] = np.load("%sIMG.npy" % val_list[int(j)])
            label[jj] = np.load("%s%s.npy" % (val_list[int(j)], file))

        pred = net(data)
        L = loss(pred, label)
        loss_cumulative.append(L.expand_dims(1))
        if count < num_plot:
            pred_list.append(pred)
            lab_list.append(label)
            count += 1
    return nd.concat(*loss_cumulative, dim=0).mean().asscalar(), \
           nd.squeeze(nd.concat(*pred_list, dim=0)), \
           nd.squeeze(nd.concat(*lab_list, dim=0))
Example #2
    def forward(self, pred, target, mask):
        # truncate to the same size
        pred = pred.copy()
        bsz = pred.shape[0]
        target = target.copy()
        mask = mask.copy()
        target = target[:, :pred.shape[1]].reshape(-1, 1)  # (batch, seq_len) -> (batch*seq_len, 1)
        mask = mask[:, :pred.shape[1]].reshape(-1, 1)      # (batch*seq_len, 1)
        pred = pred.reshape(-1, pred.shape[2])             # (batch*seq_len, vocab_size)

        # masked negative log-likelihood: pick the log-probability of each target token
        loss = -nd.pick(pred, target).expand_dims(axis=1) * mask

        loss = nd.sum(loss) / nd.sum(mask)

        # compute accuracy
        idx = nd.argmax(pred, axis=1).astype('int64')
        #print( idx.dtype,target.dtype)
        correct = (idx == nd.squeeze(target))
        correct = correct.astype('float32') * nd.squeeze(mask)
        accuracy = nd.sum(correct) / nd.sum(mask)
        return loss, accuracy
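A minimal standalone sketch of the pick-and-mask pattern above, on hypothetical toy shapes (4 tokens, 5-word vocabulary); all names are illustrative only:

from mxnet import nd

# toy log-probabilities for 4 tokens over a 5-word vocabulary
log_probs = nd.log_softmax(nd.random.uniform(shape=(4, 5)))
targets = nd.array([1, 4, 0, 2])           # gold indices
mask = nd.array([1, 1, 1, 0])              # last position is padding

nll = -nd.pick(log_probs, targets)         # per-token negative log-likelihood
loss = nd.sum(nll * mask) / nd.sum(mask)   # average over non-padded tokens
print(loss.asscalar())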
Example #3
def format_to_plot(tensor: nd.NDArray) -> np.ndarray:
    """format the input tensor from NCHW/CHW to HWC/HW for plotting"""
    if len(tensor.shape) == 4:
        tensor = nd.squeeze(tensor, axis=0)
    if tensor.shape[0] == 1:
        tensor = nd.squeeze(tensor, axis=0)
    else:
        tensor = nd.transpose(tensor, axes=(1, 2, 0))
    return tensor.asnumpy()
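A small usage sketch for format_to_plot on hypothetical random inputs, showing the NCHW -> HWC and 1xHxW -> HxW conversions:

from mxnet import nd

nchw = nd.random.uniform(shape=(1, 3, 8, 8))     # batched colour feature map
chw_single = nd.random.uniform(shape=(1, 8, 8))  # single-channel map

print(format_to_plot(nchw).shape)        # (8, 8, 3), ready for plt.imshow
print(format_to_plot(chw_single).shape)  # (8, 8)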
Example #4
    def forward(self, x, padding=None):
        ctx = x.context
        batch_size = x.shape[0]
        length = x.shape[1]
        if padding is not None:
            # Flatten padding to [batch_size * length]
            pad_mask = nd.reshape(padding, (-1,))
            nonpad_ids = nd.array(np.where(pad_mask.asnumpy() < 1e-9), ctx=ctx)

            # Reshape x to [batch_size*length, hidden_size] to remove padding
            x = nd.reshape(x, (-1, self.hidden_size))
            x = nd.gather_nd(x, indices=nonpad_ids)

            # Reshape x from 2 dimensions to 3 dimensions
            x = nd.expand_dims(x, axis=0)

        output = self.filter_dense_layer(x)
        if self.train:
            output = self.dropout(output)
        output = self.output_dense_layer(output)

        if padding is not None:
            output = nd.squeeze(output, axis=0)
            output = nd.scatter_nd(data=output,
                                   indices=nonpad_ids,
                                   shape=(batch_size * length,
                                          self.hidden_size))
            output = nd.reshape(output,
                                shape=(batch_size, length, self.hidden_size))

        return output
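A minimal sketch of the padding-removal trick used above (gather_nd drops the padded rows, scatter_nd restores them), on a hypothetical 2x3 batch with hidden size 4:

import numpy as np
from mxnet import nd

x = nd.arange(2 * 3 * 4).reshape((2, 3, 4))   # (batch, length, hidden)
padding = nd.array([[0, 0, 1], [0, 1, 1]])    # 1 marks padded positions

pad_mask = nd.reshape(padding, (-1,))
nonpad_ids = nd.array(np.where(pad_mask.asnumpy() < 1e-9))

flat = nd.reshape(x, (-1, 4))                       # (batch*length, hidden)
compact = nd.gather_nd(flat, indices=nonpad_ids)    # only the 3 non-padded rows
restored = nd.scatter_nd(data=compact, indices=nonpad_ids, shape=(2 * 3, 4))
print(compact.shape, restored.reshape((2, 3, 4)).shape)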
Example #5
def benchmarking(opt, net, ctx):
    bs = opt.batch_size
    num_iterations = opt.num_iterations
    input_size = opt.input_size
    size = num_iterations * bs
    input_shape = (bs * opt.num_segments, 3, opt.new_length, input_size,
                   input_size)
    data = mx.random.uniform(-1.0,
                             1.0,
                             shape=input_shape,
                             ctx=ctx[0],
                             dtype='float32')
    if opt.new_length == 1:
        # this is for 2D input case
        data = nd.squeeze(data, axis=2)
    dry_run = 5

    from tqdm import tqdm
    with tqdm(total=size + dry_run * bs) as pbar:
        for n in range(dry_run + num_iterations):
            if n == dry_run:
                tic = time.time()
            output = net(data)
            output.wait_to_read()
            pbar.update(bs)
    speed = size / (time.time() - tic)
    print('With batch size %d, %d batches, throughput is %f imgs/sec' %
          (bs, num_iterations, speed))
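The dry-run plus wait_to_read timing pattern above works for any Gluon block; a minimal CPU sketch with a hypothetical Dense layer:

import time
import mxnet as mx
from mxnet import nd
from mxnet.gluon import nn

ctx = mx.cpu()
net = nn.Dense(10)
net.initialize(ctx=ctx)
data = nd.random.uniform(shape=(32, 100), ctx=ctx)

dry_run, num_iterations = 5, 20
for n in range(dry_run + num_iterations):
    if n == dry_run:
        tic = time.time()          # start timing only after warm-up
    out = net(data)
    out.wait_to_read()             # block until the async engine finishes
print('%.1f samples/sec' % (num_iterations * 32 / (time.time() - tic)))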
Example #6
    def __getitem__(self, idx):
        image, label = self.record_file[idx]
        image = image.asnumpy()
        orig_h, orig_w = image.shape[:2]

        # Resize the input image to self.im_shape and rescale the box labels accordingly
        image = cv2.resize(image, self.im_shape)
        label[:, 0] = (label[:, 0] * self.im_shape[0]) / orig_w
        label[:, 2] = (label[:, 2] * self.im_shape[0]) / orig_w
        label[:, 1] = (label[:, 1] * self.im_shape[1]) / orig_h
        label[:, 3] = (label[:, 3] * self.im_shape[1]) / orig_h

        # Resize label to downsampled size
        label[:, 0] = (label[:, 0] * self.label_shape[0]) / self.im_shape[0]
        label[:, 2] = (label[:, 2] * self.label_shape[0]) / self.im_shape[0]
        label[:, 1] = (label[:, 1] * self.label_shape[1]) / self.im_shape[1]
        label[:, 3] = (label[:, 3] * self.label_shape[1]) / self.im_shape[1]

        heatmap, wh_target, wh_mask, center_reg, center_reg_mask = self.gen(
            label[:, :4], label[:, 4])
        x, _ = transform_test(nd.array(image),
                              short=min(self.im_shape[0], self.im_shape[1]))
        x = nd.squeeze(x)

        return x, heatmap, wh_target, wh_mask, center_reg, center_reg_mask
Example #7
def batch_loss(encoder, sent_rnn, X, Y, vocab, loss, ctx):

    batch_size = X.shape[1]
    sentence_hidden, doc_encode = encoder(X)
    sentence_hidden = nd.transpose(sentence_hidden, axes=(1, 0, 2))

    # Use a mask variable to ignore the loss at positions whose label is the padding token PAD
    # mask, num_not_pad_tokens = nd.ones(shape=(batch_size,), ctx=ctx), 0
    l = nd.array([0], ctx=ctx)

    # state from all previous steps
    previous = sentence_hidden[0]

    # sent_hidden: (batch_size, hidden)
    for sent_hidden, y in zip(sentence_hidden, Y.T):

        y_h = sent_rnn(sent_hidden, previous, doc_encode)
        y_h = nd.squeeze(y_h)

        los = loss(y_h, y).sum()
        # print('los', los)
        l = l + los

        # Equation 7; teacher forcing is used here
        y = nd.expand_dims(y, -1)
        previous = previous + sent_hidden * y

    return l / batch_size
Example #8
 def forward(self, x):
     x = self.conv(x)
     x = nd.squeeze(x, axis=-1)
     x = self.norm(x)
     x = nd.relu(x)
     x = self.pooling(x).flatten()
     return x
Example #9
def plot_features(features: nd.NDArray, scope: int = 9):
    """
    visualize feature maps per channel.

    :param features: feature map with shape 1xCxHxW
    :param scope: number of leading channels to visualize, i.e. indices in [0, scope)
    """
    scope = scope if scope < features.shape[1] else features.shape[1]
    feature_maps = nd.squeeze(features, axis=0).asnumpy()
    feature_map_combination = []
    # separate visualization
    row, col = get_row_col(scope)
    plt.figure()
    for i in range(0, scope):
        feature_map = feature_maps[i, :, :]
        feature_map_combination.append(feature_map)
        plt.subplot(row, col, i + 1)
        plt.imshow(feature_map)
        plt.axis('off')
        plt.title(f"feature map {i}")
    plt.show()
    # overlaps
    feature_map_sum = sum(ele for ele in feature_map_combination)
    plt.imshow(feature_map_sum)
    plt.show()
Example #10
 def _mask(output, palette_key, test_citys=False):
     predict = nd.squeeze(nd.argmax(output, 1)).asnumpy()
     if test_citys:
         mask = city_train2label(predict)
     else:
         mask = my_color_palette(predict, palette_key)
     return mask
Example #11
    def test_model_for_ml(self):
        net_path = os.path.join(
            DATA_DIR, 'model',
            'epoch-3-0.48-20180920164709.params-symbol.json')
        params_path = os.path.join(
            DATA_DIR, 'model',
            'epoch-3-0.48-20180920164709.params-0003.params')

        net = gluon.nn.SymbolBlock.imports(net_path, ['data'], params_path)

        im_path = os.path.join(DATA_DIR, 'imgs_data',
                               'd4YE10xHdvbwKJV5yBYsoJJke6K9b.jpg')
        img = image.imread(im_path)

        # plt.imshow(img.asnumpy())
        # plt.show()

        transform_fn = transforms.Compose([
            transforms.Resize(224, keep_ratio=True),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        ])

        img = transform_fn(img)

        img = nd.expand_dims(img, axis=0)
        res = net(img.as_in_context(self.ctx[0]))
        res = sigmoid(nd.squeeze(res)).asnumpy()
        res = np.where(res > 0.5, 1, 0)
        indexes, = np.where(res == 1)
        res_classes = [self.class_names[i] for i in indexes.tolist()]
        # print(indexes.tolist())
        print('Test classes: {}'.format(res_classes))
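The preprocessing pipeline above can be exercised without the trained model; a minimal sketch on a random HWC uint8 image (sizes are illustrative):

import numpy as np
from mxnet import nd
from mxnet.gluon.data.vision import transforms

transform_fn = transforms.Compose([
    transforms.Resize(224, keep_ratio=True),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])

img = nd.array(np.random.randint(0, 255, (300, 400, 3)), dtype='uint8')
x = nd.expand_dims(transform_fn(img), axis=0)
print(x.shape)   # (1, 3, 224, 224), ready to feed into the classifier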
Example #12
def log_sum_exp(vec):
    # max_score shape: (self.tagset_size, batch_size, 1)
    max_score = nd.max(vec, axis=-1, keepdims=True)
    # score shape: (self.tagset_size, batch_size, 1)
    score = nd.log(nd.sum(nd.exp(vec - max_score), axis=-1, keepdims=True)) + max_score

    # return NDArray shape: (self.tagset_size, batch_size, )
    return nd.squeeze(score, axis=-1)
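A quick numerical check of log_sum_exp against plain NumPy, on a hypothetical (tagset_size=2, batch_size=1, 3) score tensor:

import numpy as np
from mxnet import nd

vec = nd.array([[[1.0, 2.0, 3.0]], [[-1.0, 0.0, 1.0]]])   # (2, 1, 3)
lse = log_sum_exp(vec)                                     # (2, 1)
ref = np.log(np.exp(vec.asnumpy()).sum(axis=-1))           # NumPy reference
print(np.allclose(lse.asnumpy(), ref))                     # True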
Example #13
 def _mask(output,
           data_name,
           test_citys=False):  # set True when submit to Cityscapes
     predict = nd.squeeze(nd.argmax(output, 1)).asnumpy()
     if test_citys:
         mask = my_tools.city_train2label(predict)
     else:
         mask = my_tools.my_color_palette(predict, data_name)
     return mask
Example #14
 def _mask(self, output: nd.NDArray, test_citys: bool = True):
     """get mask by output"""
     predict = nd.squeeze(nd.argmax(output, 1)).asnumpy()
     if test_citys:
         from mxnetseg.tools import city_train2label
         mask = city_train2label(predict)
     else:
         from mxnetseg.tools import get_color_palette
         mask = get_color_palette(predict,
                                  color_palette[self.data_name.lower()])
     return mask
Example #15
 def forward(self, inputs, hidden):
     outputs = []
     for X in inputs:  # inputs: 60 x 64 x 20; iterate over the first axis
         output, hidden = self.rnn(X.expand_dims(0), hidden)
         output = self.drop(nd.squeeze(output))
     decoded = self.decoder(output)
     output_final = decoded.expand_dims(0)
     # alternative: collect every step and average
     # outputs.append(decoded.expand_dims(0))
     # output_final = nd.concat(*outputs, dim=0).mean(axis=0)
     return output_final, hidden
Example #16
def test_model(opts, net):
    """test model loaded from checkpoints"""
    im = np.load('%ssplit%d/im.npy' % (opts.dir_in_test, opts.split))
    lab = np.load('%ssplit%d/lab.npy' % (opts.dir_in_test, opts.split))

    print(im.shape)
    for sub_idx in range(22, 23):
        out = nd.squeeze(
            nd.argmax(net(nd.array(im[sub_idx:sub_idx + 1], ctx=opts.ctx)),
                      axis=1)).asnumpy()
        out = post_proc(out, area_thr_slice=200)
        plot_(out, im[sub_idx], lab=lab[sub_idx])
Example #17
def _pad_tensors_to_same_length(x, y):
    """Pad x and y so that they have the same length along the second dimension."""
    x_length = x.shape[1]
    y_length = y.shape[1]

    max_length = max(x_length, y_length)
    x = nd.expand_dims(x, axis=0)
    x = nd.pad(x,
               mode="constant",
               constant_value=0,
               pad_width=(0, 0, 0, 0, 0, max_length - x_length, 0, 0))
    x = nd.squeeze(x, axis=0)

    y = nd.expand_dims(y, axis=0)
    y = nd.expand_dims(y, axis=0)
    y = nd.pad(y,
               mode="constant",
               constant_value=0,
               pad_width=(0, 0, 0, 0, 0, 0, 0, max_length - y_length))
    y = nd.squeeze(y, axis=0)
    y = nd.squeeze(y, axis=0)

    return x, y
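A usage sketch for _pad_tensors_to_same_length with hypothetical shapes (logits x of shape (batch, x_len, vocab), labels y of shape (batch, y_len)):

from mxnet import nd

x = nd.ones((2, 3, 7))
y = nd.ones((2, 5))
x_pad, y_pad = _pad_tensors_to_same_length(x, y)
print(x_pad.shape, y_pad.shape)   # (2, 5, 7) (2, 5)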
Example #18
def validate(net, val_data, val_items, val_shapes, ctx, size, classes):
    """Test on validation dataset."""
    clipper = gcv.nn.bbox.BBoxClipToImage()
    net.hybridize(static_alloc=True)
    print("---Detect Total {:d} Image Start.---".format(len(val_items)))

    result_dict = {}
    for ib, (batch, item) in enumerate(zip(val_data, val_items)):
        batch = split_and_load(batch, ctx_list=ctx)
        for x, y, im_scale in zip(*batch):
            ids, scores, bboxes = net(x)
            bboxes = clipper(bboxes, x)
            im_scale = im_scale.reshape((-1)).asscalar()
            bboxes *= im_scale
            inds = nd.argsort(nd.squeeze(ids, axis=(0, 2)), is_ascend=False)
            ids = nd.squeeze(ids,
                             axis=(0, 2)).asnumpy().astype(np.int8).tolist()
            valid_ids = [id for id in ids if id != -1]
            valid_len = len(valid_ids)
            if valid_len > 0:  # skip images with no valid detections
                inds = nd.slice_axis(inds, begin=0, end=valid_len, axis=0)
                scores = nd.take(scores, inds, axis=1)
                bboxes = nd.take(bboxes, inds, axis=1)
                scores = scores.asnumpy()
                bboxes = bboxes.asnumpy()
                for i, id in enumerate(valid_ids):
                    score = scores[:, i, 0][0]
                    xmin, ymin, xmax, ymax = (bboxes[:, i, 0][0], bboxes[:, i, 1][0],
                                              bboxes[:, i, 2][0], bboxes[:, i, 3][0])
                    result_dict[id] = result_dict.get(
                        id, []) + [[item, score, xmin, ymin, xmax, ymax]]
                print("Detect Image {:s} Done.".format(item))
    print("---Detect Total {:d} Image Done.---".format(len(val_items)))
    return result_dict
Example #19
    def forward(self, x, target):
        assert x.shape[1] == self.size  # vocabulary size

        with autograd.pause():
            true_dist = nd.zeros_like(x) + self.smoothing / (self.size - 2)
            target_mask = nd.zeros_like(true_dist)
            for r, c in enumerate(target):
                target_mask[r,c] = 1
            true_dist = nd.where(target_mask, nd.zeros_like(true_dist) + self.confidence, true_dist)
            true_dist[:, self.padding_idx] = 0
            mask = nd.equal(target, self.padding_idx)

            if len(mask.shape) > 0:
                true_dist = nd.where(nd.squeeze(mask), nd.zeros_like(true_dist), true_dist)

        self.true_dist = true_dist
        return self.criterion(x, true_dist.as_in_context(cfg.ctx))
Example #20
        def symbols_to_logits_fn(ids, i, cache):
            decoder_input = ids[:, -1:]
            # the decoder input is the last token of the sequences decoded so far

            decoder_input = self.embedding_softmax_layer(decoder_input)
            decoder_input = decoder_input + timing_signal[i:i + 1]

            self_attention_bias = decoder_self_attention_bias[:, :,
                                                              i:i + 1, :i + 1]
            decoder_outputs = self.decoder_stack(
                decoder_input,
                cache.get("encoder_outputs"), self_attention_bias,
                cache.get("encoder_decoder_attention_bias"), cache)
            logits = self.embedding_softmax_layer.linear(decoder_outputs)

            logits = nd.squeeze(logits, axis=1)
            return logits, cache
Example #21
    def __call__(self, x, gt_labels=None, gt_boxes=None):
        with autograd.record():
            # raw_rpn_scores (B, rpn_num_samples, 1)
            # rpn_masks (B, rpn_num_samples) 1: pos 0: ignore -1: neg
            # raw_rpn_boxes (B, rpn_num_samples, 4)
            # raw_rpn_box_targets (B, rpn_num_samples, 4)
            # cls_pred (B, num_samples, C+1)
            # cls_targets (B, num_samples) value [-1, C) -1: ignore 0: background
            # box_pred (B, num_pos, 4)
            # reg_targets (B, num_pos, 4)
            # box_mask (B, num_pos, 4)
            raw_rpn_scores, rpn_masks, raw_rpn_boxes, raw_rpn_box_targets, cls_pred, cls_targets, box_pred, \
            reg_targets, box_mask = self.net(x, gt_labels, gt_boxes)

            raw_rpn_scores = nd.squeeze(raw_rpn_scores, axis=-1)
            rpn_score_targets = nd.where(rpn_masks == 1, rpn_masks, nd.zeros_like(rpn_masks))
            raw_rpn_scores = nd.reshape(raw_rpn_scores, (-1,))
            rpn_score_targets = nd.reshape(rpn_score_targets, (-1,))
            rpn_masks = nd.reshape(rpn_masks, (-1,))
            raw_rpn_boxes = nd.reshape(raw_rpn_boxes, (-1, 4))
            raw_rpn_box_targets = nd.reshape(raw_rpn_box_targets, (-1, 4))
            num_samples = (rpn_masks != 0).sum()
            rpn_cls_loss = self.rpn_cls_loss(raw_rpn_scores, rpn_score_targets, rpn_masks != 0)
            rpn_cls_loss = rpn_cls_loss.sum() / num_samples
            rpn_masks = nd.reshape(rpn_masks, (-1, 1))
            rpn_box_loss = self.rpn_box_loss(raw_rpn_boxes, raw_rpn_box_targets, rpn_masks == 1)
            rpn_box_loss = rpn_box_loss.sum() / num_samples

            B, num_samples, C_1 = cls_pred.shape
            cls_pred = nd.reshape(cls_pred, (-1, C_1))
            cls_targets = nd.reshape(cls_targets, (-1,))
            box_pred = nd.reshape(box_pred, (-1, 4))
            reg_targets = nd.reshape(reg_targets, (-1, 4))
            box_mask = nd.reshape(box_mask, (-1, 4))
            num_samples = (cls_targets >= 0).sum()
            rcnn_cls_loss = self.rcnn_cls_loss(cls_pred, cls_targets, cls_targets >= 0)
            rcnn_cls_loss = rcnn_cls_loss.sum() / num_samples
            rcnn_box_loss = self.rcnn_box_loss(box_pred, reg_targets, box_mask == 1)
            rcnn_box_loss = rcnn_box_loss.sum() / num_samples

            total_loss = rpn_cls_loss + rpn_box_loss + rcnn_cls_loss + rcnn_box_loss

            total_loss.backward()

        return rpn_cls_loss, rpn_box_loss, rcnn_cls_loss, rcnn_box_loss, total_loss
Example #22
    def forward(self, decoder_output, encoder_output):
        """TODO: Docstring for forward.

        :decoder_output: TODO
        :encoder_output: TODO
        :returns: TODO

        """

        decoder_output = decoder_output.transpose([0, 2, 1])

        score = nd.batch_dot(encoder_output, decoder_output)

        weight = nd.softmax(score, axis=1)

        context = nd.batch_dot(nd.transpose(weight, [0, 2, 1]), encoder_output)

        return context, nd.squeeze(weight)
Example #23
    def detect_img_to_class(self, img_path):
        img = image.imread(img_path)

        transform_fn = transforms.Compose([
            transforms.Resize(224, keep_ratio=True),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        ])

        img = transform_fn(img)
        img = nd.expand_dims(img, axis=0)

        res = self.net_cl(img.as_in_context(self.ctx))
        res = sigmoid(nd.squeeze(res)).asnumpy()
        res = np.where(res > 0.5, 1, 0)
        indexes, = np.where(res == 1)
        return indexes
Example #24
    def _convert_score(self, score):
        """from cls to score

        Parameters
        ----------
            score : ndarray
                network output

        Returns
        -------
            get feature map score through softmax
        """
        score = nd.transpose(score, axes=(1, 2, 3, 0))
        score = nd.reshape(score, shape=(2, -1))
        score = nd.transpose(score, axes=(1, 0))
        score = nd.softmax(score, axis=1)
        score = nd.slice_axis(score, axis=1, begin=1, end=2)
        score = nd.squeeze(score, axis=1)
        return score.asnumpy()
Example #25
    def update(self, labels, preds):
        """
        :param labels: [(batch_per_device, ), (), ...]
        :param preds: [(batch_per_device, 1), (), ...]
        :return:
        """
        # binary_label = labels
        # binary_cls_logits = preds[0]
        num_acc = 0
        for lb, pd in zip(labels, preds):
            tp_RET = nd.concatenate([nd.expand_dims(lb, axis=1), pd], axis=1)
            if self.RET is None:
                self.RET = tp_RET
            else:
                self.RET = nd.concatenate([self.RET, tp_RET], axis=0)
            pred_label = nd.squeeze(pd) >= 0.5

            tp = nd.sum(pred_label == lb)
            num_acc = num_acc + tp.asscalar()

        self.sum_metric += num_acc
        self.num_inst += self.config.batch_size
Example #26
def misclassified_pixels(prob: nd.NDArray,
                         label: nd.NDArray,
                         ignore_label: int = -1) -> np.ndarray:
    """
    return misclassified pixels.
    :param prob: the predicted probability with shape CHW
    :param label: the ground truth label with shape HW
    :param ignore_label: ignored label
    :return: numpy array of shape HW where 0 indicates misclassified pixels
    """
    # needs to process on cpu
    prob = prob.as_in_context(mx.cpu())
    label = label.as_in_context(mx.cpu())

    # compare prediction with label: 1 where correct, 0 where misclassified
    pred = nd.squeeze(nd.argmax(prob, axis=0)).astype('int32')
    mis_classify = (pred == label).asnumpy()

    # deal with ignored label via numpy
    label = label.asnumpy()
    mis_classify[label == ignore_label] = 1

    return mis_classify
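A toy usage sketch for misclassified_pixels with a hypothetical 3-class, 2x2 probability map; the label is int32 so it compares directly with the int32 argmax:

from mxnet import nd

prob = nd.array([[[0.7, 0.1], [0.2, 0.3]],
                 [[0.2, 0.8], [0.3, 0.3]],
                 [[0.1, 0.1], [0.5, 0.4]]])              # CHW = (3, 2, 2)
label = nd.array([[0, 2], [2, -1]], dtype='int32')       # -1 marks an ignored pixel

print(misclassified_pixels(prob, label))   # 1 = correct or ignored, 0 = misclassified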
Example #27
    def forward(self, is_train, req, in_data, out_data, aux):
        fea = in_data[0]
        data = in_data[1]
        weights = in_data[2]
        prob = in_data[3]
        
        prob = prob / 3
        prob = nd.exp(prob)
        prob = prob/nd.sum(prob,axis=1,keepdims=1)
        
        w = nd.dot(prob,weights)
        w = nd.expand_dims(w,2)
        w = nd.expand_dims(w,3)     
        fea_w = fea*w
        
        d_w = data.shape[3]
        d_h = data.shape[2]
            
        w = fea.shape[2]
        n = fea.shape[0]
        
        
        fea = nd.mean(fea_w,axis=1,keepdims=1)
#        fea = nd.contrib.BilinearResize2D(fea,height=4*w,width=4*w)
#        w = 4*w
        
        max_val = nd.max(fea,axis=(2,3),keepdims=1)
        fea = fea / max_val
        
        
        fea_mask = nd.greater_equal(fea, 0.1)
        fea_mask2 = nd.greater_equal(fea, 0.25)

        fea_mask1 = -nd.Pooling(-fea_mask,kernel=(5,5),pool_type='max',pad=(2,2))
        fea_mask1 = nd.Pooling(fea_mask1,kernel=(11,11),pool_type='max',pad=(5,5))
        cmask = nd.sum(fea_mask1, axis=(2, 3), keepdims=1)
        cmask = nd.greater(cmask, 4)  # regions with more than 4 foreground pixels
        fea_mask = cmask * fea_mask2 * fea_mask1 + (1-cmask)*fea_mask2
        
        fea_mask = fea_mask[:,0,:,:].asnumpy()
        
        
        shape = self.outsize
        
        img_res = nd.zeros((n,3,shape,shape))
#        fea_res = nd.zeros((n,shape,shape))
        for i in range(n):
            m = fea_mask[i] 
            try:
                
                arg = np.float32(np.where(m==1))   
                ymin = np.int32(np.floor(np.min(arg[0])*(d_h/w)))
                ymax = np.int32(np.ceil(np.max(arg[0])*(d_h/w)))
                xmin = np.int32(np.floor(np.min(arg[1])*(d_w/w)))
                xmax = np.int32(np.ceil(np.max(arg[1])*(d_w/w)))
                
                x_center = (xmin+xmax)/2
                y_center = (ymin+ymax)/2
    #            
                x_length = xmax - xmin
                y_length = ymax - ymin
                longside = max(y_length,x_length)
                 
                x = int(max(x_center - longside/2, 0))
                xmax = int(min(x_center + longside/2, d_w))

                l_x = xmax - x
                y = int(max(y_center - longside/2, 0))
                ymax = int(min(y_center + longside/2, d_h))
                l_y = ymax - y
            
#            fea0 = fea[i]
#            fea0 = nd.expand_dims(fea0,0)
#            fea0 = nd.expand_dims(fea0,0)
#            fea0 = nd.contrib.BilinearResize2D(fea0,height=d_h,width=d_w)
#            
            
                img_crop = data[i, :, y:y+l_y, x:x+l_x]
            except Exception:
                # fallback: no foreground pixel found, crop the whole image
                print(arg)
                img_crop = data[i]
#            fea_crop = fea0[0,:,y:y+l_y,x:x+l_x]
            
            img_crop = nd.expand_dims(img_crop,0)
#            fea_crop  = nd.expand_dims(fea_crop,0)

            img_crop = nd.contrib.BilinearResize2D(img_crop,height=shape,width=shape)
#            fea_crop = nd.contrib.BilinearResize2D(fea_crop,height=shape,width=shape)
#                
#                if l_y > l_x:
#                    longside = int((l_y/l_x)*resize)
#                    img_crop = nd.contrib.BilinearResize2D(img_crop,height=longside,width=resize)
#                    s = int(np.floor((longside-shape)/2))
#                    img_crop = img_crop[:,:,s:s+shape,s1:s1+shape]
#                else:
#                    longside = int(l_x/l_y*resize)
#                    img_crop = nd.contrib.BilinearResize2D(img_crop,height=resize,width=longside) 
#                    s = int(np.floor((longside-shape)/2))
#                    img_crop = img_crop[:,:,s1:s1+shape,s:s+shape]
#                    
            
            img_res[i,:,:,:] = nd.squeeze(img_crop)
#            fea_res[i,:,:] = nd.squeeze(fea_crop)
#        fea_res = nd.expand_dims(fea_res,1)
#        img_res = img_res * fea_res
        self.assign(out_data[0], req[0], img_res)
Example #28
def test_squeeze():
    a = nd.ones(shape=(LARGE_X, SMALL_Y))
    data = nd.expand_dims(a, axis=1)
    res = nd.squeeze(data)
    assert res.shape == a.shape
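For reference, a minimal sketch of nd.squeeze itself: with no axis it drops every size-1 axis, with an explicit axis only that one:

from mxnet import nd

x = nd.ones((1, 3, 1, 2))
print(nd.squeeze(x).shape)          # (3, 2)
print(nd.squeeze(x, axis=0).shape)  # (3, 1, 2)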
Example #29
def test_squeeze():
    a = nd.ones(shape=LARGE_X)
    data = nd.expand_dims(a, axis=0)
    res = nd.squeeze(data)
    assert a[0] == res[0]
    assert res.shape == a.shape
Example #30
def test_squeeze():
    a = nd.ones(shape=(LARGE_X, SMALL_Y))
    data = nd.expand_dims(a, axis=1)
    res = nd.squeeze(data)
    assert res.shape == a.shape
    dir_out = args.dir_out + 'run%d/epoch%d' % (args.run_id, args.epoch)
    if not os.path.exists(dir_out):
        os.makedirs(dir_out)

    net = gluon.nn.HybridSequential()
    net.add(load_net(args))
    net.add(Softmax())

    if args.test_model:
        test_model(args, net=net)

    start = time.time()
    # for idx in range(1):
    for idx in range(100):
        net.hybridize()
        im = norm1_v0(load_dat(args, idx + 1, only_T2=args.only_T2), thr=.01)
        out = nd.squeeze(nd.argmax(net(nd.array(im, ctx=args.ctx)),
                                   axis=1)).asnumpy()
        out = post_proc(out, area_thr_slice=500)
        print('sub %d, process time: % s' % (idx, time.time() - start))
        if args.plot_pred:
            plot_(out, im[0], invisible=True)
            plt.savefig('%s/sub%03d.png' % (dir_out, idx),
                        bbox_inches='tight',
                        dpi=500)
        if args.save_pred:
            np.save('%s/sub%03d.npy' % (dir_out, idx), out)
        plt.close()
        start = time.time()