def calculate_accuracy(x, y, w, b):
    """
    Given x, y and parameters (w, b), calculate accuracy and cross entropy loss.
    """
    pred_y = predict(x, w, b)
    pred_labels = nd.argmax(pred_y, axis=1)
    true_labels = nd.argmax(y, axis=1)
    return nd.mean(pred_labels == true_labels).asscalar(), cross_entropy_loss(pred_y, y).asscalar()
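This snippet assumes predict and cross_entropy_loss defined elsewhere in the example; a minimal sketch of what they might look like (a linear softmax model with one-hot labels; both bodies are assumptions, not the original code):

import mxnet.ndarray as nd

def predict(x, w, b):
    # hypothetical linear model: softmax over x @ w + b
    return nd.softmax(nd.dot(x, w) + b)

def cross_entropy_loss(pred_y, y):
    # mean cross entropy against one-hot labels y; epsilon guards log(0)
    return -nd.mean(nd.sum(y * nd.log(pred_y + 1e-12), axis=1))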
Example No. 2
def plot_prediction(batch_x, batch_y, i, w, b):
    """
    Given a batch (of x, y) and an index i, plot image and predictions.
    """
    xi = batch_x[i:(i+1)]
    yi = batch_y[i:(i+1)]
    img = xi.asnumpy().reshape((28, 28))
    plt.imshow(img, cmap='gray')
    pred_label = int(nd.argmax(predict(xi, w, b), axis=1).asscalar())
    true_label = int(nd.argmax(yi, axis=1).asscalar())
    plt.title("Prediction: {}, True: {}".format(pred_label, true_label))
    plt.show()
Example No. 3
def test_model_save_load(gluon_model, model_data, model_path):
    _, _, test_data = model_data
    expected = nd.argmax(gluon_model(test_data), axis=1)

    mlflow.gluon.save_model(gluon_model, model_path)
    # Loading Gluon model
    model_loaded = mlflow.gluon.load_model(model_path, ctx.cpu())
    actual = nd.argmax(model_loaded(test_data), axis=1)
    assert all(expected == actual)
    # Loading pyfunc model
    pyfunc_loaded = mlflow.pyfunc.load_model(model_path)
    test_pyfunc_data = pd.DataFrame(test_data.asnumpy())
    pyfunc_preds = pyfunc_loaded.predict(test_pyfunc_data)
    assert all(np.argmax(pyfunc_preds.values, axis=1) == expected.asnumpy())
Example No. 4
def yolo2_decoder(x, num_class, anchor_scales):
    """
    yolo2_decoder splits the convolution output along channels, transforms each
    piece, and finally assembles the detection boxes we need.
    out: (index, score, xmin, ymin, xmax, ymax)
    """
    stride = num_class + 5
    x = x.transpose((0, 2, 3, 1))  # (Batch,H,W,Stride*Anchor)
    x = x.reshape((0, 0, 0, -1, stride))  # (Batch,H,W,Anchor,Stride)

    xy_pred = x.slice_axis(begin=0, end=2, axis=-1)
    wh = x.slice_axis(begin=2, end=4, axis=-1)
    score_pred = x.slice_axis(begin=4, end=5, axis=-1)
    cls_pred = x.slice_axis(begin=5, end=stride, axis=-1)

    xy = nd.sigmoid(xy_pred)
    x, y = transform_center(xy)
    w, h = transform_size(wh, anchor_scales)
    score = nd.sigmoid(score_pred)
    cid = nd.argmax(cls_pred, axis=-1, keepdims=True)

    left = nd.clip(x - w / 2, 0, 1)
    top = nd.clip(y - h / 2, 0, 1)
    right = nd.clip(x + w / 2, 0, 1)
    bottom = nd.clip(y + h / 2, 0, 1)
    output = nd.concat(*[cid, score, left, top, right, bottom], dim=4)
    return output, cls_pred, score, nd.concat(*[xy, wh], dim=4)
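A minimal shape check of the transpose/reshape above, with made-up sizes (num_class = 2, two anchors):

import mxnet.ndarray as nd

num_class, num_anchors = 2, 2
stride = num_class + 5  # x, y, w, h, objectness + class scores
conv_out = nd.random.uniform(shape=(1, num_anchors * stride, 4, 4))  # (B, C, H, W)
x = conv_out.transpose((0, 2, 3, 1)).reshape((0, 0, 0, -1, stride))
print(x.shape)  # (1, 4, 4, 2, 7): one stride-long vector per anchor per cell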
Example No. 5
def yolo2_forward(x, num_class, anchor_scales):
    """Transpose/reshape/organize convolution outputs."""
    stride = num_class + 5
    # transpose and reshape, 4th dim is the number of anchors
    x = x.transpose((0, 2, 3, 1))
    x = x.reshape((0, 0, 0, -1, stride))
    # now x is (batch, m, n, anchors, stride), stride = num_class + 1(object score) + 4(coordinates)
    # class probs
    cls_pred = x.slice_axis(begin=0, end=num_class, axis=-1)
    # object score
    score_pred = x.slice_axis(begin=num_class, end=num_class + 1, axis=-1)
    score = nd.sigmoid(score_pred)
    # center prediction, in range(0, 1) for each grid
    xy_pred = x.slice_axis(begin=num_class + 1, end=num_class + 3, axis=-1)
    xy = nd.sigmoid(xy_pred)
    # width/height prediction
    wh = x.slice_axis(begin=num_class + 3, end=num_class + 5, axis=-1)
    # convert x, y to positions relative to image
    x, y = transform_center(xy)
    # convert w, h to width/height relative to image
    w, h = transform_size(wh, anchor_scales)
    # cid is the argmax channel
    cid = nd.argmax(cls_pred, axis=-1, keepdims=True)
    # convert to corner format boxes
    half_w = w / 2
    half_h = h / 2
    left = nd.clip(x - half_w, 0, 1)
    top = nd.clip(y - half_h, 0, 1)
    right = nd.clip(x + half_w, 0, 1)
    bottom = nd.clip(y + half_h, 0, 1)
    output = nd.concat(*[cid, score, left, top, right, bottom], dim=4)
    return output, cls_pred, score, nd.concat(*[xy, wh], dim=4)
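A tiny numeric check of the center-to-corner conversion used above (box values made up):

from mxnet import nd

x, y, w, h = nd.array([0.5]), nd.array([0.5]), nd.array([0.2]), nd.array([0.4])
left, top = nd.clip(x - w / 2, 0, 1), nd.clip(y - h / 2, 0, 1)
right, bottom = nd.clip(x + w / 2, 0, 1), nd.clip(y + h / 2, 0, 1)
print(left.asscalar(), top.asscalar(), right.asscalar(), bottom.asscalar())
# 0.4 0.3 0.6 0.7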
Example No. 6
    def update(self, labels, preds):
        """Updates the internal evaluation result.

        Parameters
        ----------
        labels : list of `NDArray`
            The labels of the data with class indices as values, one per sample.

        preds : list of `NDArray`
            Prediction values for samples. Each prediction value can either be the class index,
            or a vector of likelihoods for all classes.
        """
        labels, preds = check_label_shapes(labels, preds, True)

        for label, pred_label in zip(labels, preds):
            if pred_label.shape != label.shape:
                pred_label = ndarray.argmax(pred_label, axis=self.axis)
            pred_label = pred_label.asnumpy().astype('int32')
            label = label.asnumpy().astype('int32')
            # flatten before checking shapes to avoid shape mismatch
            label = label.flat
            pred_label = pred_label.flat

            check_label_shapes(label, pred_label)

            num_correct = (pred_label == label).sum()
            self.sum_metric += num_correct
            self.global_sum_metric += num_correct
            self.num_inst += len(pred_label)
            self.global_num_inst += len(pred_label)
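For reference, how an update like this is driven through the public metric API (using the stock mx.metric.Accuracy):

import mxnet as mx
from mxnet import nd

acc = mx.metric.Accuracy()
# one batch: likelihood vectors get argmax-ed against the class indices
acc.update(labels=[nd.array([1, 0])], preds=[nd.array([[0.1, 0.9], [0.8, 0.2]])])
print(acc.get())  # ('accuracy', 1.0)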
Example No. 7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "dir",
        help="Directory containing the image files (.png) we'll run inference on. "
             "Relative to the root of the project (tulip-fields/)")
    args = parser.parse_args()

    ctx = mx.gpu(0)

    batch_size = 8
    img_size = 256

    root = os.path.dirname(__file__)
    imgdir = os.path.join(root, os.pardir, args.dir)
    checkpoint_dir = os.path.join(root, 'checkpoints', 'unet')

    # Instantiate a U-Net and train it
    net = unet.Unet()
    net.load_params(os.path.join(checkpoint_dir, 'best_unet.params'), ctx)

    print("Scanning dir {}".format(imgdir))
    files = glob.glob(os.path.join(imgdir, '*wms*.png'))
    print("Found {} images".format(len(files)))
    nbatches = int(math.ceil(len(files) / batch_size))

    reader = ImageReader(img_size, ctx)

    for n in range(nbatches):
        files_batch = files[n * batch_size:(n + 1) * batch_size]
        batch = reader.load_batch(files_batch)
        batch = batch.as_in_context(ctx)
        preds = nd.argmax(net(batch), axis=1)
        save_batch(files_batch, preds)
Example No. 8
    def viterbi_decode(self, feats):
        backpointers = []
        init_vvars = nd.full((1, self.tagset_size), -10000.)
        init_vvars[0][self.tag_dictionary.get_idx_for_item(START_TAG)] = 0
        forward_var = init_vvars

        for feat in feats:
            next_tag_var = forward_var.reshape((1, -1)).tile((self.tagset_size, 1)) + self.transitions.data()
            bptrs_t = nd.argmax(next_tag_var, axis=1)
            viterbivars_t = next_tag_var[list(range(len(bptrs_t))), bptrs_t]
            forward_var = viterbivars_t + feat
            backpointers.append(bptrs_t)

        terminal_var = forward_var + self.transitions.data()[self.tag_dictionary.get_idx_for_item(STOP_TAG)]
        terminal_var[self.tag_dictionary.get_idx_for_item(STOP_TAG)] = -10000.
        terminal_var[self.tag_dictionary.get_idx_for_item(START_TAG)] = -10000.
        best_tag_id = int(terminal_var.argmax(axis=0).asscalar())
        path_score = terminal_var[best_tag_id]
        best_path = [best_tag_id]
        for bptrs_t in reversed(backpointers):
            best_tag_id = bptrs_t[best_tag_id]
            best_path.append(int(best_tag_id.asscalar()))
        start = best_path.pop()
        assert start == self.tag_dictionary.get_idx_for_item(START_TAG)
        best_path.reverse()
        return path_score, best_path
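The same forward/backtrack scheme in plain NumPy, on a made-up two-tag problem, as a sanity check:

import numpy as np

trans = np.log(np.array([[0.7, 0.3], [0.4, 0.6]]))  # trans[i, j]: score of moving j -> i
emit = np.log(np.array([[0.9, 0.1], [0.2, 0.8], [0.3, 0.7]]))  # per-step tag scores
forward = emit[0].copy()
backpointers = []
for e in emit[1:]:
    scores = forward[None, :] + trans        # scores[i, j] = forward[j] + trans[i, j]
    backpointers.append(scores.argmax(axis=1))
    forward = scores.max(axis=1) + e
best_path = [int(forward.argmax())]
for bp in reversed(backpointers):
    best_path.append(int(bp[best_path[-1]]))
best_path.reverse()
print(best_path)  # [0, 1, 1] for these scores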
Example No. 9
    def update(self, labels, preds):
        """Updates the internal evaluation result.
        Parameters
        ----------
        labels : list of `NDArray`
            The labels of the data with class indices as values, one per sample.
        preds : list of `NDArray`
            Prediction values for samples. Each prediction value can either be the class index,
            or a vector of likelihoods for all classes.
        """
        labels, preds = check_label_shapes(labels, preds, True)

        for label, pred_label in zip(labels, preds):
            if pred_label.shape != label.shape:
                pred_label = ndarray.argmax(pred_label, axis=self.axis)
            pred_label = pred_label.asnumpy().astype('int32')
            label = label.asnumpy().astype('int32')

            label, pred_label = check_label_shapes(label, pred_label)

            valid = (label.reshape(-1, 1) != self.ignore_labels).all(axis=-1)

            self.sum_metric += np.logical_and(pred_label.flat == label.flat,
                                              valid).sum()
            self.num_inst += np.sum(valid)
Example No. 10
def evaluate_accuracy(data_iterator, num_examples, batch_size, params, net,
                      pool_type, pool_size, pool_stride, act_type, dilate_size,
                      nf):
    numerator = 0.
    denominator = 0.
    for batch_i, (data, label) in enumerate(data_iterator):
        data = data.as_in_context(ctx).reshape((batch_size, 1, 1, -1))
        label = label.as_in_context(ctx)
        label_one_hot = nd.one_hot(label, 10)
        output, _ = net(data,
                        params,
                        pool_type=pool_type,
                        pool_size=pool_size,
                        pool_stride=pool_stride,
                        act_type=act_type,
                        dilate_size=dilate_size,
                        nf=nf)
        predictions = nd.argmax(output, axis=1)
        numerator += nd.sum(predictions == label)
        denominator += data.shape[0]
        print('Evaluating accuracy. (complete percent: %.2f/100)' %
              (100.0 * batch_i / (num_examples // batch_size)),
              end='\r')
    return (numerator / denominator).asscalar()
Example No. 11
    def update(self, labels, preds):
        """Updates the internal evaluation result.

        Parameters
        ----------
        labels : list of `NDArray`
            The labels of the data.

        preds : list of `NDArray`
            Predicted values.
        """
        check_label_shapes(labels, preds)

        for label, pred_label in zip(labels, preds):
            if pred_label.shape != label.shape:
                pred_label = ndarray.argmax(pred_label, axis=self.axis)
            pred_label = pred_label.asnumpy().astype('int32')
            label = label.asnumpy().astype('int32')

            check_label_shapes(label, pred_label)

            pred_label_ = pred_label.flat
            label_ = label.flat
            for i in range(len(pred_label_)):
                if label_[i] == self.c:
                    if pred_label_[i] == self.c:
                        self.TP += 1
                    else:
                        self.FN += 1
                elif pred_label_[i] == self.c:
                    self.FP += 1
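From those counters, precision, recall, and F1 for class c follow directly; a standalone sketch with made-up counts:

TP, FP, FN = 42, 8, 6  # hypothetical counts for one class
precision = TP / (TP + FP) if TP + FP else 0.0   # 0.84
recall = TP / (TP + FN) if TP + FN else 0.0      # 0.875
f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
print(precision, recall, f1)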
Example No. 12
    def select_action(self, state, exploration_rate):
        if np.random.rand() < exploration_rate:
            return self.environment.action_space.sample()
        else:
            # greedy action: argmax over the action dimension (axis=1)
            return int(nd.argmax(self.model(nd.expand_dims(state, axis=0)),
                                 axis=1).asnumpy()[0])
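The epsilon-greedy rule in isolation, with a made-up Q-row, showing both branches:

import numpy as np
from mxnet import nd

def select_action_toy(q_row, exploration_rate, n_actions):
    # explore with probability exploration_rate, otherwise act greedily
    if np.random.rand() < exploration_rate:
        return np.random.randint(n_actions)
    return int(nd.argmax(q_row, axis=1).asscalar())

print(select_action_toy(nd.array([[0.1, 0.9, 0.3]]), 0.1, 3))  # usually 1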
Example No. 13
def test(model, test_loader, ctx):
    """
    Test the model on test dataset.
    """
    print("Testing...")
    start_time = time.time()
    model.load_params(model_file, ctx=ctx)  # restore the best parameters

    y_pred, y_true = [], []
    for data, label in test_loader:
        data, label = data.as_in_context(ctx), label.as_in_context(ctx)
        with autograd.record(train_mode=False):  # run the forward pass in prediction mode
            output = model(data)
        pred = nd.argmax(output, axis=1).asnumpy().tolist()
        y_pred.extend(pred)
        y_true.extend(label.asnumpy().tolist())

    test_acc = metrics.accuracy_score(y_true, y_pred)
    test_f1 = metrics.f1_score(y_true, y_pred, average='macro')
    print("Test accuracy: {0:>7.2%}, F1-Score: {1:>7.2%}".format(
        test_acc, test_f1))

    print("Precision, Recall and F1-Score...")
    print(
        metrics.classification_report(y_true,
                                      y_pred,
                                      target_names=['POS', 'NEG']))

    print('Confusion Matrix...')
    cm = metrics.confusion_matrix(y_true, y_pred)
    print(cm)

    print("Time usage:", get_time_dif(start_time))
Example No. 14
    def update(self, labels, preds):
        """Updates the internal evaluation result.

        Parameters
        ----------
        labels : list of `NDArray`
            The labels of the data.

        preds : list of `NDArray`
            Predicted values.
        """
        check_label_shapes(labels, preds)

        for label, pred_label in zip(labels, preds):
            if pred_label.shape != label.shape:
                pred_label = ndarray.argmax(pred_label, axis=self.axis)
            pred_label = pred_label.asnumpy().astype('int32')
            label = label.asnumpy().astype('int32')
            print('pred: ', pred_label)
            print('gt: ', label)
            check_label_shapes(label, pred_label)

            keep_inds = np.where(label != 0)
            pred_label = pred_label[keep_inds]
            label = label[keep_inds]

            self.sum_metric += (pred_label.flat == label.flat).sum()
            self.num_inst += len(pred_label.flat)
Example No. 15
def verify_loaded_model(net):
    """Run inference using ten random images.
    Print both input and output of the model"""

    def transform(data, label):
        return data.astype(np.float32)/255, label.astype(np.float32)

    # Load ten random images from the test dataset
    sample_data = mx.gluon.data.DataLoader(mx.gluon.data.vision.MNIST(train=False, transform=transform),
                                  10, shuffle=True)

    for data, label in sample_data:

        # Display the images
        img = nd.transpose(data, (1,0,2,3))
        img = nd.reshape(img, (28,10*28,1))
        imtiles = nd.tile(img, (1,1,3))
        plt.imshow(imtiles.asnumpy())
        plt.show()

        # Display the predictions
        data = nd.transpose(data, (0, 3, 1, 2))
        out = net(data.as_in_context(ctx))
        predictions = nd.argmax(out, axis=1)
        print('Model predictions: ', predictions.asnumpy())

        break
Example No. 16
    def evaluate(self, eval_data):
        acc = mx.metric.Accuracy()
        for eval_x, eval_y in eval_data:
            output = self.net(eval_x)
            prediction = ndarray.argmax(output, axis=1)
            acc.update(labels=eval_y, preds=prediction)
        return acc.get()[1]
Example No. 17
def train_block(net, train_data, eval_data=None):
    net.initialize()
    accuracy = mx.metric.Accuracy()
    loss = gluon.loss.SoftmaxCrossEntropyLoss()
    trainer = gluon.Trainer(net.collect_params(), 'sgd', optimizer_params={'learning_rate':0.1})

    epoch = 10
    for e in range(epoch):
        for i, (train_x, train_y) in enumerate(train_data):
            train_x = train_x.as_in_context(mx.cpu())#.reshape((-1, 784))

            train_y = train_y.as_in_context(mx.cpu())
            with autograd.record():
                output = net(train_x)
                one_loss = loss(output, train_y)
            one_loss.backward()
            prediction = ndarray.argmax(output, axis=1)
            accuracy.update(labels=train_y, preds=prediction)
            trainer.step(train_x.shape[0])
            curr_loss = ndarray.mean(one_loss).asscalar()
            if i%200 == 0:
                print('epoch:{}, step:{}, loss:{:.4f}, accuracy:{:.4f}'.format(e, i, curr_loss, accuracy.get()[1]))

        eval_accu = evaluate(net, eval_data)
        print("epoch:{}, evaluate accuracy:{:.2f}".format(e, eval_accu))
Example No. 18
def batch_intersection_union(output, target, nclass, ignore_bg=False):
    """mIoU"""
    # inputs are NDarray, output 4D, target 3D
    # ignore_bg=True, ignoring class 0; ignore_bg = False, use class 0
    predict = F.argmax(output, 1)
    target = target.astype(predict.dtype)
    mini = 0
    maxi = nclass - 1
    nbins = nclass
    predict = predict.asnumpy()
    target = target.asnumpy()
    if ignore_bg:
        mini = 1
        nbins -= 1
        predict = predict * (target > 0).astype(predict.dtype)
    else:
        predict = predict * (target >= 0).astype(predict.dtype)
    #intersection = predict * (F.equal(predict, target)).astype(predict.dtype)
    intersection = predict * (predict == target)
    # areas of intersection and union
    area_inter, _ = np.histogram(intersection, bins=nbins, range=(mini, maxi))
    area_pred, _ = np.histogram(predict, bins=nbins, range=(mini, maxi))
    area_lab, _ = np.histogram(target, bins=nbins, range=(mini, maxi))
    area_union = area_pred + area_lab - area_inter
    return area_inter, area_union
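A toy run of the histogram counting with a perfect 3-class prediction (values made up):

import numpy as np

predict = np.array([0., 1., 2., 2.])
target = np.array([0., 1., 2., 2.])
intersection = predict * (predict == target)
area_inter, _ = np.histogram(intersection, bins=3, range=(0, 2))
area_pred, _ = np.histogram(predict, bins=3, range=(0, 2))
area_lab, _ = np.histogram(target, bins=3, range=(0, 2))
print(area_inter, area_pred + area_lab - area_inter)  # equal arrays -> per-class IoU of 1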
Example No. 19
def accuracy(output, label):
    """
        output : [1,2,3,4,5], label : 2
    """

    labelHat = nd.argmax(output, axis=1)

    return nd.sum(labelHat == label).asscalar()
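A quick check with a single made-up score row whose argmax matches the label:

from mxnet import nd

output = nd.array([[0.1, 0.7, 0.2]])  # scores for 3 classes
label = nd.array([1])
print(nd.sum(nd.argmax(output, axis=1) == label).asscalar())  # 1.0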
Example No. 20
def batch_pix_accuracy(output, target):
    """PixAcc"""
    # inputs are NDarray, output 4D, target 3D
    predict = F.argmax(output, 1) + 1
    target = target.astype(predict.dtype) + 1
    pixel_labeled = (target > 0).sum().asscalar()
    pixel_correct = (F.equal(predict, target)*(target > 0)).sum().asscalar()
    return pixel_correct, pixel_labeled
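A one-pixel check of the +1 shift: after shifting, only pixels whose label was -1 (an ignore label) stay at 0 and drop out of target > 0. Toy tensors:

import mxnet.ndarray as F

output = F.array([[[[0.9]], [[0.1]]]])  # (B=1, C=2, H=1, W=1), class 0 wins
target = F.array([[[0]]])               # true class 0
predict = F.argmax(output, 1) + 1
target = target.astype(predict.dtype) + 1
print((F.equal(predict, target) * (target > 0)).sum().asscalar())  # 1.0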
Example No. 21
    def predict_one_image(self, X):
        prdY = self.net(X)
        prdCls, prdObj, prdXYXY = self.cvt_output_for_predict(prdY)
        cid = nd.argmax(prdCls, axis=-1, keepdims=True)
        output = nd.concat(cid, prdObj, prdXYXY, dim=-1)
        output = output.reshape((0, -1, 6))  # cid, objectness, x0, y0, x1, y1
        output = nd.contrib.box_nms(output)  # cid may be changed
        return output
Example No. 22
def predict2ndimg(predict):
    result = ndarray.argmax(predict, axis=1)
    colormap = ndarray.array(MICCAI_colormap, ctx=mx.gpu(),
                             dtype='uint8')  # voc_colormap

    ndimg = colormap[result[:, :, :]]
    ndimg = ndarray.transpose(ndimg, (0, 3, 1, 2))
    ndimg = ndimg.astype('float32')
    return ndimg
Example No. 23
def batch_pix_accuracy(output, target):
    """PixAcc"""
    # inputs are NDarray, output 4D, target 3D
    predict = F.argmax(output, 1)
    predict = predict.asnumpy() + 1
    target = target.asnumpy().astype(predict.dtype) + 1
    pixel_labeled = np.sum(target > 0)
    pixel_correct = np.sum((predict == target) * (target > 0))
    assert pixel_correct <= pixel_labeled, "Correct area should be smaller than Labeled"
    return pixel_correct, pixel_labeled
Example No. 24
    def process(self, element):
        """
        Returns clear images after filtering the cloudy ones
        :param element:
        :return:
        """
        batch = self.reader.load_batch(element)
        batch = batch.as_in_context(self.ctx)
        preds = nd.argmax(self.net(batch), axis=1)
        self.save_batch(element, preds)
Example No. 25
def test_model_log_load(gluon_model, model_data, model_path):
    _, _, test_data = model_data
    expected = nd.argmax(gluon_model(test_data), axis=1)

    artifact_path = "model"
    with kiwi.start_run():
        kiwi.gluon.log_model(gluon_model, artifact_path=artifact_path)
        model_uri = "runs:/{run_id}/{artifact_path}".format(
            run_id=kiwi.active_run().info.run_id, artifact_path=artifact_path)

    # Loading Gluon model
    model_loaded = kiwi.gluon.load_model(model_uri, ctx.cpu())
    actual = nd.argmax(model_loaded(test_data), axis=1)
    assert all(expected == actual)
    # Loading pyfunc model
    pyfunc_loaded = kiwi.pyfunc.load_model(model_uri)
    test_pyfunc_data = pd.DataFrame(test_data.asnumpy())
    pyfunc_preds = pyfunc_loaded.predict(test_pyfunc_data)
    assert all(np.argmax(pyfunc_preds.values, axis=1) == expected.asnumpy())
Example No. 26
def character_recognize(pic, text_area):
    img = cv2.imread(pic)
    mser = cv2.MSER_create()
    import mxnet.ndarray as nd
    import mxnet as mx
    with open('chinese.txt') as to_read:
        chinese = [m_line.strip() for m_line in to_read]
    net = get_net(len(chinese))
    ctx = mx.gpu()
    net.load_params(
        './single_character_recognition/train_model/chinese_2.para', ctx)

    for m_text_area in text_area:
        point1_x, point1_y, point2_x, point2_y = m_text_area[:4]
        # roi of text area
        pic_text_area = img[int(point1_y):int(point2_y),
                            int(point1_x):int(point2_x), :]

        gray = cv2.cvtColor(pic_text_area, cv2.COLOR_RGB2GRAY)
        binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                       cv2.THRESH_BINARY_INV, 5, 5)
        regions, boxes = mser.detectRegions(gray)
        to_predict_pics = []
        boxes = sorted(boxes, key=lambda x: x[0])
        if len(boxes) == 0:
            continue
        # remove the similar boxes from redundant boxes
        refined_boxes = [boxes[0]]
        superposition_ratio = .5
        for i in range(1, len(boxes)):
            last_x, last_width = boxes[i - 1][0], boxes[i - 1][2]
            cur_x, cur_width = boxes[i][0], boxes[i][2]
            total_len = cur_x + cur_width - last_x
            share_len = last_width - cur_x + last_x
            if share_len / total_len < superposition_ratio:
                refined_boxes.append(boxes[i])
        for box in refined_boxes:
            x, y, w, h = box
            if w / pic_text_area.shape[1] <= 0.9 and w / h < 1.5:
                char_pic = cv2.resize(binary[y:y + h, x:x + w], (28, 28))
                to_predict_pics.append(char_pic / 255)
        if len(to_predict_pics) == 0:
            continue
        output = net(
            nd.array(to_predict_pics).reshape(
                (-1, 1, 28, 28)).as_in_context(ctx))
        predictions = nd.argmax(output, axis=1).asnumpy()
        predictions_char = [
            chinese[int(m_prediction)] for m_prediction in predictions
        ]
        print(' '.join(predictions_char))
        cv2.imshow('to_recognize', pic_text_area)
        cv2.waitKey(0)
Example No. 27
def verifyLoadedModel(net, data):
    data = nd.transpose(nd.array(data), (0, 3, 1, 2))
    out = net(data)
    predictions = nd.argmax(out, axis=1)

    text_labels = [
        't-shirt', 'trouser', 'pullover', 'dress', 'coat', 'sandal', 'shirt',
        'sneaker', 'bag', 'ankle boot'
    ]

    return [(int(p), text_labels[int(p)]) for p in predictions.asnumpy()]
Example No. 28
def batch_pix_accuracy(output, target):
    """PixAcc"""
    # inputs are NDarray, output 4D, target 3D
    # the category -1 is ignored class, typically for background / boundary
    predict = F.argmax(output, 1)
    predict = predict.asnumpy() + 1
    target = target.asnumpy().astype(predict.dtype) + 1
    pixel_labeled = np.sum(target > 0)
    pixel_correct = np.sum((predict == target)*(target > 0))
    assert pixel_correct <= pixel_labeled, "Correct area should be smaller than Labeled"
    return pixel_correct, pixel_labeled
Example No. 29
    def accuracy(data):
        good = 0
        total = 0
        for (X, Y) in data:
            features = X.as_in_context(model_ctx)
            label = Y.as_in_context(model_ctx).reshape(Y.size, -1)
            prediction = nd.argmax(net(features),
                                   axis=1).reshape(Y.size, -1)
            good += nd.sum(prediction == label).asscalar()
            total += len(X)
        return good / total
Example No. 30
    def check_rgKL(self, v):
        ndx = nd.argmax(v.reshape([-1, self.n_val]), axis=1)
        pv = np.ones(shape=self.prob_RGs.shape)
        for intcoor in ndx.asnumpy().reshape([-1, self.n_vis]):
            rg2 = cal_rg2(int2xy(intcoor))
            i = int(rg2 * 2)
            if i >= len(pv):
                i = len(pv) - 1
            pv[i] += 1
        pv /= np.sum(pv)
        prg = self.prob_RGs.asnumpy()
        KL = np.sum(prg * np.log(prg / pv))
        return KL
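The KL term at the end in isolation, on two made-up distributions:

import numpy as np

p = np.array([0.5, 0.5])  # reference distribution (prob_RGs)
q = np.array([0.9, 0.1])  # empirical distribution (pv)
print(np.sum(p * np.log(p / q)))  # ~0.511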
Example No. 31
    def update(self, labels, preds):
        """Updates the internal evaluation result.
        Parameters
        ----------
        labels : list of `NDArray`
            The labels of the data with class indices as values, one per sample.
        preds : list of `NDArray`
            Prediction values for samples. Each prediction value can either be the class index,
            or a vector of likelihoods for all classes.
        """
        labels, preds = check_label_shapes(labels, preds, True)

        for label, pred_label in zip(labels, preds):
            if pred_label.shape != label.shape:
                pred_label = ndarray.argmax(pred_label, axis=self.axis)
            pred_label = pred_label.asnumpy().astype('int32')
            label = label.asnumpy().astype('int32')

            label, pred_label = check_label_shapes(label, pred_label)

            valid = (label.reshape(-1, 1) != self.ignore_labels).all(axis=-1)

            self.sum_metric += np.logical_and(pred_label.flat == label.flat, valid).sum()
            self.num_inst += np.sum(valid)
Example No. 32
for i in range(total_batch):
    num_valid = batch_size if (i + 1) * batch_size <= X_test.shape[0]\
        else X_test.shape[0] - i * batch_size
    data_npy = np.take(X_test,
                       indices=np.arange(i * batch_size, (i + 1) * batch_size),
                       axis=0,
                       mode="clip")
    label_npy = np.take(y_test,
                        indices=np.arange(i * batch_size, (i + 1) * batch_size),
                        axis=0,
                        mode="clip")
    test_net.forward(data_batch=mx.io.DataBatch(data=[nd.array(data_npy)],
                                                label=None),
                     is_train=False)
    logits_nd = test_net.get_outputs()[0]
    pred_cls = nd.argmax(logits_nd, axis=-1).asnumpy()
    correct_count += (pred_cls[:num_valid] == label_npy[:num_valid]).sum()
acc = correct_count / float(X_test.shape[0])
print('Accuracy:', acc)

# 6. Get one and predict
test_net.reshape(data_shapes=[mx.io.DataDesc(name='data', shape=(1, 1, 28, 28), layout='NCHW')],
                 label_shapes=None)
r = np.random.randint(0, X_test.shape[0])
test_net.forward(data_batch=mx.io.DataBatch(data=[nd.array(X_test[r:r + 1])],
                                            label=None))
logits_nd = test_net.get_outputs()[0]
print("Label: ", int(y_test[r]))
print("Prediction: ", int(nd.argmax(logits_nd, axis=1).asnumpy()[0]))
'''
Epoch: 0001 cost = 0.222577997
Example No. 33
def argmax(vec):
    # return the argmax as a python int
    idx = nd.argmax(vec, axis=1)
    return to_scalar(idx)
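to_scalar comes from elsewhere in that example; a minimal stand-in for a 1-element NDArray (an assumption):

def to_scalar(x):
    # unwrap a single-element NDArray into a Python int
    return int(x.asscalar())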
Example No. 34
correct_counts = [0 for i in range(num_models)]
ensemble_correct_count = 0
total_num = 0
for i in range(total_batch):
    data_npy, label_npy, num_valid = get_batch(i, batch_size, X_test, y_test)
    prob_ensemble = nd.zeros(shape=(label_npy.shape[0], 10), ctx=mx.gpu())
    for k, test_net in enumerate(test_nets):
        test_net.forward(data_batch=mx.io.DataBatch(data=[nd.array(data_npy)],
                                                    label=None),
                         is_train=False)
        logits_nd = test_net.get_outputs()[0]
        prob_nd = nd.softmax(logits_nd)
        prob_ensemble += prob_nd
        pred_cls = nd.argmax(prob_nd, axis=-1).asnumpy()
        correct_counts[k] += (pred_cls[:num_valid] == label_npy[:num_valid]).sum()
    prob_ensemble /= num_models
    ensemble_pred_cls = nd.argmax(prob_ensemble, axis=-1).asnumpy()
    ensemble_correct_count += (ensemble_pred_cls[:num_valid] == label_npy[:num_valid]).sum()
for i in range(num_models):
    print(i, 'Accuracy:', correct_counts[i] / float(X_test.shape[0]))
print('Ensemble accuracy:', ensemble_correct_count / float(X_test.shape[0]))
'''
Learning Started!
Epoch: 0001 cost = [ 0.23813407  0.23717315]
Epoch: 0002 cost = [ 0.07455271  0.07434764]
Epoch: 0003 cost = [ 0.05925059  0.06024327]
Epoch: 0004 cost = [ 0.05032205  0.04895757]
Epoch: 0005 cost = [ 0.04573197  0.0439943 ]
Epoch: 0006 cost = [ 0.04143022  0.0416003 ]