Example #1
def evaluate_accuracy(data_iterator, network):
    """Measure the classification accuracy of a network (here ResNet).

    Parameters
    ----------
    data_iterator : iterable
        Yields (data, label) batches of the dataset.
    network : gluon.Block
        The network to evaluate.

    Returns
    -------
    float
        The accuracy over all batches.
    """
    acc = mx.metric.Accuracy()

    # Iterate through data and label
    for i, (data, label) in enumerate(data_iterator):

        # Get the data and label into the GPU
        data = data.as_in_context(ctx[0])
        label = label.as_in_context(ctx[0])

        # Get network's output which is a probability distribution
        # Apply argmax on the probability distribution to get network's classification.
        output = network(data)
        predictions = nd.argmax(output, axis=1)

        # Give network's prediction and the correct label to update the metric
        acc.update(preds=predictions, labels=label)

    # Return the accuracy
    return acc.get()[1]
Example #2
def evaluate_accuracy(data_iterator, net):
    acc = mx.metric.Accuracy()
    for i, (data, label) in enumerate(data_iterator):
        data = data.as_in_context(ctx)
        label = label.as_in_context(ctx)
        output = net(data)
        predictions = nd.argmax(output, axis=1)
        acc.update(preds=predictions, labels=label)
    return acc.get()[1]
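Most of the evaluate_accuracy variants on this page rely on module-level globals (mx, nd, a context ctx, a trained net). A minimal, self-contained sketch of how the helper above could be driven; the MNIST loader, untrained Dense layer, and CPU context are illustrative assumptions, not part of the original:

import mxnet as mx
from mxnet import nd, gluon
from mxnet.gluon.data.vision import transforms

ctx = mx.cpu()  # the snippet above reads this global
dataset = gluon.data.vision.MNIST(train=False).transform_first(transforms.ToTensor())
test_data = gluon.data.DataLoader(dataset, batch_size=64)

net = gluon.nn.Dense(10)  # stand-in model; any Block mapping batches to (N, num_classes) works
net.initialize(ctx=ctx)

print(evaluate_accuracy(test_data, net))  # untrained, so roughly chance level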
Example #3
def get_class(pgm):
    """Convert a one-hot program tensor to class indices; 2-D input is assumed to already hold indices."""
    if len(pgm.shape) == 3:
        idx = nd.argmax(pgm, axis=2)
    elif len(pgm.shape) == 2:
        idx = pgm
    else:
        raise IndexError("pgm must be 2- or 3-dimensional")
    return idx
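A quick sanity check of get_class with an illustrative one-hot batch (assumes from mxnet import nd):

pgm = nd.array([[[0.1, 0.9], [0.8, 0.2]]])  # shape (1, 2, 2): rows of class scores
print(get_class(pgm))                       # -> [[1. 0.]]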
Example #4
def evaluate_accuracy(data_iterator, net):
    """Function to evaluate accuracy of any data iterator passed to it as an argument"""
    acc = mx.metric.Accuracy()
    for data, label in data_iterator:
        output = net(data)
        # keep preds shaped (N,) to match the labels; reshaping to (N, 1) would
        # make mx.metric.Accuracy argmax over the singleton axis and zero them out
        predictions = nd.argmax(output, axis=1)
        acc.update(preds=predictions, labels=label)
    return acc.get()[1]
Example #5
def predict(img):
    # normalize and convert HWC -> CHW, then add a batch axis: (1, C, H, W)
    X = test_iter._dataset.normalize_image(img)
    X = X.transpose((2, 0, 1)).expand_dims(axis=0)
    # argmax over the class axis of the (1, num_classes, H, W) output -> (1, H, W)
    pred = nd.argmax(net(X.as_in_context(ctx[0])), axis=1)
    # drop the batch axis and return an (H, W) label map
    return pred.reshape((pred.shape[1], pred.shape[2]))
Example #6
def measure_performance(model, ctx, data_iter):
    acc = mx.metric.Accuracy()
    for _, (data, labels) in enumerate(data_iter):
        data = data.as_in_context(ctx)
        labels = labels.as_in_context(ctx)
        output = model(data)
        predictions = nd.argmax(output, axis=1)
        acc.update(preds=predictions, labels=labels)
    return acc.get()[1]
Example #7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--filename', default='I9-9.h5')
    # caution: argparse's type=bool turns any non-empty string (even "False")
    # into True; store_true/store_false flags are the usual fix
    parser.add_argument('--is_train', type=bool, default=True)
    parser.add_argument('--is_mavs', type=bool, default=False)
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--epochs', type=int, default=12)
    args = parser.parse_args()

    ctx = mx.gpu()
    epochs = args.epochs
    batch_size = args.batch_size
    acc = mx.metric.Accuracy()
    dataset = readH5.IndianDatasets(args.filename)
    train_data = gluon.data.DataLoader(dataset,
                                       batch_size=batch_size,
                                       shuffle=True,
                                       last_batch='discard')
    loss = gluon.loss.SoftmaxCrossEntropyLoss(sparse_label=True)
    net = ResNet3D.ResNet3D(9)  # the number of classes is 9 ...
    net.initialize(mx.init.Xavier(), ctx=ctx, force_reinit=True)
    net.hybridize()
    trainer = gluon.Trainer(net.collect_params(), 'sgd', {
        'learning_rate': 0.1,
        'wd': 0.0001
    })

    for epoch in range(epochs):
        acc.reset()  # reset each epoch so the printed training accuracy is per-epoch
        for i, batch in enumerate(train_data):
            data, label = batch
            data = data.copyto(ctx)
            label = label.copyto(ctx)
            with autograd.record():
                pred = net(data)
                loss_t = loss(pred, nd.argmax(
                    label, axis=1, keepdims=True))  # shape: batch_size * 1
            loss_t.backward()
            trainer.step(batch_size)
            acc.update(label.argmax(axis=1, keepdims=True),
                       pred.argmax(axis=1, keepdims=True))

            if ((i + 1) % 100) == 0:
                print('current loss = ', nd.sum(loss_t).asscalar())

        if epoch in (8, 10):
            trainer.set_learning_rate(trainer.learning_rate * 0.1)

        print('Epoch: %d, training acc: %s %.3f' % (epoch, *acc.get()))

    try:
        net.collect_params().save('ResNet3D.params')
    except Exception as e:
        print('model params save failed:', e)
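Assuming the script above is saved as train_resnet3d.py and ends with the usual if __name__ == '__main__': main() guard (both assumptions; the original shows neither), a typical invocation would be:

python train_resnet3d.py --filename I9-9.h5 --batch_size 16 --epochs 12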
Example #8
def evaluate_accuracy(data_iterator, net):
    current_ctx = ctx[0]
    acc = mx.metric.Accuracy()
    for d, l in data_iterator:
        data = d.as_in_context(current_ctx)
        label = l.as_in_context(current_ctx)
        output = net(data)
        predictions = nd.argmax(output, axis=1)
        acc.update(preds=predictions, labels=label)
    return acc.get()[1]
Example #9
def evaluate_metrics(metrics, data_iterator, net, nb_batches=None, ctx=mx.gpu(), sparse_policy_label=False,
                     apply_select_policy_from_plane=True):
    """
    Runs inference of the network on a data_iterator object and evaluates the given metrics.
    The metric results are returned as a dictionary object.

    :param metrics: List of mxnet metrics which must have the
    names ['value_loss', 'policy_loss', 'value_acc_sign', 'policy_acc']
    :param data_iterator: Gluon data iterator object
    :param net: Gluon network handle
    :param nb_batches: Number of batches to evaluate (early stopping).
     If set to None all batches of the data_iterator will be evaluated
    :param ctx: MXNET data context
    :param sparse_policy_label: Should be set to true if the policy label is given as a scalar class index
     rather than a one-hot / distribution target (e.g. supervised learning)
    :param apply_select_policy_from_plane: If true, given policy label is converted to policy map index
    :return:
    """
    reset_metrics(metrics)
    for i, (data, value_label, policy_label) in enumerate(data_iterator):
        data = data.as_in_context(ctx)
        value_label = value_label.as_in_context(ctx)
        policy_label = policy_label.as_in_context(ctx)
        [value_out, policy_out] = net(data)
        value_out[0][0].wait_to_read()
        if apply_select_policy_from_plane:
            policy_out = policy_out[:, FLAT_PLANE_IDX]
        # update the metrics
        metrics["value_loss"].update(preds=value_out, labels=value_label)
        metrics["policy_loss"].update(preds=nd.SoftmaxActivation(policy_out),
                                      labels=policy_label)
        metrics["value_acc_sign"].update(preds=value_out, labels=value_label)
        metrics["policy_acc"].update(preds=nd.argmax(policy_out, axis=1),
                                     labels=policy_label if sparse_policy_label else nd.argmax(policy_label, axis=1))
        # stop after evaluating x batches (only recommended to use this for the train set evaluation)
        if nb_batches and i + 1 == nb_batches:
            break

    metric_values = {"loss": 0.01 * metrics["value_loss"].get()[1] + 0.99 * metrics["policy_loss"].get()[1]}

    for metric in metrics.values():
        metric_values[metric.get()[0]] = metric.get()[1]
    return metric_values
Example #10
def evaluate_accuracy(data_iterator, model, model_ctx):
    acc = mx.metric.Accuracy()
    for ix, (xi, label) in enumerate(data_iterator):
        xi = xi.as_in_context(model_ctx)
        label = label.as_in_context(model_ctx)
        output = model(xi)
        predictions = nd.argmax(output, axis=1)
        acc.update(preds=predictions, labels=label)

    # note: this variant returns the metric object itself; use acc.get()[1] for the value
    return acc
Example #11
def evaluate_accuracy(data_iterator, net):
    acc = mx.metric.Accuracy()
    for i, (data, label) in enumerate(data_iterator):
        data = data.as_in_context(model_ctx).reshape((0, num_inputs))
        label = label.as_in_context(model_ctx)
        output = net(data)
        # no keepdims: preds must match the (N,) label shape, otherwise
        # mx.metric.Accuracy would argmax the (N, 1) preds a second time
        predictions = nd.argmax(output, axis=1)
        acc.update(preds=predictions, labels=label)
    return acc.get()[1]
Example #12
    def decode_block(self, data_iter: DataLoader, docs: Sequence[Document], **kwargs):
        for dids, sids, data, label in tqdm(data_iter, leave=False):
            X = nd.transpose(data, axes=(1, 0, 2)).as_in_context(self.ctx)
            state = self.model.begin_state(batch_size=X.shape[1], ctx=self.ctx)
            output, state = self.model(X, state)
            for batch, preds in enumerate(nd.argmax(output, axis=2).T):
                sen = docs[dids[batch].asscalar()].sentences[sids[batch].asscalar()]
                sequence_length = len(sen)
                preds = [self.label_map.get(int(pred.asscalar())) for pred in preds[:sequence_length]]
                sen[self.key] = preds
Example #13
def evaluate_accuracy(data_iterator, net):
    # returns accuracy as calculated by mx metric package
    acc = mx.metric.Accuracy()
    for i, (data, label) in enumerate(data_iterator):
        data = data.as_in_context(model_ctx).reshape((-1,784))
        label = label.as_in_context(model_ctx)
        output = net(data)
        prediction = nd.argmax(output, axis=1)
        acc.update(preds=prediction, labels=label)
    return acc.get()[1]
Example #14
    def argmax(self, scores: Union[np.ndarray, NDArray]) -> str:
        """
        :param scores: the prediction scores of all labels.
        :return: the label with the maximum score.
        """
        if len(self) < len(scores):
            scores = scores[:len(self)]
        n = nd.argmax(scores, axis=0).asscalar() if isinstance(
            scores, NDArray) else np.argmax(scores)
        return self.get(int(n))
Example #15
def eval_accuracy(data_iter, net, layer_params, numerator=0., denominator=0.):
    for i, (data, label) in enumerate(data_iter):
        data = data.as_in_context(ctx).reshape((-1, 784))
        label = label.as_in_context(ctx)
        out = net(data, layer_params)
        predictions = nd.argmax(out, axis=1)
        valid_pred = nd.sum(predictions == label)
        numerator = numerator + valid_pred
        denominator = denominator + data.shape[0]
    return (numerator / denominator).asscalar()
Example #16
    def evaluate_accuracy(self, data_iterator, net):
        '''Given model and data, the model accuracy will be calculated.'''
        acc = metric.Accuracy()
        for i, (data, label) in enumerate(data_iterator):
            data = data.as_in_context(self.ctx).astype(self.precision)
            label = label.as_in_context(self.ctx).astype(self.precision)
            output = net(data)
            predictions = nd.argmax(output, axis=1)
            acc.update(preds=predictions, labels=label)
        return acc.get()[1]
Example #17
def evaluate_accuracy(data_iterator, net):
    acc = mx.metric.Accuracy()
    data_iterator.reset()
    for i, batch in enumerate(data_iterator):
        data = batch.data[0].as_in_context(ctx)
        label = batch.label[0].as_in_context(ctx)
        output = net(data)
        predictions = nd.argmax(output, axis=1)
        acc.update(preds=predictions, labels=label)
    return acc.get()[1]
Example #18
def train_accuracy(data_iter, network):
    acc = mx.metric.Accuracy()
    for i, (data, label) in enumerate(data_iter):
        data, label = transform(data, label)
        data = data.as_in_context(model_ctx)
        label = label.as_in_context(model_ctx)
        output = network(data)
        predictions = nd.argmax(output, axis=1)
        acc.update(preds=predictions, labels=label)
    return acc.get()[1]
Example #19
def evaluate_accuracy(data_iterator, net):
    acc = mx.metric.Accuracy()
    for i, batch in enumerate(data_iterator):
        data = batch.data[0].as_in_context(model_ctx) / 255  # scale pixels to [0, 1]
        label = batch.label[0].as_in_context(model_ctx)
        output = net(data)
        predictions = nd.argmax(output, axis=1)
        acc.update(preds=predictions, labels=label)
    return acc.get()[1]
Example #20
def evaluate_accuracy(data_iterator, net, ctx=mx.cpu()):
    acc = 0.
    if isinstance(data_iterator, mx.io.MXDataIter):
        data_iterator.reset()
    for i, batch in enumerate(data_iterator):
        data, label = _get_batch(batch, ctx)
        output = net(data)
        acc += nd.mean(nd.argmax(output, axis=1) == label).asscalar()

    return acc / (i + 1)
Example #21
    def postprocess(self, data):
        # post-process and output the most likely category
        data = data[0]
        values = {
            val: float(int(data[i].asnumpy() * 1000) / 1000.0)
            for i, val in enumerate(self.labels)
        }
        index = int(nd.argmax(data, axis=0).asnumpy()[0])
        predicted = self.labels[index]
        return [{'predicted': predicted, 'confidence': values}]
Example #22
    def _postprocess(self, data):
        data = data[0]
        # softmax over the raw scores (not numerically stabilized)
        softmax = nd.exp(data) / nd.sum(nd.exp(data))[0]
        values = {
            val: float(int(softmax[0][i].asnumpy() * 1000) / 1000.0)
            for i, val in enumerate(self.labels)
        }
        index = int(nd.argmax(data, axis=1).asnumpy()[0])
        predicted = self.labels[index]
        return {'predicted': predicted, 'confidence': values}
Example #23
def evaluate_accuracy(data_iterator, net):
    acc = mx.metric.Accuracy()
    for i, (data, label) in enumerate(data_iterator):
        data = mx.ndarray.cast(data, dtype='float32')
        data = data.as_in_context(ctx).reshape((-1, num_inputs))
        label = label.as_in_context(ctx)
        output = net(data)
        predictions = nd.argmax(output, axis=1)
        acc.update(preds=predictions, labels=label)
    return acc.get()[1]
Example #24
    def decode(self, x):
        batch_size = x.shape[0]
        state = self.init_hidden(batch_size, self.ctx)
        outputs_pgm = []
        outputs_param = []

        for i in range(self.seq_length):
            if i == 0:
                xt = x
            else:
                prob_pre = nd.exp(outputs_pgm[-1])
                it1 = nd.argmax(prob_pre, axis=1)
                xt = self.pgm_embed(it1)
            output, state = self.core(xt.expand_dims(axis=0), state)

            pgm_feat1 = nd.relu(self.logit1(output.squeeze(0)))
            pgm_feat2 = self.logit2(pgm_feat1)
            pgm_score = nd.log_softmax(pgm_feat2, axis=1)

            trans_prob = nd.softmax(pgm_feat2, axis=1).detach()
            param_feat1 = nd.relu(self.regress1(output.squeeze(0)))
            param_feat2 = nd.concat(trans_prob, param_feat1, dim=1)
            param_score = self.regress2(param_feat2)
            param_score = param_score.reshape(batch_size, self.vocab_size + 1,
                                              self.max_param)

            index = nd.argmax(trans_prob, axis=1)
            index = index.expand_dims(axis=1).expand_dims(axis=2).broadcast_to(
                shape=(batch_size, 1, self.max_param)).detach()
            param_score = nd.pick(param_score, index, 1)

            outputs_pgm.append(pgm_score)
            outputs_param.append(param_score)
        outputs_pgm = [_.expand_dims(axis=1) for _ in outputs_pgm]
        outputs_param = [_.expand_dims(axis=1) for _ in outputs_param]
        pgms = outputs_pgm[0]
        params = outputs_param[0]
        for i in range(1, len(outputs_pgm)):
            pgms = nd.concat(pgms, outputs_pgm[i], dim=1)
            params = nd.concat(params, outputs_param[i], dim=1)
        return [pgms, params]
Example #25
    def forward(self, x):
        root = next(iter(self._structure.items()))[0]

        if (len(self._routerlayer) > 0):
            router_d, router_mat_d, weight_d, embedd_d = self._contextify(x)(
                root)

            embedd = nd.stack(*[embedd_d[key] for key in sorted(embedd_d)],
                              axis=0)
            router = nd.stack(*[router_d[key] for key in sorted(router_d)],
                              axis=-1)
            router_mat = nd.stack(
                *[router_mat_d[key] for key in sorted(router_mat_d)], axis=1)

            where = nd.argmax(nd.maximum(0, 1 / (router + 0.5)), axis=1)

            head = nd.concat(*[router_mat[i][k] for i, k in enumerate(where)],
                             dim=0)

            return nd.dot(head, embedd)

        else:
            head = nd.ones_like(nd.slice_axis(x, axis=1, begin=0, end=None))
            return self._contextify(x)(root) * head
Example #26
def predict(net, data, label):
    data = nd.array(data)
    label = nd.array(label)
    hidden = net.begin_state(func=mx.nd.zeros, batch_size=data.shape[0], ctx=mx.cpu())
    dd = nd.array(data.reshape((data.shape[0], 5, 11)).swapaxes(0, 1))
    output, hidden = net(dd, hidden)
    output = output.reshape((5, data.shape[0], 2))
    output = nd.sum(output, axis=0) / 5  # average the five per-step outputs
    l = nd.argmax(output, axis=1)
    res = nd.mean(l == label)
    return res.asscalar()
Example #27
def compute_accuracy(data_iter, model, weights, cntx):
    num = 0
    den = 0
    for i, (X, Y) in enumerate(data_iter):
        data = X.as_in_context(cntx).reshape((-1, X.shape[1]))
        label = Y.as_in_context(cntx)
        Y_hat = model(data, weights[0], weights[1])
        # keep shapes aligned: compare (N,) predictions against (N,) labels,
        # otherwise an (N, 1) == (N,) comparison would broadcast to (N, N)
        classified = nd.argmax(Y_hat, axis=1)
        num += nd.sum(classified == label.reshape((-1,)))
        den += data.shape[0]
    return (num / den).asscalar()
Example #28
    def _mask(self, output: nd.NDArray, test_citys: bool = True):
        """get mask by output"""
        predict = nd.squeeze(nd.argmax(output, 1)).asnumpy()
        if test_citys:
            from mxnetseg.tools import city_train2label
            mask = city_train2label(predict)
        else:
            from mxnetseg.tools import get_color_palette
            mask = get_color_palette(predict,
                                     color_palette[self.data_name.lower()])
        return mask
Example #29
def predict(net, data, label):
    data = nd.array(data)
    label = nd.array(label)
    hidden = net.begin_state(func=mx.nd.zeros, batch_size=data.shape[0], ctx=mx.cpu())
    dd = nd.array(data.reshape((data.shape[0], 5, 11)).swapaxes(0, 1))
    output, hidden = net(dd, hidden)
    output = output.reshape((5, data.shape[0], 1))
    output = nd.sum(output, axis=0) / 5
    # caution: with a single output unit, argmax over axis=1 is always 0;
    # a threshold such as (output > 0.5) is probably what was intended here
    l = nd.argmax(output, axis=1)
    res = nd.mean(l == label)
    return res.asscalar()
Example #30
    def update(self, labels, preds):
        for label, pred in zip(labels, preds):
            pred = nd.argmax(pred, axis=self.axis)
            check_shapes(label, pred)
            pred = pred.asnumpy().astype('int32')
            label = label.asnumpy().astype('int32')
            label = label.flat
            pred = pred.flat

            self.sum_metric += (pred == label).sum()
            # count elements rather than batches so get() returns a true accuracy
            self.num_inst += len(label)
Example #31
def predict():
    model = mlp()
    model.load_params("mxnet.model", ctx)
    acc = mx.metric.Accuracy()
    for i, (data, label) in enumerate(test_data):
        data = data.as_in_context(ctx).reshape((-1, inputs))
        label = label.as_in_context(ctx)
        output = model(data)
        predictions = nd.argmax(output, axis=1)
        acc.update(preds=predictions, labels=label)
    print("accuracy: %.2f%%" % (acc.get()[1] * 100))
Example #32
def evaluate_accuracy(data_iter, network):
    acc = mx.metric.Accuracy()
    for i, (data, label) in enumerate(data_iter):
        data, label = transform(data, label)
        data = data.as_in_context(model_ctx)
        label = label.as_in_context(model_ctx)
        act_output = get_the_hiddenlayer_outputs(no_layers, data)
        output = network(act_output)
        predictions = nd.argmax(output, axis=1)
        acc.update(preds=predictions, labels=label)
    return acc.get()[1]
Example #33
def eval_acc(net, data_iterator):
    num = 0.0
    denum = 0.0
    for i, (data, label) in enumerate(data_iterator):
        data = data.as_in_context(ctx)
        label = label.as_in_context(ctx)
        output = net(data)
        predictions = nd.argmax(output, axis=1)
        num += nd.sum(predictions == label)
        denum += data.shape[0]
    return (num / denum).asscalar()
Example #34
def evaluate_accuracy(data_iterator, net, model_params):
    numerator = 0.
    denominator = 0.
    for i, (data, label) in enumerate(data_iterator):
        data = data.as_in_context(model_ctx).reshape((-1, 784))
        label = label.as_in_context(model_ctx)
        output = net(data, model_params[0], model_params[1])
        predictions = nd.argmax(output, axis=1)
        numerator += nd.sum(predictions == label)
        denominator += data.shape[0]
    return (numerator / denominator).asscalar()
Example #35
def evaluate_accuracy(data_iterator, net):
    acc = mx.metric.Accuracy()

    # Iterate through data and label
    for i, (data, label) in enumerate(data_iterator):

        # Get the data and label into the GPU
        data = data.as_in_context(ctx[0])
        label = label.as_in_context(ctx[0])

        # Get network's output which is a probability distribution
        # Apply argmax on the probability distribution to get network's classification.
        output = net(data)
        predictions = nd.argmax(output, axis=1)

        # Give network's prediction and the correct label to update the metric
        acc.update(preds=predictions, labels=label)

    # Return the accuracy
    return acc.get()[1]
Example #36
def get_max_pred(batch_heatmaps):
    """Return per-joint (x, y) argmax coordinates and peak values from heatmaps."""
    batch_size = batch_heatmaps.shape[0]
    num_joints = batch_heatmaps.shape[1]
    width = batch_heatmaps.shape[3]
    heatmaps_reshaped = batch_heatmaps.reshape((batch_size, num_joints, -1))
    idx = nd.argmax(heatmaps_reshaped, 2)
    maxvals = nd.max(heatmaps_reshaped, 2)

    maxvals = maxvals.reshape((batch_size, num_joints, 1))
    idx = idx.reshape((batch_size, num_joints, 1))

    preds = nd.tile(idx, (1, 1, 2)).astype(np.float32)

    preds[:, :, 0] = (preds[:, :, 0]) % width
    preds[:, :, 1] = nd.floor((preds[:, :, 1]) / width)

    pred_mask = nd.tile(nd.greater(maxvals, 0.0), (1, 1, 2))
    pred_mask = pred_mask.astype(np.float32)

    preds *= pred_mask
    return preds, maxvals
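A quick shape check for get_max_pred; the batch, joint, and heatmap sizes below are illustrative (assumes import numpy as np, import mxnet as mx and from mxnet import nd):

heatmaps = nd.random.uniform(shape=(2, 17, 64, 48))  # (batch, joints, H, W)
preds, maxvals = get_max_pred(heatmaps)
print(preds.shape, maxvals.shape)  # (2, 17, 2) and (2, 17, 1)
# preds[..., 0] holds the x (column) index, preds[..., 1] the y (row) index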
Example #37
def evaluate(net, dataloader):
    """Evaluate network on the specified dataset"""
    total_L = 0.0
    total_sample_num = 0
    total_correct_num = 0
    start_log_interval_time = time.time()
    print('Begin Testing...')
    for i, (data, label) in enumerate(dataloader):
        data = mx.nd.transpose(data.as_in_context(context))
        label = label.as_in_context(context)
        output = net(data)
        L = loss(output, label)
        pred = nd.argmax(output, axis=1)
        total_L += L.sum().asscalar()
        total_sample_num += label.shape[0]
        total_correct_num += (pred.astype('int') == label).sum().asscalar()
        if (i + 1) % args.log_interval == 0:
            print('[Batch {}/{}] elapsed {:.2f} s'.format(
                i + 1, len(dataloader), time.time() - start_log_interval_time))
            start_log_interval_time = time.time()
    avg_L = total_L / float(total_sample_num)
    acc = total_correct_num / float(total_sample_num)
    return avg_L, acc
Example #38
# CIFAR-10 classes
classes = 10
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']

context = [mx.cpu()]

# Load Model
model_name = opt.model
pretrained = True if opt.saved_params == '' else False
kwargs = {'classes': classes, 'pretrained': pretrained}
net = get_model(model_name, **kwargs)

if not pretrained:
    net.load_parameters(opt.saved_params, ctx=context)

# Load Images
img = image.imread(opt.input_pic)

# Transform
transform_fn = transforms.Compose([
    transforms.Resize(32),
    transforms.CenterCrop(32),
    transforms.ToTensor(),
    transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
])

img = transform_fn(img)
pred = net(img.expand_dims(0))

ind = nd.argmax(pred, axis=1).astype('int')
print('The input picture is classified as [%s], with probability %.3f.' %
      (class_names[ind.asscalar()], nd.softmax(pred)[0][ind].asscalar()))
Example #39
def predict_sentiment(net, vocab, sentence):
    """Predict the sentiment of a given sentence."""
    sentence = nd.array([vocab.token_to_idx[token] for token in sentence],
                        ctx=try_gpu())
    label = nd.argmax(net(nd.reshape(sentence, shape=(1, -1))), axis=1)
    return 'positive' if label.asscalar() == 1 else 'negative'
Example #40
def accuracy(output, labels):
    return nd.mean(nd.argmax(output, axis=1) == labels).asscalar()
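For example, on a batch of three two-class outputs (values illustrative; assumes from mxnet import nd):

output = nd.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
labels = nd.array([0, 1, 1])
print(accuracy(output, labels))  # argmax gives [0, 1, 0] -> 0.667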
Example #41
    def forward_single_out(self, data, cond=None, logged=False):
        out = (self.forward_logged if logged else self)(data)
        if cond is None:
            cond = nd.argmax(out, axis=1)
        cond = nd.one_hot(cond, out.shape[1])
        return cond * out
Example #42
def textify(embedding):
    result = ""
    indices = nd.argmax(embedding, axis=1).asnumpy()
    for idx in indices:
        result += character_list[int(idx)]
    return result
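A small round-trip example, assuming character_list is the decoder's vocabulary (the three-character vocabulary here is illustrative):

character_list = list('abc')
one_hot = nd.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]])  # rows encode 'b', 'c', 'a'
print(textify(one_hot))  # -> bca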
Example #43
def predict_sentiment(net, vocab, sentence):
    """Predict the sentiment of a given sentence."""
    sentence = nd.array(vocab.to_indices(sentence), ctx=try_gpu())
    label = nd.argmax(net(sentence.reshape((1, -1))), axis=1)
    return 'positive' if label.asscalar() == 1 else 'negative'