Example no. 1
0
def evaluate(net, data_iter, ctx):
    """Return the mean loss and mean accuracy of `net` over `data_iter` on `ctx`."""
    total_loss = 0.
    total_acc = 0.
    num_batches = len(data_iter)
    for batch, target in data_iter:
        batch = batch.as_in_context(ctx)
        target = target.as_in_context(ctx)
        preds = net(batch)
        total_acc += accuracy(preds, target)
        total_loss += nd.mean(softmax_cross_entropy(preds, target)).asscalar()
    return total_loss / num_batches, total_acc / num_batches
Example no. 2
0
    def update(self, labels: nd.array, preds: nd.array) -> float:
        """Accumulate the softmax cross-entropy error of the anchor predictions.

        Adds the batch error to ``self.sum_metric`` and ``NUM_ANCHORS`` to
        ``self.num_inst``, then returns the per-anchor error.
        """
        metric.check_label_shapes(labels, preds)

        # Flatten the ground truth to a 1-D integer vector, then lift it back
        # into an NDArray so it lives on a concrete device context.
        flat_labels = reformat(labels[1]).asnumpy().ravel().round().astype(np.int)
        label = nd.array(flat_labels)
        pred = nd.flatten(preds[1]).T.as_in_context(label.context)

        err = nd.softmax_cross_entropy(pred, label).asscalar()
        self.sum_metric += err
        self.num_inst += NUM_ANCHORS
        return err / NUM_ANCHORS
Example no. 3
0
def get_loss(data, net, ctx):
    """Average softmax cross-entropy of `net` over every batch in `data` on `ctx`."""
    total = 0.0
    for feature_batch, target in data:
        # Move both tensors to the target device before the forward pass.
        target = target.as_in_context(ctx)
        embeddings = net.features(feature_batch.as_in_context(ctx))
        logits = net.output_new(embeddings)
        total += nd.mean(nd.softmax_cross_entropy(logits, target)).asscalar()
    return total / len(data)
Example no. 4
0
    def neg_log_likelihood(self, sentences: List[Sentence], embed_ctx=None):
        """Negative log-likelihood of the gold tags for a batch of sentences.

        With a CRF head this is the summed difference between the partition
        (forward) score and the gold-path score; without a CRF it is the
        token-level softmax cross-entropy over the emission scores.
        """
        feats, tags, lens_ = self.forward(sentences, embed_ctx=embed_ctx)

        if not self.use_crf:
            flat_feats = feats.reshape([-1, feats.shape[-1]])
            flat_tags = tags.astype('float32').reshape([-1])
            return nd.softmax_cross_entropy(flat_feats, flat_tags)

        partition_score = self._forward_alg(feats, lens_)
        gold_score = self._score_sentence(feats, tags, lens_)
        return (partition_score - gold_score).sum()
Example no. 5
0
def create_model(gpu):
    """Train a breed classifier on precomputed Inception+ResNet features.

    Parameters
    ----------
    gpu : bool
        Run on the GPU when True, otherwise on the CPU.

    Side effects: prompts for the epoch count on stdin, trains for that many
    epochs, prints per-epoch metrics, and saves the parameters under
    ``models/model<epochs>.params``.
    """
    ctx = mx.gpu() if gpu else mx.cpu()

    data_dir = 'data'
    batch_size = 128
    learning_rate = 1e-3
    epochs = int(input("Epoch? "))
    lr_decay = 0.95
    lr_decay2 = 0.8
    lr_period = 100
    models_dir = "models"

    synset = np.unique(
        pd.read_csv(os.path.join(data_dir, 'labels.csv')).breed).tolist()
    # Scan the image tree once and reuse the list (the original globbed the
    # same pattern twice: once for the count, once for iteration).
    image_files = glob(os.path.join('.', data_dir, 'Images', '*', '*.jpg'))
    n = len(image_files)

    y = nd.zeros((n, ))
    print('Aggregating labels')
    for i, file_name in tqdm(enumerate(image_files), total=n):
        # Path component 3 is the breed directory ('./data/Images/<dir>/x.jpg');
        # its first 10 characters are an ID prefix. Split on os.sep so this
        # also works on Windows (the original split on '/').
        breed_dir = file_name.split(os.sep)[3]
        y[i] = synset.index(breed_dir[10:].lower())
    # One synchronization after the whole loop instead of one per image.
    nd.waitall()

    features = [nd.load(os.path.join(models_dir, 'features_incep.nd'))[0],
                nd.load(os.path.join(models_dir, 'features_res.nd'))[0]]
    features = nd.concat(*features, dim=1)

    data_iter_train = gluon.data.DataLoader(
        gluon.data.ArrayDataset(features, y), batch_size, shuffle=True)
    softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
    net = build_model(ctx)
    trainer = gluon.Trainer(net.collect_params(), 'adam',
                            {'learning_rate': learning_rate})

    print('Starting training')
    for epoch in range(epochs):
        # Exponential LR decay: gentle up to `lr_period` epochs, steeper after.
        decay = lr_decay if epoch <= lr_period else lr_decay2
        trainer.set_learning_rate(trainer.learning_rate * decay)
        train_loss = 0.
        train_acc = 0.
        steps = len(data_iter_train)
        for data, label in data_iter_train:
            data, label = data.as_in_context(ctx), label.as_in_context(ctx)
            with autograd.record():
                output = net(data)
                loss = softmax_cross_entropy(output, label)
            loss.backward()
            trainer.step(batch_size)
            train_loss += nd.mean(loss).asscalar()
            train_acc += accuracy(output, label)

        # NOTE(review): "validation" metrics are computed on the TRAINING
        # loader -- no held-out set exists here; confirm this is intended.
        val_loss, val_acc = evaluate(net, data_iter_train, ctx)

        print(
            "Epoch %d. loss: %.4f, acc: %.2f%%, val_loss %.4f, val_acc %.2f%%"
            % (epoch + 1, train_loss / steps, train_acc / steps * 100,
               val_loss, val_acc * 100))

    print('Model saved under ' + models_dir + '/model' + str(epochs) +
          '.params')
    net.save_parameters(
        os.path.join(models_dir, 'model' + str(epochs) + '.params'))
    input("Press Enter to continue...")