Code example #1
File: utils.py Project: pigtamer/myssd
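The utils.py excerpts on this page (examples #1-#3, #5, #6) follow the d2l (Dive into Deep Learning) MXNet utilities, and the page omits the file's import header. A plausible reconstruction is sketched below; helpers referenced later (set_figsize, evaluate_accuracy, sgd, data_iter_consecutive, predict_rnn_gluon, grad_clipping) are defined elsewhere in the same utils.py.

# Assumed import header for the utils.py excerpts (not shown on the source page).
import math
import time

import numpy as np
from matplotlib import pyplot as plt

import mxnet as mx
from mxnet import autograd, gluon, init, nd
from mxnet.gluon import data as gdata, loss as gloss, nn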
def train_gluon_ch7(trainer_name,
                    trainer_hyperparams,
                    features,
                    labels,
                    batch_size=10,
                    num_epochs=2):
    """Train a linear regression model with a given Gluon trainer."""
    net = nn.Sequential()
    net.add(nn.Dense(1))
    net.initialize(init.Normal(sigma=0.01))
    loss = gloss.L2Loss()

    def eval_loss():
        return loss(net(features), labels).mean().asscalar()

    ls = [eval_loss()]
    data_iter = gdata.DataLoader(gdata.ArrayDataset(features, labels),
                                 batch_size,
                                 shuffle=True)
    trainer = gluon.Trainer(net.collect_params(), trainer_name,
                            trainer_hyperparams)
    for _ in range(num_epochs):
        start = time.time()
        for batch_i, (X, y) in enumerate(data_iter):
            with autograd.record():
                l = loss(net(X), y)
            l.backward()
            trainer.step(batch_size)
            if (batch_i + 1) * batch_size % 100 == 0:
                ls.append(eval_loss())
    print('loss: %f, %f sec per epoch' % (ls[-1], time.time() - start))
    set_figsize()
    plt.plot(np.linspace(0, num_epochs, len(ls)), ls)
    plt.xlabel('epoch')
    plt.ylabel('loss')
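A minimal usage sketch for train_gluon_ch7, assuming the imports above; the synthetic regression data and the 'sgd' hyperparameters are illustrative, not from the project.

# Hypothetical usage: fit the linear model on synthetic regression data.
features = nd.random.normal(shape=(1500, 5))
true_w = nd.array([2.0, -3.4, 1.2, 0.5, -1.1])
labels = nd.dot(features, true_w) + 4.2 + nd.random.normal(scale=0.01, shape=(1500,))

train_gluon_ch7('sgd', {'learning_rate': 0.05}, features, labels,
                batch_size=10, num_epochs=2)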
Code example #2
File: utils.py Project: pigtamer/myssd
def train_ch3(net,
              train_iter,
              test_iter,
              loss,
              num_epochs,
              batch_size,
              params=None,
              lr=None,
              trainer=None):
    """Train and evaluate a model with CPU."""
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            with autograd.record():
                y_hat = net(X)
                l = loss(y_hat, y).sum()
            l.backward()
            if trainer is None:
                sgd(params, lr, batch_size)
            else:
                trainer.step(batch_size)
            y = y.astype('float32')
            train_l_sum += l.asscalar()
            train_acc_sum += (y_hat.argmax(axis=1) == y).sum().asscalar()
            n += y.size
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f' %
              (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))
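A sketch of how train_ch3 is typically driven; Fashion-MNIST, the one-layer net, and the learning rate are illustrative assumptions.

# Hypothetical usage: softmax regression on Fashion-MNIST via a Gluon trainer.
batch_size = 256
transformer = gdata.vision.transforms.ToTensor()
train_iter = gdata.DataLoader(
    gdata.vision.FashionMNIST(train=True).transform_first(transformer),
    batch_size, shuffle=True)
test_iter = gdata.DataLoader(
    gdata.vision.FashionMNIST(train=False).transform_first(transformer),
    batch_size, shuffle=False)

net = nn.Sequential()
net.add(nn.Dense(10))
net.initialize(init.Normal(sigma=0.01))
loss = gloss.SoftmaxCrossEntropyLoss()
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})
train_ch3(net, train_iter, test_iter, loss, num_epochs=5,
          batch_size=batch_size, trainer=trainer)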
Code example #3
File: utils.py Project: pigtamer/myssd
def train(train_iter, test_iter, net, loss, trainer, ctx, num_epochs):
    """Train and evaluate a model."""
    print('training on', ctx)
    if isinstance(ctx, mx.Context):
        ctx = [ctx]
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n, m, start = 0.0, 0.0, 0, 0, time.time()
        for i, batch in enumerate(train_iter):
            Xs, ys, batch_size = _get_batch(batch, ctx)
            with autograd.record():
                y_hats = [net(X) for X in Xs]
                ls = [loss(y_hat, y) for y_hat, y in zip(y_hats, ys)]
            for l in ls:
                l.backward()
            trainer.step(batch_size)
            train_l_sum += sum([l.sum().asscalar() for l in ls])
            n += sum([l.size for l in ls])
            train_acc_sum += sum([(y_hat.argmax(axis=1) == y).sum().asscalar()
                                  for y_hat, y in zip(y_hats, ys)])
            m += sum([y.size for y in ys])
        test_acc = evaluate_accuracy(test_iter, net, ctx)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, '
              'time %.1f sec' % (epoch + 1, train_l_sum / n, train_acc_sum / m,
                                 test_acc, time.time() - start))
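_get_batch is not shown on this page; below is a sketch consistent with how it is called above (and with the d2l helper of the same name): split the batch across the devices in ctx and return the total batch size.

def _get_batch(batch, ctx):
    """Split features and labels across the given contexts."""
    features, labels = batch
    if labels.dtype != features.dtype:
        labels = labels.astype(features.dtype)
    return (gluon.utils.split_and_load(features, ctx),
            gluon.utils.split_and_load(labels, ctx),
            features.shape[0])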
Code example #4
File: train.py Project: happog/Gluon-PSENet
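This train.py excerpt likewise omits its imports; the sketch below is a guess from the identifiers used, and the project-local module paths (PSENet, DiceLoss, ICDAR) are hypothetical.

# Assumed imports; project-local module paths are hypothetical.
import os
import mxnet as mx
from mxnet import autograd, lr_scheduler as ls
from mxnet.gluon import Trainer
from mxnet.gluon.data import DataLoader
from mxboard import SummaryWriter  # TensorBoard-style logging for MXNet
from model import PSENet    # hypothetical path
from loss import DiceLoss   # hypothetical path
from data import ICDAR      # hypothetical path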
def train(data_dir, pretrain_model, epoches=3, lr=0.001, wd=5e-4,
          momentum=0.9, batch_size=5, ctx=mx.cpu(), verbose_step=2,
          ckpt='ckpt'):
    icdar_loader = ICDAR(data_dir=data_dir)
    loader = DataLoader(icdar_loader, batch_size=batch_size, shuffle=True)
    net = PSENet(num_kernels=7, ctx=ctx)
    # initialize parameters with a normal distribution
    net.collect_params().initialize(mx.init.Normal(sigma=0.01), ctx=ctx)
    # net.initialize(ctx=ctx)
    # net.load_parameters(pretrain_model, ctx=ctx, allow_missing=True, ignore_extra=True)
    pse_loss = DiceLoss(lam=0.7)

    # Despite the cosine-suggesting name, this is a polynomial-decay LR
    # scheduler stepped once per batch over the whole run.
    cos_shc = ls.PolyScheduler(
        max_update=icdar_loader.length * epoches // batch_size, base_lr=lr)
    trainer = Trainer(
        net.collect_params(),
        'sgd',
        {
            'learning_rate': lr,
            'wd': wd,
            'momentum': momentum,
            'lr_scheduler': cos_shc
        })
    summary_writer = SummaryWriter(ckpt)
    for e in range(epoches):
        cumulative_loss = 0

        for i, item in enumerate(loader):
            im, score_maps, kernels, training_masks, ori_img = item
            
            im = im.as_in_context(ctx)
            score_maps = score_maps[:, ::4, ::4].as_in_context(ctx)
            kernels = kernels[:, ::4, ::4, :].as_in_context(ctx)
            training_masks = training_masks[:, ::4, ::4].as_in_context(ctx)

            with autograd.record():
                kernels_pred = net(im)
                loss = pse_loss(score_maps, kernels, kernels_pred, training_masks)
            # run backward outside the record scope
            loss.backward()
            trainer.step(batch_size)
            if i % verbose_step == 0:
                global_steps = icdar_loader.length * e + i * batch_size
                summary_writer.add_image('score_map', score_maps[0:1, :, :], global_steps)
                summary_writer.add_image('score_map_pred', kernels_pred[0:1, -1, :, :], global_steps)
                summary_writer.add_image('kernel_map', kernels[0:1, :, :, 0], global_steps)
                summary_writer.add_image('kernel_map_pred', kernels_pred[0:1, 0, :, :], global_steps)
                summary_writer.add_scalar('loss', mx.nd.mean(loss).asscalar(), global_steps)
                summary_writer.add_scalar('c_loss', mx.nd.mean(pse_loss.C_loss).asscalar(), global_steps)
                summary_writer.add_scalar('kernel_loss', mx.nd.mean(pse_loss.kernel_loss).asscalar(), global_steps)
                summary_writer.add_scalar('pixel_accuracy', pse_loss.pixel_acc, global_steps)
                print("step: {}, loss: {}, score_loss: {}, kernel_loss: {}, pixel_acc: {}".format(i * batch_size, mx.nd.mean(loss).asscalar(), \
                    mx.nd.mean(pse_loss.C_loss).asscalar(), mx.nd.mean(pse_loss.kernel_loss).asscalar(), \
                        pse_loss.pixel_acc))
            cumulative_loss += mx.nd.mean(loss).asscalar()
        print("Epoch {}, loss: {}".format(e, cumulative_loss))
        net.save_parameters(os.path.join(ckpt, 'model_{}.param'.format(e)))
    summary_writer.close()
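A hypothetical invocation; the dataset and checkpoint paths are placeholders.

# Hypothetical invocation with placeholder paths.
if __name__ == '__main__':
    ctx = mx.gpu(0) if mx.context.num_gpus() > 0 else mx.cpu()
    train(data_dir='./data/icdar2015',
          pretrain_model='./pretrained/resnet50.params',
          epoches=3, batch_size=5, ctx=ctx, ckpt='ckpt')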
Code example #5
File: utils.py Project: pigtamer/myssd
def train_and_predict_rnn_gluon(model, num_hiddens, vocab_size, ctx,
                                corpus_indices, idx_to_char, char_to_idx,
                                num_epochs, num_steps, lr, clipping_theta,
                                batch_size, pred_period, pred_len, prefixes):
    """Train an Gluon RNN model and predict the next item in the sequence."""
    loss = gloss.SoftmaxCrossEntropyLoss()
    model.initialize(ctx=ctx, force_reinit=True, init=init.Normal(0.01))
    trainer = gluon.Trainer(model.collect_params(), 'sgd', {
        'learning_rate': lr,
        'momentum': 0,
        'wd': 0
    })

    for epoch in range(num_epochs):
        l_sum, n, start = 0.0, 0, time.time()
        data_iter = data_iter_consecutive(corpus_indices, batch_size,
                                          num_steps, ctx)
        state = model.begin_state(batch_size=batch_size, ctx=ctx)
        for X, Y in data_iter:
            for s in state:
                s.detach()
            with autograd.record():
                (output, state) = model(X, state)
                y = Y.T.reshape((-1, ))
                l = loss(output, y).mean()
            l.backward()
            params = [p.data() for p in model.collect_params().values()]
            grad_clipping(params, clipping_theta, ctx)
            trainer.step(1)
            l_sum += l.asscalar() * y.size
            n += y.size

        if (epoch + 1) % pred_period == 0:
            print('epoch %d, perplexity %f, time %.2f sec' %
                  (epoch + 1, math.exp(l_sum / n), time.time() - start))
            for prefix in prefixes:
                print(
                    ' -',
                    predict_rnn_gluon(prefix, pred_len, model, vocab_size, ctx,
                                      idx_to_char, char_to_idx))
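grad_clipping is defined elsewhere in utils.py; below is a sketch matching the d2l helper of the same name, which rescales all gradients when their global L2 norm exceeds clipping_theta.

def grad_clipping(params, theta, ctx):
    """Clip gradients by their global L2 norm."""
    norm = nd.array([0], ctx)
    for param in params:
        norm += (param.grad ** 2).sum()
    norm = norm.sqrt().asscalar()
    if norm > theta:
        for param in params:
            param.grad[:] *= theta / norm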
Code example #6
File: utils.py Project: pigtamer/myssd
def train_ch5(net, train_iter, test_iter, batch_size, trainer, ctx,
              num_epochs):
    """Train and evaluate a model with CPU or GPU."""
    print('training on', ctx)
    loss = gloss.SoftmaxCrossEntropyLoss()
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()
        for X, y in train_iter:
            X, y = X.as_in_context(ctx), y.as_in_context(ctx)
            with autograd.record():
                y_hat = net(X)
                l = loss(y_hat, y).sum()
            l.backward()
            trainer.step(batch_size)
            y = y.astype('float32')
            train_l_sum += l.asscalar()
            train_acc_sum += (y_hat.argmax(axis=1) == y).sum().asscalar()
            n += y.size
        test_acc = evaluate_accuracy(test_iter, net, ctx)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, '
              'time %.1f sec' % (epoch + 1, train_l_sum / n, train_acc_sum / n,
                                 test_acc, time.time() - start))
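A usage sketch for train_ch5; the LeNet-style architecture and hyperparameters are illustrative, with train_iter/test_iter built as in the train_ch3 sketch above.

# Hypothetical usage: a small convolutional net on GPU when available.
ctx = mx.gpu(0) if mx.context.num_gpus() > 0 else mx.cpu()
net = nn.Sequential()
net.add(nn.Conv2D(6, kernel_size=5, activation='sigmoid'),
        nn.MaxPool2D(pool_size=2, strides=2),
        nn.Conv2D(16, kernel_size=5, activation='sigmoid'),
        nn.MaxPool2D(pool_size=2, strides=2),
        nn.Dense(120, activation='sigmoid'),
        nn.Dense(84, activation='sigmoid'),
        nn.Dense(10))
net.initialize(init=init.Xavier(), ctx=ctx)
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.9})
train_ch5(net, train_iter, test_iter, 256, trainer, ctx, num_epochs=5)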
Code example #7
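No file header survives for this example; the code reads like the training/inference script of the same myssd project and omits its imports. A plausible reconstruction, with project-local names (MySSD, num_anchors, args, predata, utils) flagged as assumptions:

# Assumed imports; project-local modules and `args` are hypothetical.
import time
import cv2 as cv
from matplotlib import pyplot as plt
import mxnet as mx
from mxnet import autograd, contrib, image, nd
from mxnet.gluon import loss as gloss
import predata              # project-local data pipeline (assumed)
import utils                # project-local plotting helpers (assumed)
from myssd import MySSD     # hypothetical module path
# `args` (CLI flags) and `num_anchors` are assumed module-level globals.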
def test(ctx=mx.cpu()):
    net = MySSD(1, num_anchors)
    net.initialize(init="Xavier", ctx=ctx)
    net.hybridize()
    # print(net)
    # x = nd.random.normal(0,1,(100,3,256,256), ctx=ctx)
    # net(x)
    batch_size, edge_size = args.batch_size, args.input_size
    train_iter, _ = predata.load_data_uav(args.data_path, batch_size,
                                          edge_size)
    batch = train_iter.next()
    # inspect the shapes of the first batch
    print(batch.data[0].shape, batch.label[0].shape)

    if batch_size >= 25:  # show the first 25 pikachu images in a 5x5 grid
        imgs = (batch.data[0][0:25].transpose((0, 2, 3, 1))) / 255
        axes = utils.show_images(imgs, 5, 5).flatten()
        for ax, label in zip(axes, batch.label[0][0:25]):
            utils.show_bboxes(ax, [label[0][1:5] * edge_size], colors=['w'])

        plt.show()

    # net.initialize(init=init.Xavier(), ctx=ctx)
    trainer = mx.gluon.Trainer(net.collect_params(), 'sgd', {
        'learning_rate': 0.2,
        'wd': 5e-4
    })
    cls_loss = gloss.SoftmaxCrossEntropyLoss()
    bbox_loss = gloss.L1Loss()

    def calc_loss(cls_preds, cls_labels, bbox_preds, bbox_labels, bbox_masks):
        cls = cls_loss(cls_preds, cls_labels)
        bbox = bbox_loss(bbox_preds * bbox_masks, bbox_labels * bbox_masks)
        return cls + bbox

    def cls_eval(cls_preds, cls_labels):
        # the result from class prediction is at the last dim
        # argmax() should be assigned with the last dim of cls_preds
        return (cls_preds.argmax(axis=-1) == cls_labels).sum().asscalar()

    def bbox_eval(bbox_preds, bbox_labels, bbox_masks):
        return ((bbox_labels - bbox_preds) * bbox_masks).abs().sum().asscalar()

    IF_LOAD_MODEL = args.load
    if IF_LOAD_MODEL:
        net.load_parameters(args.model_path)
    else:
        for epoch in range(args.num_epoches):
            acc_sum, mae_sum, n, m = 0.0, 0.0, 0, 0
            # reset the data iterator to read images from the beginning
            train_iter.reset()
            start = time.time()
            for batch in train_iter:
                X = batch.data[0].as_in_context(ctx)
                Y = batch.label[0].as_in_context(ctx)
                with autograd.record():
                    # generate anchors and generate bboxes
                    im, anchors, cls_preds, bbox_preds = net(X)
                    # assign classes and bboxes for each anchor
                    bbox_labels, bbox_masks, cls_labels = nd.contrib.MultiBoxTarget(
                        anchors, Y, cls_preds.transpose((0, 2, 1)))
                    # calc loss
                    l = calc_loss(cls_preds, cls_labels, bbox_preds,
                                  bbox_labels, bbox_masks)
                l.backward()
                trainer.step(batch_size)
                acc_sum += cls_eval(cls_preds, cls_labels)
                n += cls_labels.size
                mae_sum += bbox_eval(bbox_preds, bbox_labels, bbox_masks)
                m += bbox_labels.size

            if (epoch + 1) % 1 == 0:
                print(
                    'epoch %2d, class err %.2e, bbox mae %.2e, time %.1f sec' %
                    (epoch + 1, 1 - acc_sum / n, mae_sum / m,
                     time.time() - start))

    net.save_parameters("myssd.params")

    def predict(X):
        im, anchors, cls_preds, bbox_preds = net(X.as_in_context(ctx))
        # im = im.transpose((2, 3, 1, 0)).asnumpy()
        # imgs = [im[1:-2, 1:-2, k, 0] for k in range(256)]  # why is there a boundary effect?

        # utils.show_images_np(imgs, 16, 16)
        # plt.show()
        # plt.savefig("./activdation/figbase%s"%nd.random.randint(0,100,1).asscalar())

        # plt.imshow(nd.sum(nd.array(im[1:-2, 1:-2, :, :]), axis=2).asnumpy()[:, :, 0], cmap='gray')
        # plt.savefig("./suming_act")
        cls_probs = cls_preds.softmax().transpose((0, 2, 1))
        output = contrib.nd.MultiBoxDetection(cls_probs, bbox_preds, anchors)
        idx = [i for i, row in enumerate(output[0]) if row[0].asscalar() != -1]
        if not idx:
            raise ValueError("NO TARGET. Seq Terminated.")
        return output[0, idx]

    def display(img, output, threshold):
        lscore = []
        for row in output:
            lscore.append(row[1].asscalar())
        for row in output:
            score = row[1].asscalar()
            if score < min(max(lscore), threshold):
                continue
            h, w = img.shape[0:2]
            bbox = [row[2:6] * nd.array((w, h, w, h), ctx=row.context)]
            cv.rectangle(img, (bbox[0][0].asscalar(), bbox[0][1].asscalar()),
                         (bbox[0][2].asscalar(), bbox[0][3].asscalar()),
                         (0, 255, 0), 3)
            cv.imshow("res", img)
            cv.waitKey(60)

    cap = cv.VideoCapture(args.test_path)
    rd = 0
    while True:
        ret, frame = cap.read()
        if not ret:  # stop when the video stream is exhausted
            break
        img = nd.array(frame)
        feature = image.imresize(img, 512, 512).astype('float32')
        X = feature.transpose((2, 0, 1)).expand_dims(axis=0)

        countt = time.time()
        output = predict(X)
        if rd == 0:
            net.export('ssd')  # export symbol and params once, on the first frame
        countt = time.time() - countt
        print("SPF: %3.2f" % countt)

        utils.set_figsize((5, 5))

        display(frame / 255, output, threshold=0.8)
        plt.show()
        rd += 1
    cap.release()