Code example #1
def SVM(train_data,
        train_label,
        test_data,
        test_label,
        kernel='rbf',
        gamma='scale',
        C=1.0,
        degree=3,
        max_iter=-1):

    clf = svm.SVC(C=C,
                  kernel=kernel,
                  gamma=gamma,
                  degree=degree,
                  max_iter=max_iter,
                  decision_function_shape='ovr')

    clf.fit(train_data.values, train_label)
    pred = clf.predict(test_data.values)
    acc = utils.evaluate_accuracy(pred, test_label, p=True)

    # evaluate each category
    cat1, cat2, cat3 = [], [], []
    cat = [cat1, cat2, cat3]
    for i, l in enumerate(test_label):
        cat[l].append(i)
    for i, c in enumerate(cat):
        ctest, clabels = test_data.values[c], test_label[c]
        pred = clf.predict(ctest)
        acc = utils.evaluate_accuracy(pred, clabels, p=False)
        print('Category %d, accuracy: %.4f' % (i, acc))
    return clf
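
The `utils.evaluate_accuracy(pred, labels, p)` helper is project-local and not shown here. A minimal sketch of a compatible implementation, assuming `pred` and `labels` are 1-D arrays and `p` toggles printing (names taken from the call sites above):

import numpy as np

def evaluate_accuracy(pred, labels, p=False):
    # fraction of predictions that match the labels, optionally printed
    acc = float(np.mean(np.asarray(pred) == np.asarray(labels)))
    if p:
        print('Accuracy: %.4f' % acc)
    return acc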
Code example #2
def RandomForest(train_data,
                 train_label,
                 test_data,
                 test_label,
                 n_estimator=100,  # sklearn no longer accepts the old 'warn' sentinel
                 random_state=53):

    rf = RandomForestClassifier(n_estimators=n_estimator,
                                random_state=random_state)
    rf.fit(train_data.values, train_label)
    print('Feature importances: ', rf.feature_importances_)
    pred = rf.predict(test_data.values)
    acc = utils.evaluate_accuracy(pred, test_label, p=True)

    # evaluate each category
    cat1, cat2, cat3 = [], [], []
    cat = [cat1, cat2, cat3]
    for i, l in enumerate(test_label):
        cat[l].append(i)
    for i, c in enumerate(cat):
        ctest, clabels = test_data.values[c], test_label[c]
        pred = rf.predict(ctest)
        acc = utils.evaluate_accuracy(pred, clabels, p=False)
        print('Category %d, accuracy: %.4f' % (i, acc))
    return rf
Code example #3
def test():
    print('Start to test...')
    ctx = [mx.gpu(int(i))
           for i in args.gpus.split(',')] if args.gpus != '-1' else mx.cpu()

    _, test_iter = data_loader(args.batch_size)

    model = LeNetPlus()
    model.load_params(os.path.join(args.ckpt_dir,
                                   args.prefix + '-best.params'),
                      ctx=ctx)

    start_time = timeit.default_timer()
    test_accuracy, features, predictions, labels = evaluate_accuracy(
        test_iter, model, ctx)
    elapsed_time = timeit.default_timer() - start_time

    print("Test_acc: %s, Elapsed_time: %f s" % (test_accuracy, elapsed_time))

    if not os.path.exists(args.out_dir):
        os.makedirs(args.out_dir)

    if args.plotting:
        plot_features(features,
                      labels,
                      num_classes=args.num_classes,
                      fpath=os.path.join(args.out_dir, '%s.png' % args.prefix))
Code example #4
def train():
    batch_size = 256
    train_data, test_data = utils.load_data_fashion_mnist(batch_size)

    softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()

    learning_rate = 0.2

    for epoch in range(5):
        train_loss = 0.
        train_acc = 0.
        for data, label in train_data:
            label = label.as_in_context(ctx)
            with autograd.record():
                output = net(data, is_training=True)
                loss = softmax_cross_entropy(output, label)
            loss.backward()
            utils.SGD(params, learning_rate / batch_size)

            train_loss += nd.mean(loss).asscalar()
            train_acc += utils.accuracy(output, label)

        test_acc = utils.evaluate_accuracy(test_data, net, ctx)
        print("Epoch %d. Loss: %f, Train acc %f, Test acc %f" %
              (epoch, train_loss / len(train_data),
               train_acc / len(train_data), test_acc))
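
Several Gluon snippets here call `utils.evaluate_accuracy(data_iter, net, ctx)` without defining it. A minimal d2l-style sketch, assuming `data_iter` yields `(data, label)` NDArray batches:

from mxnet import nd

def evaluate_accuracy(data_iter, net, ctx):
    # mean top-1 accuracy of net over every batch in data_iter
    acc_sum, n = 0.0, 0
    for data, label in data_iter:
        data = data.as_in_context(ctx)
        label = label.as_in_context(ctx).astype('float32')
        acc_sum += (net(data).argmax(axis=1) == label).sum().asscalar()
        n += label.size
    return acc_sum / n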
Code example #5
def train(lr, num_channel):
    net = MVTecCNN_BO(num_channel).to(device)
    criterion = nn.BCELoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    best_val_acc = 0.
    best_model = copy.deepcopy(net)  # fallback snapshot in case validation never improves
    num_epoch = 25

    scheduler = StepLR(optimizer, step_size=5, gamma=0.5)

    for epoch in range(num_epoch):
        loss_count = 0
        loss_sum = 0
        for idx, (img, label) in enumerate(train_loader):
            img = img.to(device)
            label = label.to(device, dtype=torch.float)
            label = label.view(-1, 1)
            pred = net(img)
            optimizer.zero_grad()
            loss = criterion(pred, label)
            loss.backward()
            optimizer.step()

            loss_sum += loss.item()
            loss_count += 1
            if idx % 10 == 0:
                val_acc = evaluate_accuracy(net, valid_loader, device)
                if val_acc > best_val_acc:
                    best_val_acc = val_acc
                    # keep a snapshot only when validation accuracy improves
                    best_model = copy.deepcopy(net)
        scheduler.step()

    save_esemble_models(best_val_acc, best_model.eval())
    return best_val_acc
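
A sketch of the `evaluate_accuracy(net, valid_loader, device)` helper this PyTorch example relies on, assuming a sigmoid output and binary labels to match the `BCELoss` setup above:

import torch

def evaluate_accuracy(net, data_loader, device):
    # accuracy of a sigmoid-output binary classifier, evaluated without gradients
    correct, total = 0, 0
    net.eval()
    with torch.no_grad():
        for img, label in data_loader:
            img = img.to(device)
            label = label.to(device, dtype=torch.float).view(-1, 1)
            correct += (torch.round(net(img)) == label).sum().item()
            total += label.numel()
    net.train()
    return correct / total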
Code example #6
def train_ch3(net,
              train_iter,
              test_iter,
              loss,
              num_epochs,
              batch_size,
              params=None,
              lr=None,
              optimizer=None):
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            y_hat = net(X)
            l = loss(y_hat, y).sum()

            # zero the gradients
            if optimizer is not None:
                optimizer.zero_grad()
            elif params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()

            l.backward()
            if optimizer is None:
                d2l.sgd(params, lr, batch_size)
            else:
                optimizer.step()  # used in the "concise implementation of softmax regression" section

            train_l_sum += l.item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        test_acc = d2l.evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f' %
              (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))
Code example #7
    def initialize(self):
        """
            invoke before train
            1. reset output file
            2. set prev_time for cal cost time
            3. init trainers
            4. verbose set True print test_acc in valid_data
        """
        if self.output_file is None:
            self.output_file = sys.stdout
            self.stdout = sys.stdout
        else:
            self.output_file = open(self.output_file, "w")
            self.stdout = sys.stdout
            sys.stdout = self.output_file

        self.prev_time = datetime.datetime.now()

        if self.verbose:
            print(" #", utils.evaluate_accuracy(self.valid_data, self.net,
                                                self.ctx))

        if self.trainers is None:
            self.trainers = [
                gluon.Trainer(
                    self.net.collect_params(), 'sgd', {
                        'learning_rate': self.policy['lr'],
                        'momentum': 0.9,
                        'wd': self.policy['wd']
                    })
            ]
Code example #8
def LetNet_Gluon():
    print('start run...')
    
    batch_size = 256
    ctx = try_ctx()
    
    # Step_1: define the model (a Sequential container)
    net = gluon.nn.Sequential()
    # Step_2: inside the model's name scope, add two conv/pool stages and two Dense layers.
    with net.name_scope():
        # two Conv2D convolution layers
        net.add(gluon.nn.Conv2D(channels=20, kernel_size=5, activation='relu'))
        net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2))
        net.add(gluon.nn.Conv2D(channels=50, kernel_size=3, activation='relu'))
        net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2))
        # flatten the convolutional output
        net.add(gluon.nn.Flatten())
        # two Dense layers
        net.add(gluon.nn.Dense(128, activation='relu'))
        net.add(gluon.nn.Dense(10))

    # Step_3: initialize the model
    net.initialize(ctx=ctx)
    print('initialize weight on', ctx)

    # Step_4: loss function: softmax cross-entropy
    softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
    # Step_5: optimizer: SGD
    trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.5})

    # run the optimization for `epochs` epochs
    epochs = 5
    for epoch in range(epochs):
        train_loss = 0
        train_acc = 0
        
        # sweep the dataset, batch_size (256) samples at a time
        train_data, test_data = load_data_set(batch_size)
        for data, label in train_data:
            # move the data and labels to the GPU (ctx device)
            data = data.as_in_context(ctx)
            label = label.as_in_context(ctx)
            # record gradients while computing the output and loss
            with autograd.record():
                output = net(data)
                loss = softmax_cross_entropy(output, label)
            # backpropagation
            loss.backward()
            # optimizer: update the parameters
            trainer.step(batch_size)

            # accumulate loss and acc over the dataset: the per-batch mean loss and
            # mean acc are summed (divide by len(train_data) later for the average)
            train_loss += nd.mean(loss).asscalar()
            train_acc += utils.accuracy(output, label)
        test_acc = utils.evaluate_accuracy(test_data, net, ctx)
        print('Epoch %d. Train loss is: %f;\nTrain acc is: %f;\nTest acc is: %f;\n' %
              (epoch, train_loss / len(train_data), train_acc / len(train_data), test_acc))
   
    print("run over!")
    return net
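
`utils.accuracy(output, label)` is likewise external. Given how the examples accumulate it per batch and divide by the number of batches, it should return the batch-mean accuracy as a Python float; a minimal sketch:

from mxnet import nd

def accuracy(output, label):
    # mean accuracy of one batch: argmax class versus the integer label
    return nd.mean(output.argmax(axis=1) == label.astype('float32')).asscalar()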
Code example #9
File: cnn_basic.py  Project: chamlhy/python
def train(train_data, test_data, net, loss, trainer, ctx, num_epochs, print_batches=None):
    for epoch in range(num_epochs):
        train_loss = 0.
        train_acc = 0.
        # reset standard data iterators before each epoch
        if isinstance(train_data, mx.io.MXDataIter):
            train_data.reset()
        # iterate over batch indices and batches
        for i, batch in enumerate(train_data):
            data, label = _get_batch(batch, ctx)
            with ag.record():
                output = net(data)
                L = loss(output, label)
            L.backward()

            # update the weights
            trainer.step(data.shape[0])

            train_loss += nd.mean(L).asscalar()
            train_acc += accuracy(output, label)

            n = i + 1
            # print the running training loss and accuracy every print_batches batches
            if print_batches and n % print_batches == 0:
                print('Batch %d. Loss: %f, Train acc: %f' % (
                    n, train_loss / n, train_acc / n))

        test_acc = evaluate_accuracy(test_data, net, ctx)
        print("Epoch %d. Loss: %f, Train acc: %f, Test acc: %f" % (
            epoch, train_loss / n, train_acc / n, test_acc))
Code example #10
def test_softmax_org():
    obj = SoftmaxOrg()
    x = torch.rand((2, 5))
    print(x)
    # print(obj.softmax(x))
    y_hat = torch.tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])
    y = torch.LongTensor([0, 2])
    # print(obj.cross_entropy(y_hat, y))
    # print(obj.accuracy(y_hat, y))
    _, test_iter = utils.load_data_fashion_mnist()
    print(utils.evaluate_accuracy(test_iter, obj.net))
Code example #11
    def after_epoch(self, epoch, train_loss, train_acc):
        """
            invoke after every epoch of train
            1. cal and print cost time the epoch
            2. print acc/loss info
            3. print lr
            4. update lr
        """
        # log info
        self.cur_time = datetime.datetime.now()
        h, remainder = divmod((self.cur_time - self.prev_time).seconds, 3600)
        m, s = divmod(remainder, 60)
        time_str = "Time %02d:%02d:%02d" % (h, m, s)

        train_loss /= len(self.train_data)
        train_acc /= len(self.train_data)
        if train_acc < 1e-6:
            train_acc = utils.evaluate_accuracy(self.train_data, self.net,
                                                self.ctx)

        if self.valid_data is not None:
            valid_acc = utils.evaluate_accuracy(self.valid_data, self.net,
                                                self.ctx)
            epoch_str = (
                "epoch %d, loss %.5f, train_acc %.4f, valid_acc %.4f" %
                (epoch, train_loss, train_acc, valid_acc))
        else:
            epoch_str = ("epoch %d, loss %.5f, train_acc %.4f" %
                         (epoch, train_loss, train_acc))

        self.prev_time = self.cur_time

        self.output_file.write(
            epoch_str + ", " + time_str + ", lr " +
            str([trainer.learning_rate for trainer in self.trainers]) + "\n")
        self.output_file.flush()  # to disk only when flush or close

        if in_list(epoch + 1, self.policy['lr_period']):
            for trainer in self.trainers:
                trainer.set_learning_rate(trainer.learning_rate *
                                          self.policy['lr_decay'])
Code example #12
def train(net, train_data, valid_data, num_epochs, lr, wd, ctx, lr_period,
          lr_decay):
    trainer = gluon.Trainer(net.collect_params(), 'sgd', {
        'learning_rate': lr,
        'momentum': 0.9,
        'wd': wd
    })

    prev_time = datetime.datetime.now()
    plt_train_acc = []
    plt_valid_acc = []

    for epoch in range(num_epochs):
        train_loss = 0.0
        train_acc = 0.0
        if epoch > 0 and epoch % lr_period == 0:
            trainer.set_learning_rate(trainer.learning_rate * lr_decay)
        if epoch > 161 and epoch % 10 == 0:
            trainer.set_learning_rate(trainer.learning_rate * 0.4)
        for data, label in train_data:
            label = label.as_in_context(ctx)
            with autograd.record():
                output = net(data.as_in_context(ctx))
                loss = softmax_cross_entropy(output, label)
            loss.backward()
            trainer.step(batch_size)
            train_loss += nd.mean(loss).asscalar()
            train_acc += utils.accuracy(output, label)
        cur_time = datetime.datetime.now()
        h, remainder = divmod((cur_time - prev_time).seconds, 3600)
        m, s = divmod(remainder, 60)
        time_str = "Time %02d:%02d:%02d" % (h, m, s)
        if valid_data is not None:
            valid_acc = utils.evaluate_accuracy(valid_data, net, ctx)
            epoch_str = ("Epoch %d. Loss: %f, Train acc %f, Valid acc %f, " %
                         (epoch, train_loss / len(train_data),
                          train_acc / len(train_data), valid_acc))
            plt_train_acc.append(train_acc / len(train_data))
            plt_valid_acc.append(valid_acc)
        else:
            epoch_str = ("Epoch %d. Loss: %f, Train acc %f, " %
                         (epoch, train_loss / len(train_data),
                          train_acc / len(train_data)))

        prev_time = cur_time

        print(epoch_str + time_str + ', lr ' + str(trainer.learning_rate))
    # plot the accuracy curves
    if valid_data is not None:
        plt.plot(plt_train_acc)
        plt.plot(plt_valid_acc)
        plt.legend(['train_acc', 'valid_acc'])
        plt.savefig("accuracy.png")
Code example #13
def activeTrain(idx, lr, isActiveLearn):
    
    if isActiveLearn:
        method='active'
        train_loader = active_train_loader
        valid_loader = active_valid_loader
    else:
        method='normal'
        train_loader = normal_train_loader
        valid_loader = normal_valid_loader
    
    net = copy.deepcopy(best_models[idx])
    net.to(device)
    net.train()
    criterion = nn.BCELoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    best_val_acc = 0.
    best_model = copy.deepcopy(net)  # fallback in case validation accuracy never improves
    
    num_epoch = 20
    
    scheduler = StepLR(optimizer, step_size=5, gamma=0.5)
    
    for epoch in range(num_epoch):
        loss_count = 0
        loss_sum = 0
        # use a separate name for the batch index so it does not shadow the idx argument
        for batch_idx, (img, label) in enumerate(train_loader):
            img = img.to(device)
            label = label.to(device, dtype=torch.float)
            label = label.view(-1, 1)
            pred = net(img)

            optimizer.zero_grad()
            loss = criterion(pred, label)
            loss.backward()
            optimizer.step()

            loss_sum += loss.item()
            loss_count += 1
            if batch_idx % 10 == 0:
                net.eval()
                val_acc = evaluate_accuracy(net, valid_loader, device)
                if val_acc > best_val_acc:
                    best_val_acc = val_acc
                    best_model = copy.deepcopy(net)

                net.train()
        scheduler.step()
#     save_esemble_models(best_val_acc, net.eval())
    return best_val_acc, best_model
Code example #14
def train():
    for epoch in range(5):
        train_loss = 0.
        train_acc = 0.
        for data, label in train_data:
            with autograd.record():
                output = net(data)
                loss = softmax_cross_entropy(output, label)
            loss.backward()
            trainer.step(batch_size)

            train_loss += nd.mean(loss).asscalar()
            train_acc += utils.accuracy(output, label)

        test_acc = utils.evaluate_accuracy(test_data, net)
        print("Epoch %d. Loss: %f, Train acc %f, Test acc %f" % (
            epoch, train_loss/len(train_data), train_acc/len(train_data), test_acc))
Code example #15
def train(num_gpus, batch_size, lr):
    train_data, test_data = utils.load_data_fashion_mnist(batch_size)

    ctx = [mx.gpu(i) for i in range(num_gpus)]
    print('running on', ctx)

    dev_params = [get_params(params, c) for c in ctx]

    for epoch in range(5):
        start = time()
        for data, label in train_data:
            train_batch(data, label, dev_params, ctx, lr)
        nd.waitall()
        print('Epoch: %d, training time = %.1f sec'%(epoch, time() - start))

        # validating on GPU 0
        net = lambda data: lenet(data, dev_params[0])
        test_acc = utils.evaluate_accuracy(test_data, net, ctx[0])
        print('Validation Accuracy = %.4f' % test_acc)
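
`get_params` and `train_batch` are defined elsewhere in this project. A rough sketch of the data-parallel pattern `train_batch` presumably follows, reusing the `lenet` forward pass from the snippet; the `loss` function name here is an assumption:

from mxnet import autograd, gluon

def allreduce(grads):
    # sum gradients onto the first device, then broadcast the sum back
    for g in grads[1:]:
        grads[0][:] += g.copyto(grads[0].context)
    for g in grads[1:]:
        grads[0].copyto(g)

def train_batch(data, label, dev_params, ctx, lr):
    # split the batch evenly across the devices
    data_list = gluon.utils.split_and_load(data, ctx)
    label_list = gluon.utils.split_and_load(label, ctx)
    with autograd.record():
        # forward pass and loss on every GPU, each with its own parameter copy
        losses = [loss(lenet(X, W), y)
                  for X, y, W in zip(data_list, label_list, dev_params)]
    for l in losses:
        l.backward()
    # synchronize gradients across devices, then take an SGD step on each copy
    for i in range(len(dev_params[0])):
        allreduce([params[i].grad for params in dev_params])
    for params in dev_params:
        for param in params:
            param[:] = param - lr * param.grad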
Code example #16
File: main.py  Project: LeeJuly30/RangeLoss-For-Gluno
def train():
    mnist_set = gluon.data.vision.MNIST(train=True, transform=transform)
    test_mnist_set = gluon.data.vision.MNIST(train=False, transform=transform)
    data = []
    label = []
    for i in range(len(mnist_set)):
        data.append(mnist_set[i][0][np.newaxis, :, :, :])
        label.append(mnist_set[i][1][np.newaxis, ])
    data = np.concatenate(data, axis=0)
    label = np.concatenate(label, axis=0)
    full_set = (data, label)
    ctx = mx.gpu(0)
    model = LeNetPlus(normalize=arg.normalize)
    model.hybridize()
    model.initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx)
    train_iter = RangeLossDataLoader(full_set, arg.num_class, arg.num_in_class,
                                     15000)
    test_iter = mx.gluon.data.DataLoader(test_mnist_set, 500, shuffle=False)
    softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
    Range_loss = RangeLoss(arg.alpha, arg.beta, arg.topk, arg.num_class,
                           arg.num_in_class, 2, arg.margin)
    Range_loss.initialize(mx.init.Xavier(), ctx=ctx)
    trainer = gluon.Trainer(model.collect_params(),
                            optimizer='adam',
                            optimizer_params={
                                'learning_rate': arg.lr,
                                'wd': 5e-4
                            })
    for i, (data, label) in enumerate(train_iter):
        data = nd.array(data, ctx=ctx)
        label = nd.array(label, ctx=ctx)
        with autograd.record():
            output, features = model(data)
            softmax_loss = softmax_cross_entropy(output, label)
            range_loss = Range_loss(features, label)
            loss = softmax_loss + range_loss
        loss.backward()
        trainer.step(data.shape[0])
        if ((i + 1) % 3000 == 0):
            test_accuracy, test_ft, _, test_lb = evaluate_accuracy(
                test_iter, model, ctx)
            print(test_accuracy)
            plot_features(test_ft, test_lb)
Code example #17
File: main.py  Project: heechul90/Mxnet_Learning
def train():
    for epoch in range(5):
        train_loss = 0.
        train_acc = 0.
        for data, label in train_data:
            label = label.as_in_context(ctx)
            with autograd.record():
                output = net(data)
                loss = softmax_cross_entropy(output, label)
            loss.backward()
            SGD(params, learning_rate / batch_size)

            train_loss += nd.mean(loss).asscalar()
            train_acc += accuracy(output, label)

        test_acc = evaluate_accuracy(test_data, net, ctx)
        print("Epoch %d. Loss: %f, Train acc %f, Test acc %f" %
              (epoch, train_loss / len(train_data),
               train_acc / len(train_data), test_acc))
Code example #18
def train(net, train_data, valid_data, num_epochs, lr, wd, ctx, lr_period, lr_decay):
    trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr, 'momentum': 0.9, 'wd': wd})
    prev_time = datetime.datetime.now()

    for epoch in range(num_epochs):
        train_loss = 0.0
        train_accuracy = 0.0
        """
        if epoch > 0 and epoch % lr_period == 0:
            trainer.set_learning_rate(trainer.learning_rate * lr_decay)
        """
        if epoch in [90, 140]:
            trainer.set_learning_rate(trainer.learning_rate * lr_decay)

        for data, label in train_data:
            label = label.as_in_context(ctx)
            with autograd.record():
                output = net(data.as_in_context(ctx))
                loss = softmax_cross_entropy(output, label)
            loss.backward()
            trainer.step(batch_size)

            train_loss += nd.mean(loss).asscalar()
            train_accuracy += utils.accuracy(output, label)

        curr_time = datetime.datetime.now()
        h, remainder = divmod((curr_time - prev_time).seconds, 3600)
        m, s = divmod(remainder, 60)
        time_str = "Time %02d:%02d:%02d" % (h, m, s)

        if valid_data is not None:
            valid_acc = utils.evaluate_accuracy(valid_data, net, ctx)
            epoch_str = ("Epoch %d. Loss: %f, Train acc %f, Valid acc %f, "
                         % (epoch, train_loss / len(train_data), train_accuracy / len(train_data), valid_acc))
        else:
            epoch_str = ("Epoch %d. Loss: %f, Train acc %f, "
                         % (epoch, train_loss / len(train_data), train_accuracy / len(train_data)))

        print(epoch_str + time_str + ', lr ' + str(trainer.learning_rate))
        prev_time = curr_time

        net.save_params('../model/cifar10/cifar10-%04d.params' % epoch)
Code example #19
def train(net_vgg, train_data, valid_data, test_data, batch_size, num_epochs,
          lr, ctx):
    trainer = gluon.Trainer(net_vgg.collect_params(), 'adam', {
        'learning_rate': lr,
    })

    max_entropy_loss = gluon.loss.SoftmaxCrossEntropyLoss()
    prev_time = datetime.datetime.now()
    for epoch in range(num_epochs):
        train_loss = 0.0
        train_acc = 0.0
        batch = 0
        for data, label in train_data:
            data = data.as_in_context(ctx)
            label = label.as_in_context(ctx)
            with autograd.record():
                output = net_vgg(data)
                loss = max_entropy_loss(output, label)
            loss.backward()
            trainer.step(batch_size)
            train_loss += nd.mean(loss).asscalar()
            train_acc += utils.accuracy(output, label)
            batch += 1

        cur_time = datetime.datetime.now()
        h, remainder = divmod((cur_time - prev_time).seconds, 3600)
        m, s = divmod(remainder, 60)
        time_str = "Time %02d:%02d:%02d" % (h, m, s)

        valid_acc, test_loss = utils.evaluate_accuracy(valid_data, net_vgg,
                                                       ctx)
        epoch_str = (
            "Epoch %d. Loss: %f, Train acc %f, Valid acc %f, Test loss: %f " %
            (epoch, train_loss / len(train_data), train_acc / len(train_data),
             valid_acc, test_loss))
        prev_time = cur_time
        print(epoch_str + time_str + ', lr ' + str(trainer.learning_rate))
        sys.stdout.flush()
        net_vgg.save_params('./model_out/vggnet_epoch_%d' % epoch)
        utils.predict(test_data, net_vgg,
                      './predict_result/result.epoch_%d' % epoch, ctx)
Code example #20
File: main.py  Project: heechul90/Mxnet_Learning
def train():
    learning_rate = .1

    for epoch in range(10):
        train_loss = 0.
        train_acc = 0.
        for data, label in train_data:
            with autograd.record():
                output = net(data)
                loss = cross_entropy(output, label)
            loss.backward()
            # average the gradients so the learning rate is less sensitive to the batch size
            SGD(params, learning_rate / batch_size)

            train_loss += nd.mean(loss).asscalar()
            train_acc += accuracy(output, label)

        test_acc = evaluate_accuracy(test_data, net)
        print("Epoch %d. Loss: %f, Train acc %f, Test acc %f" %
              (epoch, train_loss / len(train_data),
               train_acc / len(train_data), test_acc))
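
The hand-rolled `SGD(params, learning_rate / batch_size)` used here (and via `utils.SGD` in other snippets) is the usual d2l-style update; a minimal sketch, assuming each parameter is an NDArray with an attached gradient:

def SGD(params, lr):
    # in-place mini-batch SGD: move each parameter against its gradient
    for param in params:
        param[:] = param - lr * param.grad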
Code example #21
File: main.py  Project: wangx404/Center_Loss_in_MXNet
def test():
    """
    Test model accuracy on test dataset.
    测试模型在测试集上的准确率。
    """
    print("Start to test...")
    ctx = mx.gpu() if args.use_gpu else mx.cpu()

    _, test_iter = data_loader(args.batch_size)

    model = LeNetPlus()
    model.load_parameters(os.path.join(args.ckpt_dir,
                                       args.prefix + "-best.params"),
                          ctx=ctx,
                          allow_missing=True)
    #
    center_net = CenterLoss(num_classes=args.num_classes,
                            feature_size=args.feature_size,
                            lmbd=args.lmbd,
                            ctx=mx.cpu())
    center_net.load_parameters(os.path.join(
        args.ckpt_dir, args.prefix + "-feature_matrix.params"),
                               ctx=ctx)

    start_time = time.time()
    test_accuracy, features, predictions, labels = evaluate_accuracy(
        test_iter, model, center_net, args.eval_method, ctx)
    elapsed_time = time.time() - start_time
    print("Test_acc: %s, Elapsed_time: %f s" % (test_accuracy, elapsed_time))

    # make directory
    if not os.path.exists(args.out_dir):
        os.makedirs(args.out_dir)
    # draw feature map
    if args.plotting:
        plot_features(features,
                      labels,
                      num_classes=args.num_classes,
                      fpath=os.path.join(args.out_dir, "%s.png" % args.prefix))
Code example #22
File: standard_mlp_gluon.py  Project: xcszbdnl/Toy
net = gluon.nn.Sequential()
with net.name_scope():
    net.add(gluon.nn.Flatten())
    net.add(gluon.nn.Dense(256, activation="relu"))
    net.add(gluon.nn.Dense(10))
net.initialize()

batch_size = 256
train_data, test_data = loadMnistData(batch_size)

softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.5})

for epoch in range(5):
    train_loss = 0.
    train_acc = 0.
    for data, label in train_data:
        with autograd.record():
            output = net(data)
            loss = softmax_cross_entropy(output, label)
        loss.backward()
        trainer.step(batch_size)

        train_loss += nd.mean(loss).asscalar()
        train_acc += accuracy(output, label)

    test_acc = evaluate_accuracy(test_data, net)
    print("Epoch %d. Loss: %f, Train acc %f, Test acc %f" % (
        epoch, train_loss/len(train_data), train_acc/len(train_data), test_acc))
Code example #23
net = get_net()
ctx = utils.getCtx()

net.initialize(ctx=ctx, init=init.Xavier())

softmax_loss = loss.SoftmaxCrossEntropyLoss()

epochs = 5

trainer = Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.5})

for epoch in range(epochs):
    total_loss = .0
    total_acc = .0
    for data, label in train_iter:
        with autograd.record():
            output = net(data)
            losses = softmax_loss(output, label)
        losses.backward()
        trainer.step(batch_size)

        total_loss += nd.mean(losses).asscalar()
        total_acc += utils.accuracy(output, label)

    test_acc = utils.evaluate_accuracy(test_iter, net)
    print('Epoch %d, Train loss: %f, Train acc: %f, Test acc: %f' % (
        epoch, total_loss / len(train_iter), total_acc / len(train_iter), test_acc
    ))
Code example #24
File: cnn_gluon.py  Project: davidwang8088/MXnet_test
    net.add(gluon.nn.Conv2D(channels=50, kernel_size=3, activation='relu'))
    net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2))
    net.add(gluon.nn.Flatten())
    net.add(gluon.nn.Dense(256, activation='relu'))
    net.add(gluon.nn.Dense(64, activation='relu'))
    net.add(gluon.nn.Dense(10))
net.initialize()

cross_entropy_loss = gluon.loss.SoftmaxCrossEntropyLoss()

trainer = gluon.Trainer(net.collect_params(), "sgd",
                        {'learning_rate': learning_rate})

for e in range(5):
    train_loss = 0.
    train_acc = 0.
    for data, label in train_data:
        with autograd.record():
            output = net(data)
            loss = cross_entropy_loss(output, label)
        loss.backward()
        trainer.step(batch_size)

        train_loss += nd.mean(loss).asscalar()
        train_acc += utils.accuracy(output, label)

    test_acc = utils.evaluate_accuracy(test_data, net, ctx=ctx)

    print "%d epoach: the train loss is %f, the train accracy is %f, the test accuracy is %f" % (
        e, train_loss / len(train_data), train_acc / len(train_data), test_acc)
Code example #25
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})

#############################################
### Training ###
# learning_rate = .1  # learning rate
epochs = 7  # number of passes over the whole training set
for epoch in range(epochs):
    train_loss = 0.  # loss
    train_acc = 0.   # accuracy
    for data, label in train_data:  # training samples and labels
        with autograd.record():  # autodiff
            output = net(data)  # network output
            loss = softmax_cross_entropy(output, label)  # loss
        loss.backward()  # backpropagation
        # average the gradients so the learning rate is less sensitive to the batch size
        # SGD(params, learning_rate/batch_size)
        trainer.step(batch_size)
        train_loss += nd.mean(loss).asscalar()  # loss
        train_acc += utils.accuracy(output, label)  # accuracy

    test_acc = utils.evaluate_accuracy(test_data, net)  # accuracy on the test set
    print("Epoch %d. Loss: %f, Train acc %f, Test acc %f" % (
        epoch, train_loss / len(train_data), train_acc / len(train_data), test_acc))
Code example #26
    alexNet = AlexNet(classes=10)
    alexNet = alexNet.to(device)
    optimizer = optim.Adam(alexNet.parameters(), lr=1e-3)
    lossFN = nn.CrossEntropyLoss()

    trainDL, valDL = load_data_Fnt10(INPUT_SIZE, BATCH_SIZE)

    num_epochs = 10
    for epoch in range(num_epochs):
        sum_loss = 0
        sum_acc = 0
        batch_count = 0
        n = 0
        for X, y in tqdm(trainDL):
            X = X.to(device)
            y = y.to(device)
            y_pred = alexNet(X)
            loss = lossFN(y_pred, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            sum_loss += loss.cpu().item()
            sum_acc += (y_pred.argmax(dim=1) == y).sum().cpu().item()
            n += y.shape[0]
            batch_count += 1
        test_acc = evaluate_accuracy(valDL, alexNet)
        print("epoch %d: loss=%.4f \t acc=%.4f \t test acc=%.4f" %
              (epoch + 1, sum_loss / n, sum_acc / n, test_acc))
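
This example calls a two-argument `evaluate_accuracy(data_iter, net)` for a multi-class PyTorch model; a minimal sketch, assuming `data_iter` yields `(X, y)` tensor batches and that the net's own device should be used when none is given:

import torch

def evaluate_accuracy(data_iter, net, device=None):
    # top-1 accuracy over data_iter, evaluated without gradients
    if device is None:
        device = next(net.parameters()).device
    acc_sum, n = 0, 0
    net.eval()
    with torch.no_grad():
        for X, y in data_iter:
            X, y = X.to(device), y.to(device)
            acc_sum += (net(X).argmax(dim=1) == y).sum().item()
            n += y.shape[0]
    net.train()
    return acc_sum / n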
Code example #27
def train(epoch=10, batch_size=10, dataset_path=None, one_hot=False):

    if one_hot:
        loss_func = nn.CrossEntropyLoss()
        optimizer = optim.SGD(net.parameters(), lr=LR)
    else:
        loss_func = nn.BCELoss()
        optimizer = optim.RMSprop(net.parameters(), lr=LR, alpha=0.9)

    if dataset_path is not None and DEVICE != "kaggle":
        if sys.platform.startswith('win'):
            TRAIN_PATH = dataset_path + '\\train'
            VALID_PATH = dataset_path + '\\test'
        elif sys.platform.startswith('linux'):
            TRAIN_PATH = dataset_path + '/train'
            VALID_PATH = dataset_path + '/test'
    elif DEVICE == "kaggle":
        TRAIN_PATH = '../input/dogs-vs-cats/train/train'
        VALID_PATH = '../input/dogs-vs-cats/test/test'
        DATASET_PATH = '../input/dogs-vs-cats'
        # print(TRAIN_PATH)
    else:
        raise ValueError("Dataset path cannot be None")
    
    cat_dog_dataset = dataloader.CatVsDogDataset(TRAIN_PATH, mode="train", one_hot=one_hot)
    train_loader = Data(cat_dog_dataset, batch_size=batch_size, shuffle=True, num_workers=0)
    # train_loader = Data(cat_dog_dataset, batch_size=batch_size, shuffle=True)
    cat_dog_dataset_test = dataloader.CatVsDogDataset(TRAIN_PATH, mode="test", one_hot=one_hot)
    test_loader = Data(cat_dog_dataset_test, batch_size=batch_size, shuffle=True, num_workers=0)
    # test_loader = Data(cat_dog_dataset_test, batch_size=batch_size, shuffle=True)

    cat_dog_dataset_valid = dataloader.CatVsDogValid(VALID_PATH)
    valid_loader = Data(cat_dog_dataset_valid, batch_size=batch_size, shuffle=True, num_workers=0)

    start_time = time.time()
    print("Net: VGG%s, Total epoch: %d, Batch_size: %d, LR: %f, Device: %s"%(NET, epoch, batch_size, LR, DEVICE))
    time.sleep(0.1)

    for epoch in range(epoch):
        print("\nEpoch: %d"%(epoch + 1))
        time.sleep(0.1)

        train_loss_sum, train_acc_sum, n = 0.0, 0.0, 0

        for batch, (x, y) in enumerate(tqdm(train_loader)):
            y_hat = net(x)
            # if batch_size > 1, use sum() to calculate per batch loss
            if one_hot:
                loss = loss_func(y_hat, y).sum()
            else:
                loss = loss_func(y_hat, y)

            # print("\t\tBatch #{0}/{1}".format(batch+1, len(train_loader)) + "Loss = %.6f"%float(loss))

            # the optimizer is always created above (SGD or RMSprop), so the old
            # params/None branches were dead code; just zero, backpropagate, and step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # convert tensor data type to float data type
            # train_loss_sum += loss.item()
            # train_acc_sum += (y_hat == y).sum().item()

            if one_hot:
                train_loss_sum += loss_func(y_hat, y).sum().item()
                train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            else:
                train_loss_sum += loss.item()
                train_acc_sum += (torch.round(y_hat) == y).float().mean().item()
            
            # print(train_loss_sum)
            # print(train_acc_sum)
            # train_loss_sum += float(loss_func(y_hat, y))
            
        print('Epoch: {epoch}, Loss: {loss:.6f}, Accuracy: {accuracy:.6f}, '
              'Average_loss: {average_loss:.6f}, Average_accuracy: {average_accuracy:.6f}%'.format(
                  epoch=epoch + 1, loss=train_loss_sum, accuracy=train_acc_sum,
                  average_loss=train_loss_sum / (batch + 1),
                  average_accuracy=train_acc_sum / (batch + 1) * 100))

        if (epoch + 1) % RECORD_EPOCH == 0:
            test_acc = evaluate_accuracy(test_loader, net)
            print('Epoch: {epoch}, Test accuracy: {acc:.6f}%'.format(epoch=epoch + 1, acc=test_acc * 100))

    end_time = time.time()
    h, m, s = second2clock(end_time - start_time)
    print("Total training time: " + "%d hours %02d mins %.2f seconds" % (h, m, s))
    start_time = time.time()
    test_acc = evaluate_accuracy(test_loader, net)
    end_time = time.time()
    h, m, s = second2clock(end_time - start_time)
    print("Test accuracy: {:.6f}".format(test_acc*100) + "%, Eval time: " + "%d hours %02d mins %.2f seconds" % (h, m, s))
    
    test_img, test_label = next(iter(test_loader))
    show_result(net, test_img[0:SHOW_PIC_NUM], test_label[0:SHOW_PIC_NUM], rgb=RGB)

    if SHOW_VALID_PIC:
        valid_img = next(iter(valid_loader))
        show_valid(net, valid_img[0:SHOW_PIC_NUM], rgb=RGB)
        
    if CSV:
        valid_loader = Data(cat_dog_dataset_valid, batch_size=1, shuffle=False, num_workers=0)
        creat_csv(net, valid_loader)
Code example #28
    net = LeNet()
    batch_size = 256
    train_iter, test_iter = load_data_fashion_mnist(
        batch_size, root='./dataset/FashionMNIST')
    learning_rate = 1e-3
    num_epochs = 20
    optimizer = optim.Adam(net.parameters(), lr=learning_rate)
    net = net.to(device)
    print('training on ', device)
    loss = nn.CrossEntropyLoss()
    batch_count = 0
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n, start = 0., 0., 0, time.time()
        for X, y in train_iter:
            X = X.to(device)
            y = y.to(device)
            y_pred = net(X)
            l = loss(y_pred, y)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            train_l_sum += l.cpu().item()
            train_acc_sum += (y_pred.argmax(dim=1) == y).sum().cpu().item()
            n += y.shape[0]
            batch_count += 1
        test_acc = evaluate_accuracy(test_iter, net)
        print(
            'epoch %d, loss %.4f, train acc %.3f, test acc %.3f, time % .1f sec'
            % (epoch + 1, train_l_sum / batch_count, train_acc_sum / n,
               test_acc, time.time() - start))
Code example #29
    conv_arch = [(2, 3, 64), (2, 64, 128), (3, 128, 256), (3, 256, 512), (3, 512, 512)]
    vgg16 = VGG16(conv_arch=conv_arch, fc_features=7 * 7 * 512, fc_hidden_units=4096, classes=10)
    vgg16 = vgg16.to(device)
    optimizer = optim.Adam(vgg16.parameters(), lr=1e-3)
    lossFN = nn.CrossEntropyLoss()

    trainDL, valDL = load_data_Fnt10(INPUT_SIZE, BATCH_SIZE)

    num_epochs = 10
    for epoch in range(num_epochs):
        sum_loss = 0
        sum_acc = 0
        batch_count = 0
        n = 0
        for X, y in tqdm(trainDL):
            X = X.to(device)
            y = y.to(device)
            y_pred = vgg16(X)
            loss = lossFN(y_pred, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            sum_loss += loss.cpu().item()
            sum_acc += (y_pred.argmax(dim=1) == y).sum().cpu().item()
            n += y.shape[0]
            batch_count += 1
        test_acc = evaluate_accuracy(valDL, vgg16)
        print("epoch %d: loss=%.4f \t acc=%.4f \t test acc=%.4f" % (epoch + 1, sum_loss / n, sum_acc / n, test_acc))
Code example #30

max_acc = 0
max_k = 0
# compare the accuracy of the k_cross nets on the entire dataset and keep the best one
for i in range(k_cross):
    # net = utils.Perceptron(2)
    net = models.resnet18_v2(classes=2)
    test_data_array = data
    # apply the test-time augmentations and write the result back into the array
    for j, image in enumerate(test_data_array):
        image_tem = nd.transpose(image, axes=(1, 2, 0)) * 255
        image = utils.apply_aug_list(image_tem, utils.test_augs)
        test_data_array[j] = nd.transpose(image, (2, 0, 1)) / 255
    test_data = mx.io.NDArrayIter(data=test_data_array, label=label, batch_size=batch_size, shuffle=True)
    net.load_params(os.path.join(path_net, str(i)), ctx=ctx)
    # evaluate once and reuse the result instead of running evaluation twice
    acc = utils.evaluate_accuracy(test_data, net, ctx)
    if acc > max_acc:
        max_acc = acc
        max_k = i
print('The best net is net%i, accuracy on the entire dataset is: %.4f' % (max_k, max_acc))
os.rename(os.path.join(path_net, str(max_k)), os.path.join(path_net, 'bestnet_pretrained'))
Code example #31
    h1 = relu(nd.dot(X, W1) + b1)  # hidden-layer output with a nonlinear activation
    output = nd.dot(h1, W2) + b2
    return output

## Softmax and cross-entropy loss
## softmax regression: exp(Xi) / sum(exp(Xi)) normalizes the scores so the 10 class probabilities sum to 1
# cross-entropy loss
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()

## start training
learning_rate = .5  # learning rate
epochs = 7  # number of passes over the training set
for epoch in range(epochs):  # one sweep of the training set per iteration
    train_loss = 0.  # loss
    train_acc = 0.   # accuracy
    for data, label in train_data:  # training set
        with autograd.record():  # autodiff
            output = net(data)  # forward pass
            loss = softmax_cross_entropy(output, label)  # compute the loss
        loss.backward()  # backward pass
        utils.SGD(params, learning_rate / batch_size)  # SGD update, gradient averaged over the batch

        train_loss += nd.mean(loss).asscalar()  # loss
        train_acc += utils.accuracy(output, label)  # accuracy

    test_acc = utils.evaluate_accuracy(test_data, net)  # evaluate on the test set
    print("Epoch %d. Loss: %f, Train acc %f, Test acc %f" % (
        epoch, train_loss / len(train_data),
        train_acc / len(train_data), test_acc))