# Beispiel #1 (Example 1) — score: 0
def main():
    """Train a Naive Bayes classifier on the wine dataset until it reaches
    98% test accuracy, then project the test set to 2D via PCA and plot it.

    NOTE(review): keeps resampling random train/test splits until the
    accuracy target is met; this can loop for a long time if 0.98 is
    unattainable for this data.
    """
    data = pd.read_csv('wine.csv')
    y = data['class'].values
    X = data.drop('class', axis=1).values

    X = normalize(X)  # feature normalization
    label = ['best', 'better', 'good']

    # Repeatedly draw a fresh 60/40 train/test split and fit a new
    # classifier until the test accuracy reaches 0.98. This replaces the
    # original code, which duplicated the whole fit/predict/score sequence
    # once before the loop and again inside it.
    accuracy = 0.0
    while accuracy < 0.98:
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.4)

        clf = NaiveBayes()  # Naive Bayes classifier
        clf.fit(X_train, y_train)

        y_pred = clf.predict(X_test)
        accuracy = accuracy_score(y_test, y_pred)

    print("Accuracy:", accuracy)

    # Reduce to two dimensions with PCA and plot the result
    Plot().plot_in_2d(X_test,
                      y_pred,
                      title="Naive Bayes",
                      accuracy=accuracy,
                      legend_labels=label)
# Beispiel #2 (Example 2) — score: 0
def evaluate(data_root='/home/yxk/data/', weights_path='params.pth'):
    """Evaluate a trained FCN8s segmentation model on the VOC2012 val split.

    Args:
        data_root: directory containing the VOC2012 dataset
            (default preserves the original hard-coded path).
        weights_path: path to the saved model state dict
            (default preserves the original hard-coded file name).

    Prints overall accuracy, per-class accuracy, mean IU and
    frequency-weighted accuracy, all scaled to percentages, as computed
    by ``tools.accuracy_score``.
    """
    use_cuda = torch.cuda.is_available()
    path = os.path.expanduser(data_root)
    val_data = voc_loader.VOC2012ClassSeg(root=path,
                                          split='val',
                                          transform=True)
    # The loader is only consulted for its length in the progress message;
    # evaluation indexes val_data directly, one sample at a time.
    val_loader = torch.utils.data.DataLoader(val_data,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=5)
    print('load model .....')
    vgg_model = models.VGGNet(requires_grad=True)
    fcn_model = models.FCN8s(pretrained_net=vgg_model, n_class=n_class)
    fcn_model.load_state_dict(torch.load(weights_path))

    if use_cuda:
        fcn_model.cuda()
    fcn_model.eval()

    label_trues, label_preds = [], []
    # Inference only: disable autograd bookkeeping (replaces the deprecated
    # Variable() wrapper, which is a no-op in modern PyTorch).
    with torch.no_grad():
        for idx in range(len(val_data)):
            img, label = val_data[idx]
            img = img.unsqueeze(0)  # add batch dimension -> 1 x C x H x W
            if use_cuda:
                img = img.cuda()

            out = fcn_model(img)  # 1 x n_class x H x W

            # argmax over the class channel -> H x W prediction map
            pred = out.data.max(1)[1].squeeze_(1).squeeze_(0)

            if use_cuda:
                pred = pred.cpu()
            label_trues.append(label.numpy())
            label_preds.append(pred.numpy())

            if idx % 30 == 0:
                print('evaluate [%d/%d]' % (idx, len(val_loader)))

    metrics = tools.accuracy_score(label_trues, label_preds)
    metrics = np.array(metrics)
    metrics *= 100  # fractions -> percentages
    print('''\
            Accuracy: {0}
            Accuracy Class: {1}
            Mean IU: {2}
            FWAV Accuracy: {3}'''.format(*metrics))
# Beispiel #3 (Example 3) — score: 0
def oneFit(X,
           y,
           activation="relu",
           hidden_layers=(20, 20),
           test_size=0.2,
           loss=False):
    '''
    Run a single train/evaluate cycle of the neural-network classifier.

    loss: when True, return the loss recorded during iteration instead
          of the test accuracy.
    return accuracy (float) or array of per-iteration losses
    '''
    # Random split into training and held-out test portions.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=test_size)
    # Collapse the 2d target array to 1d class labels for scoring.
    y_label = array2Label(y_test)

    # Build and train the classifier (ReLU activation by default).
    model = NeuralNetworkClassifier(hidden_layer_sizes=hidden_layers,
                                    activation=activation)
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)

    # random guess 0.006
    if loss:
        return model.getIterLoss()
    return accuracy_score(y_label, predictions)
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # print statistics
        avg_loss += loss.data[0]
    # Compute stats
    net.eval()
    avg_loss /= len(trainloader)
    val_loss = compute_loss(criterion, validationloder, net)
    VAL_LOSS.append(val_loss)
    if epoch % display_step == 0:
        test_acc = accuracy_score(net, testloader)
        val_acc = accuracy_score(net, validationloder)
        test_loss = compute_loss(criterion, testloader, net)
        train_acc = accuracy_score(net, trainloader)
        print(
            "Epoch: {}, Time: {:.0f}, Train loss: {:.3f}, Validation accuracy: {:.3f}, Validation loss: {:.3f}"
            .format(epoch,
                    time() - start_time, avg_loss, val_acc, val_loss))

        TEST_ACC.append(test_acc)
        VAL_ACC.append(val_acc)
        TRAIN_ACC.append(train_acc)

        TEST_LOSS.append(test_loss)
        TRAIN_LOSS.append(avg_loss)
    if len(VAL_LOSS) > early_stopping: