def main(argv):
    """Train an Estimator-based MLP instrument classifier, pickle it, and
    plot a normalized confusion matrix for the held-out test set.

    Args:
        argv: sys.argv-style list; argv[1:] is handed to the module-level
            argument parser (expects batch_size and train_steps options).
    """
    args = parser.parse_args(argv[1:])

    (train_x, train_y), (test_x, test_y) = instrument_data.load_data()

    # One numeric feature column per input feature.
    my_feature_columns = [
        tf.feature_column.numeric_column(key=key) for key in train_x.keys()
    ]

    classifier = tf.estimator.Estimator(model_fn=estimator_model,
                                        model_dir='temp/instruments_temp',
                                        params={
                                            'feature_columns':
                                            my_feature_columns,
                                            'hidden_units': [16, 16],
                                            'n_classes':
                                            len(instrument_data.INSTRUMENTS),
                                        })

    classifier.train(input_fn=lambda: instrument_data.train_input_fn(
        train_x, train_y, args.batch_size),
                     steps=args.train_steps)

    evaluate(classifier)

    # NOTE(review): tf.estimator.Estimator instances hold graph/session
    # state and are generally not safely picklable — verify this
    # round-trips before relying on model_mlp.pickle.
    with open('model_mlp.pickle', 'wb') as f:
        pickle.dump(classifier, f, pickle.HIGHEST_PROTOCOL)

    predictions = classifier.predict(
        input_fn=lambda: instrument_data.eval_input_fn(test_x, test_y, args.
                                                       batch_size))
    # predict() yields one dict per test example; zipping against test_y
    # caps consumption at len(test_y).  The label itself is unused here
    # (original bound it to an unused `expec` variable).
    prediction_list = [
        pred_dict['class_ids'][0] for pred_dict, _ in zip(predictions, test_y)
    ]

    confusion_matrix = tf.compat.v2.math.confusion_matrix(
        test_y,
        prediction_list,
    )

    # TF1-style session run to materialize the confusion-matrix tensor.
    with tf.Session() as sess:
        np.set_printoptions(precision=2)

        cm = sess.run(confusion_matrix)
        plt.figure()
        plot_confusion_matrix(
            cm,
            classes=instrument_data.INSTRUMENTS,
            normalize=True,
        )
        plt.show()
# ---- Example no. 2 (score: 0) ----
def main():
    """Train NewDensenet with 3-fold cross-validation on a 4-class task,
    using a pixelwise NLL loss over broadcast image-level labels, and log
    losses, accuracies, sample figures and a confusion matrix to
    TensorBoard via SummaryWriter.
    """
    writer = SummaryWriter(max_queue=10000)
    # NOTE(review): [5:] presumably strips a leading 'runs/' prefix from
    # the log dir so it can double as a checkpoint folder name — confirm.
    writer_logdir = str(writer.log_dir)[5:]
    print('writer_logdir :', writer_logdir)
    os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
    # Fix all RNGs (imgaug, numpy, torch CPU + all GPUs) for reproducibility.
    ia.seed(100)
    np.random.seed(100)
    torch.manual_seed(100)
    torch.cuda.manual_seed(100)
    torch.cuda.manual_seed_all(100)

    # Checkpoints are written under a folder named after this run.
    Model = writer_logdir
    if not os.path.isdir('./{}'.format(Model)):
        os.mkdir('./{}'.format(Model))


    ##for matplotlib absolute colorbar
    # Fixed level->color mapping so prediction and GT images share a palette.
    levels = [0, 1, 2, 3, 4]
    colors = ['green', 'yellow', 'blue', 'red']
    cmap, norm = matplotlib.colors.from_levels_and_colors(levels, colors)


    device = 'cuda'

    # One independent net/optimizer/scheduler per fold.
    nets = [NewDensenet() for _ in range(3)]  # 3 is K-fold number
    optimizers = [optim.Adam(nets[i].parameters(), lr=0.001, weight_decay=0.00001) for i in range(3)]
    schedulers = [optim.lr_scheduler.MultiStepLR(optimizer, milestones=[30, 40, 50], gamma=0.3) for optimizer in
                  optimizers]
    # NLLLoss expects log-probabilities — assumes NewDensenet ends in
    # log_softmax; TODO confirm.
    criterion = nn.NLLLoss(weight=None, reduction='mean')
    dataset = make_dataset() #[(dataset_train0,dataset_val0),(dataset_train1,dataset_val),(dataset_train1,dataset_val1)]

    ##viewing some images for sanity check
    #image_show(dataset_train=dataset[0][0], dataset_val=dataset[0][1])

    for k_fold in [2, 1, 0]:
        Dataloader_train = DataLoader(dataset[k_fold][0], batch_size=2, shuffle=True, num_workers=4, drop_last=True)
        Dataloader_val = DataLoader(dataset[k_fold][1], batch_size=2, shuffle=True, num_workers=4, drop_last=True)

        # Total iteration counts (40 epochs) — used only for progress prints.
        total_iter_train = [40 * len(Dataloader_train), 40 * len(Dataloader_train), 40 * len(Dataloader_train)]
        total_iter_val = [40 * len(Dataloader_val), 40 * len(Dataloader_val), 40 * len(Dataloader_val)]

        net = nets[k_fold]
        net = nn.DataParallel(net)
        net.to(device)

        optimizer = optimizers[k_fold]
        scheduler = schedulers[k_fold]

        running_loss = 0
        iter_train = 0
        iter_val = 0

        train_acc_whole_epoch = [0, ]
        val_acc_whole_epoch = [0, ]
        train_acc_one_epoch = 0
        val_acc_one_epoch = 0

        # NOTE(review): these best-model trackers are never updated below —
        # dead state, or missing best-checkpoint logic.
        best_epoch = None
        best_model_st_dct = None
        best_optimizer_st_dct = None
        best_val_acc = 0

        epoch = 0
        whole_epoch = 40
        while epoch < whole_epoch:
            # NOTE(review): since PyTorch 1.1, scheduler.step() should be
            # called AFTER the epoch's optimizer steps; calling it first
            # shifts the LR schedule by one epoch.
            scheduler.step()
            for i, (images, targets) in enumerate(Dataloader_train):
                iter_train += 1
                net.train()

                images = images.type('torch.FloatTensor')
                targets = targets.type('torch.LongTensor')

                images, targets = images.to(device), targets.to(device)

                # Broadcast each image-level label over a 64x64 map so the
                # pixelwise loss applies (batch size is hard-coded to 2).
                gt_pixelwise = torch.ones((2, 64, 64)).type('torch.LongTensor').to(device)
                gt_pixelwise = torch.mul(gt_pixelwise, targets.view(-1, 1, 1))


                scores = net(images)

                loss = criterion(scores, gt_pixelwise)  # scores = (N, 4, 63, 63), gt_pixelwise = (N, 63, 63)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                with torch.set_grad_enabled(False):
                    ##for running loss
                    # Exponential moving average, heavily weighted toward
                    # the latest batch.
                    running_loss = 0.1 * running_loss + 0.9 * loss
                    if (i + 1) % 5 == 0:
                        print(
                            'train mode | epoch : [%d/%d] | #fold : [%d/3] | running_loss : [%.5f] iterations : [%d/%d]'
                            % (epoch + 1, whole_epoch, k_fold + 1, running_loss, iter_train, total_iter_train[k_fold]))

                    pred_pixelwise = torch.argmax(scores, dim=1)  # pred_pixelwise = (batch_size, 63, 63), dype = torch.LongTensor

                    ##for accuracy
                    # Image-level prediction = majority vote over the
                    # pixelwise class map.
                    predicts = torch.zeros((2,), dtype=torch.long).to(device)

                    for p in range(2):
                        zero_pixnum = (pred_pixelwise[p] == 0).sum()
                        one_pixnum = (pred_pixelwise[p] == 1).sum()
                        two_pixnum = (pred_pixelwise[p] == 2).sum()
                        thr_pixnum = (pred_pixelwise[p] == 3).sum()
                        predicts[p:p+1] = torch.argmax(
                            torch.IntTensor([zero_pixnum, one_pixnum, two_pixnum, thr_pixnum]), dim=0)

                    true1_false0 = targets == predicts
                    train_acc_one_epoch += true1_false0.sum().item()


                    ##for draw
                    # RGB, pred, GT
                    # Log an (RGB | prediction | GT) triptych for the last
                    # 5 training iterations of each epoch.
                    if i >= len(Dataloader_train) - 5:
                        print('let\'s draw!')
                        rand = np.random.randint(2)
                        rgb = images[rand].cpu().numpy()  # dtype = float, (1024, 1024, 3)
                        pred_pixelwise = pred_pixelwise[rand].cpu().numpy()  # dtype = # int64?, (64, 64)
                        GT = gt_pixelwise[rand].cpu().numpy()  # dtype = long, (64, 64) either 0,1,2,3
                        # Pin one pixel per class so the color mapping is
                        # stable across figures.
                        GT[0][0] = 0
                        GT[0][1] = 1
                        GT[0][2] = 2
                        GT[0][3] = 3
                        fig = plt.figure(figsize=(9, 3))
                        ax1 = fig.add_subplot(131)
                        ax2 = fig.add_subplot(132)
                        ax3 = fig.add_subplot(133)

                        im1 = ax1.imshow(np.transpose(rgb, (1, 2, 0)) / 255)
                        im2 = ax2.imshow(pred_pixelwise, cmap=cmap, norm=norm, interpolation='none')
                        im3 = ax3.imshow(GT, cmap=cmap, norm=norm, interpolation='none')

                        fig.set_constrained_layout_pads(w_pad=2. / 72., h_pad=2. / 72.,
                                                        hspace=0., wspace=0.)
                        # CB = fig.colorbar(ax2, shrink=0.8, extend='both')

                        divider = make_axes_locatable(ax2)
                        cax = divider.append_axes('right', size='3%', pad=0.03)
                        fig.colorbar(im2, cax=cax, orientation='vertical')

                        writer.add_figure('Train|{}fold|{}epoch|last5_iter_figures'.format(k_fold + 1, epoch), fig,
                                          epoch + 1)

            ##for running loss per epoch
            writer.add_scalar('{}_fold_running_loss'.format(k_fold + 1), running_loss, epoch + 1)

            ##for train accuracy per epoch
            train_acc_one_epoch /= len(Dataloader_train.dataset)
            writer.add_scalar('{}fold_train_acc'.format(k_fold + 1), train_acc_one_epoch, epoch + 1)
            train_acc_one_epoch = 0

            # 4x4 image-level confusion matrix accumulated over validation.
            confusion_matrixx = torch.zeros(4, 4)

            for i, (images, targets) in enumerate(Dataloader_val):
                iter_val += 1
                net.eval()

                with torch.set_grad_enabled(False):
                    images = images.type('torch.FloatTensor')
                    targets = targets.type('torch.LongTensor')

                    images, targets = images.to(device), targets.to(device)
                    gt_pixelwise = torch.ones((2, 64, 64)).type('torch.LongTensor').to(device)
                    gt_pixelwise = torch.mul(gt_pixelwise, targets.view(-1, 1, 1))

                    scores = net(images)
                    pred_pixelwise = torch.argmax(scores, dim=1)

                    # for accuracy
                    # Majority vote over the pixel map, same as training.
                    predicts = torch.zeros((2,), dtype=torch.long).to(device)

                    for p in range(2):
                        zero_pixnum = (pred_pixelwise[p] == 0).sum()
                        one_pixnum = (pred_pixelwise[p] == 1).sum()
                        two_pixnum = (pred_pixelwise[p] == 2).sum()
                        thr_pixnum = (pred_pixelwise[p] == 3).sum()
                        predicts[p:p + 1] = torch.argmax(
                            torch.IntTensor([zero_pixnum, one_pixnum, two_pixnum, thr_pixnum]), dim=0)

                    true1_false0 = targets == predicts
                    val_acc_one_epoch += true1_false0.sum().item()

                    print('val mode | epoch : [%d/%d] | #fold : [%d/3] | iterations : [%d/%d]'
                          % (epoch + 1, whole_epoch, k_fold + 1, iter_val, total_iter_val[k_fold]))

                    ##for draw
                    # RGB, pred, GT
                    if i >= len(Dataloader_val) - 5:
                        print('let\'s draw!')
                        rand = np.random.randint(2)
                        rgb = images[rand].cpu().numpy()  # dtype = float, (1024, 1024, 3)
                        pred_pixelwise = pred_pixelwise[rand].cpu().numpy()  # dtype = # int64?, (64, 64)
                        # NOTE(review): the cast to 'torch.cuda.LongTensor'
                        # immediately before .cpu() is redundant.
                        GT = gt_pixelwise[rand].type(
                            'torch.cuda.LongTensor').cpu().numpy()  # dtype = long, (64, 64) either 0,1,2,3
                        GT[0][0] = 0
                        GT[0][1] = 1
                        GT[0][2] = 2
                        GT[0][3] = 3

                        fig = plt.figure(figsize=(9, 3))
                        ax1 = fig.add_subplot(131)
                        ax2 = fig.add_subplot(132)
                        ax3 = fig.add_subplot(133)

                        im1 = ax1.imshow(np.transpose(rgb, (1, 2, 0)) / 255)
                        im2 = ax2.imshow(pred_pixelwise, cmap=cmap, norm=norm, interpolation='none')
                        im3 = ax3.imshow(GT, cmap=cmap, norm=norm, interpolation='none')

                        fig.set_constrained_layout_pads(w_pad=2. / 72., h_pad=2. / 72.,
                                                        hspace=0., wspace=0.)
                        # CB = fig.colorbar(ax2, shrink=0.8, extend='both')

                        divider = make_axes_locatable(ax2)
                        cax = divider.append_axes('right', size='3%', pad=0.03)
                        fig.colorbar(im2, cax=cax, orientation='vertical')

                        writer.add_figure('Val|{}fold|{}epoch|last5_iter_figures'.format(k_fold + 1, epoch), fig,
                                          epoch + 1)

                    # confusion_matrix
                    for t, p in zip(targets.view(-1), predicts.view(-1)):
                        confusion_matrixx[t.long(), p.long()] += 1

            val_acc_one_epoch /= len(Dataloader_val.dataset)
            writer.add_scalar('{}fold_val_acc'.format(k_fold + 1), val_acc_one_epoch, epoch + 1)
            val_acc_whole_epoch.append(val_acc_one_epoch)

            fig = plot_confusion_matrix(confusion_matrixx, classes=['benign', 'cancer1', 'cancer2', 'cancer3'],
                                        title='ConFusionMaTrix')
            writer.add_figure('{}fold confusion_matrix'.format(k_fold + 1), fig, epoch + 1)
            val_acc_one_epoch = 0
            epoch += 1

            # Save the model
            # NOTE(review): with whole_epoch = 40, `epoch > 50` is never
            # true, so checkpoints are never written.  Even if it fired,
            # val_acc_one_epoch was just reset to 0 above (filename would
            # always say valAcc0), and the 'scheduler_state_dict:' key
            # carries a stray trailing colon.
            if epoch > 50:
                if not os.path.isdir('./{}/{}fold'.format(Model, k_fold+1)):
                    os.mkdir('./{}/{}fold'.format(Model, k_fold+1))

                PATH = './{}/{}fold/epoch{} valAcc{}.tar'.format(Model, k_fold+1, epoch+1, val_acc_one_epoch)
                torch.save({
                     'epoch': epoch+1,
                     'model_state_dict': net.state_dict(),
                     'optimizer_state_dict': optimizer.state_dict(),
                     'scheduler_state_dict:':scheduler.state_dict()

                }, PATH)
# ---- Example no. 3 (score: 0) ----
    # Compile with Adam at the caller-supplied learning rate; categorical
    # cross-entropy matches one-hot encoded labels.
    model.compile(optimizer=Adam(learning_rate=learning_rate),
                  loss=keras.losses.categorical_crossentropy,
                  metrics=['accuracy'])

    # Train with explicit step counts per epoch / validation pass.
    # NOTE(review): assumes train_batches/valid_batches are repeating
    # generators sized consistently with train_steps/valid_steps — confirm.
    model.fit(x=train_batches,
              validation_data=valid_batches,
              steps_per_epoch=train_steps,
              validation_steps=valid_steps,
              epochs=epochs,
              shuffle=True,
              verbose=verbose)

# Save the model, but only if no checkpoint exists yet at this path.
# (Original used the `... is False` anti-pattern; PEP 8: never compare
# booleans with `is` — use `not`.)
if not os.path.isfile(
        '/home/anirudh/Documents/Keras_tutorials/dogs-vs-cats/vgg16.h5'):
    model.save('/home/anirudh/Documents/Keras_tutorials/dogs-vs-cats/vgg16.h5')

# Prediction using the saved model
print("testing in progress")
print(test_batches.classes)
new_model = load_model(
    '/home/anirudh/Documents/Keras_tutorials/dogs-vs-cats/vgg16.h5')
print(new_model.summary())
# NOTE(review): steps=10 assumes the test generator yields exactly 10
# batches — confirm against len(test_batches).
prediction = new_model.predict(x=test_batches, verbose=1, steps=10)
print(np.round(prediction))
# Rows follow test_batches.classes ordering; predictions are argmaxed
# over the class axis.
cm = confusion_matrix(y_true=test_batches.classes,
                      y_pred=np.argmax(prediction, axis=-1))
cm_plot_labels = ['cat', 'dog']
plot_confusion_matrix(cm=cm, classes=cm_plot_labels, title='Confusion Matrix')
print('Test set:', X_test.shape, y_test.shape)

# %% Modeling
# use balanced class weight due to the higher number of defaulters compared to non-defaulters
# NOTE(review): 'balanced' reweights by inverse class frequency regardless
# of direction; confirm the stated imbalance against the data.
LR = LogisticRegression(solver='liblinear', class_weight='balanced')
LR.fit(X_train, y_train)
print(LR)

# predict
y_pred = LR.predict(X_test)
report = classification_report(y_test, y_pred)
matrix = confusion_matrix(y_test, y_pred)


# %%
# Plot, save and echo the normalized confusion matrix and report.
plot_confusion_matrix(matrix, classes=['default=0', 'default=1'], normalize=True, title='Logistic Regression '
                                                                                        'Confusion matrix')
plt.savefig("Logistic Regression Confusion matrix.png", bbox_inches='tight')
plt.show()
print(f"Confusion Matrix:\n{matrix}")
print(f"Classification Report:\n{report}")

# %%
# Append the Logistic Regression metrics to the shared report file.
# Use contextlib.redirect_stdout instead of manually swapping sys.stdout:
# the manual swap is not exception-safe (a failing print would leave
# sys.stdout redirected into a soon-to-be-closed file).
from contextlib import redirect_stdout

original_stdout = sys.stdout  # kept for compatibility with any later use
with open("Model_Reports.txt", 'a') as f:
    with redirect_stdout(f):
        print("*****Logistic Regression Report******\n")
        print("Confusion Matrix:")
        print(matrix)
        print("Classification Report:")
        print(report)
# ---- Example no. 5 (score: 0) ----
# 80/20 split; NOTE(review): no random_state, so the split (and all
# downstream metrics) is not reproducible between runs — confirm intended.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
print('Train set:', X_train.shape, y_train.shape)
print('Test set:', X_test.shape, y_test.shape)

# %% Modeling
# Random Forest with a large ensemble; default class weighting.
RF = RandomForestClassifier(n_estimators=1000)
RF.fit(X_train, y_train)

# predict
y_pred = RF.predict(X_test)
report = classification_report(y_test, y_pred)
matrix = confusion_matrix(y_test, y_pred)

# %%
# Plot, save and echo the normalized confusion matrix and report.
plot_confusion_matrix(matrix,
                      classes=['default=0', 'default=1'],
                      normalize=True,
                      title='Random Forest Confusion matrix')
plt.savefig("Random Forest Confusion matrix.png", bbox_inches='tight')
plt.show()
print(f"Confusion Matrix:\n{matrix}")
print(f"Classification Report:\n{report}")

# %%
# Append the Random Forest metrics to the shared report file.
# BUG FIX: the original assigned sys.stdout = f and never restored it, so
# after the `with` block sys.stdout pointed at a CLOSED file and every
# later print would raise ValueError.  contextlib.redirect_stdout restores
# stdout automatically, even on exceptions.
from contextlib import redirect_stdout

original_stdout = sys.stdout  # kept for compatibility with any later use
with open("Model_Reports.txt", 'a') as f:
    with redirect_stdout(f):
        print("*****Random Forest Report******\n")
        print("Confusion Matrix:")
        print(matrix)
        print("Classification Report:")
        print(report)
# ---- Example no. 6 (score: 0) ----
    # Restore the previously pickled CNN classifier.
    # NOTE(review): unpickling executes arbitrary code — only load pickles
    # produced by this project.
    with open('model_cnn.pickle', 'rb') as f:
        classifier = pickle.load(f)

    (train_x, train_y), (test_x, test_y) = instrument_data_cnn.load_data()
    # Append a trailing axis so test inputs carry an explicit channel
    # dimension for the CNN.
    test_x = test_x.reshape(list(test_x.shape[:]) + [-1])

    evaluate(classifier, test_x, test_y)

    predictions = classifier.predict(test_x)
    prediction_list = []
    for pred in predictions:
        # Index of the highest-scoring class for this example
        # (np.where returns a tuple of index arrays; take the first hit).
        class_id = np.where(pred == max(pred))
        prediction_list.append(class_id[0][0])

    confusion_matrix = tf.compat.v2.math.confusion_matrix(
        test_y,
        prediction_list,
    )

    # TF1-style session run to materialize the confusion-matrix tensor.
    with tf.Session() as sess:
        np.set_printoptions(precision=2)

        cm = sess.run(confusion_matrix)
        plt.figure()
        plot_confusion_matrix(
            cm,
            # NOTE(review): labels come from instrument_data while the data
            # came from instrument_data_cnn — confirm the class lists match.
            classes=instrument_data.INSTRUMENTS,
            normalize=True,
        )
        plt.show()