Example #1
import os

import joblib
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.metrics import ConfusionMatrixDisplay, classification_report
from sklearn.model_selection import (StratifiedKFold, cross_val_score,
                                     train_test_split)
from sklearn.svm import SVC

# get_AMLRun is a project-local helper; a sketch is given after this example


def train(output_dir='outputs', kernel='linear', penalty=1.0):
    # make sure the output directory exists
    os.makedirs(output_dir, exist_ok=True)

    # Safely get the Azure ML run
    run = get_AMLRun()

    # loading the iris dataset
    iris = datasets.load_iris()

    # X -> features, y -> label
    X = iris.data
    y = iris.target
    class_names = iris.target_names

    # dividing X, y into train and test data. Fixed seed for reproducibility
    X_train, X_test, y_train, y_test = \
        train_test_split(X, y, test_size=0.20, random_state=0)

    # create our model - a linear SVM classifier
    svm_model_linear = SVC(kernel=kernel, C=penalty)

    # evaluate the model with stratified 10-fold cross-validation
    kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=1)
    cv_results = cross_val_score(svm_model_linear,
                                 X_train,
                                 y_train,
                                 cv=kfold,
                                 scoring='accuracy')

    print('Cross Validation Mean: ', cv_results.mean())
    print('Cross Validation Std: ', cv_results.std())
    if run is not None:
        run.log_list('Cross Validation Accuracies', cv_results)
        run.log('Cross Validation Mean', cv_results.mean())
        run.log('Cross Validation Std', cv_results.std())

    # now fit on the training set and predict on the held-out test set
    svm_model_linear.fit(X_train, y_train)
    y_pred = svm_model_linear.predict(X_test)

    # model accuracy for X_test
    accuracy = svm_model_linear.score(X_test, y_test)
    print('Accuracy of SVM classifier on test set: {:.2f}'.format(accuracy))
    if run is not None:
        run.log('Accuracy', float(accuracy))

    # Plot non-normalized confusion matrix
    title = 'Test confusion matrix'
    disp = ConfusionMatrixDisplay.from_estimator(
        svm_model_linear, X_test, y_test,
        display_labels=class_names, cmap=plt.cm.Blues)
    disp.ax_.set_title(title)
    print(title)
    print(disp.confusion_matrix)

    if run is not None:
        run.log_image(title, plot=plt)
    else:
        plt.savefig(os.path.join(output_dir, 'confusion_matrix.png'))

    # Plot normalized confusion matrix
    title = 'Normalized test confusion matrix'
    disp = ConfusionMatrixDisplay.from_estimator(
        svm_model_linear, X_test, y_test,
        display_labels=class_names, cmap=plt.cm.Blues,
        normalize='true')
    disp.ax_.set_title(title)
    print(title)
    print(disp.confusion_matrix)

    if run is not None:
        run.log_image(title, plot=plt)
    else:
        plt.savefig(os.path.join(output_dir,
                                 'confusion_matrix_normalised.png'))

    # Print classification report
    print(classification_report(y_test, y_pred))

    # files saved in the "outputs" folder are automatically uploaded into
    # Azure ML Service run history
    model_folder = os.path.join(output_dir, 'model')
    model_path = os.path.join(model_folder, 'mimic-iv.joblib')
    os.makedirs(model_folder, exist_ok=True)
    joblib.dump(svm_model_linear, model_path)
    print('Output saved to', output_dir)
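
All three examples call a get_AMLRun() helper that is not shown here. A minimal sketch of what such a helper might look like, assuming the azureml-core SDK: it returns the current Run when executing inside an Azure ML job and None when running locally, which is why every logging call above is guarded with `if run is not None`.

from azureml.core.run import Run


def get_AMLRun():
    """Return the current Azure ML Run, or None when running offline."""
    run = Run.get_context(allow_offline=True)
    # Local executions get an offline stand-in whose id starts with
    # 'OfflineRun'; treat that case as "no run available".
    if run.id.startswith('OfflineRun'):
        return None
    return run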
Example #2
    # (excerpt continues: the X/y placeholders and hidden layers h1, h2
    # are defined earlier in the script)
    output = tf.layers.dense(h2, n_outputs, name='output')

with tf.name_scope('train'):
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=y, logits=output)
    loss = tf.reduce_mean(cross_entropy, name='loss')
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    train_op = optimizer.minimize(loss)

with tf.name_scope('eval'):
    correct = tf.nn.in_top_k(output, y, 1)
    acc_op = tf.reduce_mean(tf.cast(correct, tf.float32))

init = tf.global_variables_initializer()
saver = tf.train.Saver()
run = get_AMLRun()

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):

        # randomly shuffle training set
        indices = np.random.permutation(training_set_size)
        X_train = X_train[indices]
        y_train = y_train[indices]

        # mini-batch loop over the shuffled training set
        for b_start in range(0, training_set_size, batch_size):
            b_end = b_start + batch_size
            # get a batch
            X_batch, y_batch = X_train[b_start:b_end], y_train[b_start:b_end]

            # one optimization step (assumes X and y are the input
            # placeholders defined earlier in the script)
            sess.run(train_op, feed_dict={X: X_batch, y: y_batch})
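
The excerpt stops inside the batch loop, so acc_op, saver, and run are never exercised. A minimal sketch of how each epoch might be closed out, assuming held-out X_test/y_test arrays exist alongside the training data:

        # end of epoch: evaluate accuracy and log it to Azure ML if available
        acc_train = acc_op.eval(feed_dict={X: X_train, y: y_train})
        acc_test = acc_op.eval(feed_dict={X: X_test, y: y_test})
        print(epoch, 'Train accuracy:', acc_train, 'Test accuracy:', acc_test)
        if run is not None:
            run.log('Train accuracy', float(acc_train))
            run.log('Test accuracy', float(acc_test))

    # checkpoints written under ./outputs are uploaded to the run history
    saver.save(sess, './outputs/model.ckpt')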
Example #3
import logging
import os

import torch
import torch.nn as nn
from torch.utils.data import ConcatDataset, DataLoader

# NDVIDataSet, NDVIConvNet and get_AMLRun are project-local helpers


def train():
    n_epochs = 10
    batch_size = 32
    lr = 0.001
    device = "cpu"

    ndvi_train_dataset = NDVIDataSet("train")
    ndvi_dev_dataset = NDVIDataSet("dev")
    ndvi_test_dataset = NDVIDataSet("test")
    ndvi_dataset = ConcatDataset([ndvi_train_dataset, ndvi_dev_dataset])
    logging.info("Initialized datasets")

    # shuffle the combined train+dev set each epoch
    ndvi_dataloader = DataLoader(ndvi_dataset, batch_size=batch_size,
                                 shuffle=True)
    ndvi_testloader = DataLoader(ndvi_test_dataset, batch_size=batch_size)
    logging.info("Initialized dataloaders")

    ndvi_convnet = NDVIConvNet()
    ndvi_convnet.to(device)
    logging.info("Initialized Model")

    criterion = nn.BCEWithLogitsLoss(reduction="mean")
    optimizer = torch.optim.Adam(ndvi_convnet.parameters(), lr=lr)

    run = get_AMLRun()
    logging.info("Starting run")
    for epoch in range(n_epochs):
        epoch_loss, epoch_acc = 0, 0
        for matrix, label in ndvi_dataloader:
            matrix, label = matrix.to(device), label.to(device)
            # raw logits; BCEWithLogitsLoss applies the sigmoid internally
            logits = ndvi_convnet(matrix)

            batch_loss = criterion(logits, label)

            optimizer.zero_grad()
            batch_loss.backward()
            optimizer.step()

            epoch_loss += batch_loss.cpu().item()
            y_pred = torch.sigmoid(logits)

            pred_y = (y_pred >= 0.5).float()  # hard 0/1 predictions
            num_correct = torch.sum(label == pred_y)
            epoch_acc += num_correct.item()

        logging.info("Epoch Loss: ", epoch_loss)
        logging.info("Epoch Acc: ", epoch_acc / len(ndvi_dataset))

        # evaluate on the held-out test set every 5 epochs
        if epoch % 5 == 0:
            test_acc = 0
            ndvi_convnet.eval()  # disable dropout/batch-norm updates
            for matrix, label in ndvi_testloader:
                with torch.no_grad():
                    matrix, label = matrix.to(device), label.to(device)
                    logits = ndvi_convnet(matrix)

                    y_pred = torch.sigmoid(logits)
                    pred_y = (y_pred >= 0.5).float()  # hard 0/1 predictions
                    num_correct = torch.sum(label == pred_y)  # a Tensor
                    test_acc += num_correct.item()

            logging.info("Test Acc: %s", test_acc / len(ndvi_test_dataset))
            if run is not None:
                run.log('Test Acc', test_acc / len(ndvi_test_dataset))
            ndvi_convnet.train()  # back to training mode

        if run is not None:
            run.log('Train Loss', epoch_loss)
            run.log('Training Accuracy', epoch_acc / len(ndvi_dataset))

    logging.info("Saving Model")
    os.makedirs("./models", exist_ok=True)
    torch.save(ndvi_convnet.state_dict(), "./models/ndvi-mlmodel.pt")