Example #1
# Assumed imports for this snippet; helpers such as load_images, augmentation,
# postprocess, images_to_vectors, build_network, train, predict, print_network,
# split_special and write_csv come from the surrounding project module.
import numpy as np
import theano.tensor as T
def train_single_with_warmup(train_dir, test_dir, network=build_rgb_cnn, num_epochs=400, flip=200, input_size=45,
                             learning_rate=0.005, gray=False, augment=True):
    assert num_epochs > flip, 'num_epochs must exceed the warmup epochs (flip)'

    train_images = load_images(train_dir, is_train=True, permute=False)
    training_labels = [img.label for img in train_images]
    classes_set = sorted(set(training_labels))
    class_to_index = {key: index for index, key in enumerate(classes_set)}

    print('Building network...')
    input_var = T.tensor4('inputs')
    target_var = T.ivector('targets')
    neural_network = network(input_size, input_var)
    print_network(neural_network)
    train_fn, val_fn, predict_fn = build_network(neural_network, input_var, target_var, learning_rate=learning_rate)

    if flip > 0:
        # Create validation and training set
        (trainset, valset) = split_special(train_images, 2, True)[0]  # Use the old validation way

        if augment:
            print("Augmenting trainset images")
            trainset = augmentation(trainset)
            print("Augmented to %d images" % len(trainset))

        print('Post processing train and validation set')
        postprocess(trainset, size=input_size, grayscale=gray)
        postprocess(valset, size=input_size, grayscale=gray)

        x_train = images_to_vectors(trainset, input_size, num_dimensions=1 if gray else 3)
        x_val = images_to_vectors(valset, input_size, num_dimensions=1 if gray else 3)

        for img in train_images:
            img.disposeImage()

        y_train = np.array([class_to_index[img.label] for img in trainset], dtype=np.uint8)
        y_val = np.array([class_to_index[img.label] for img in valset], dtype=np.uint8)
        print('Training %d iterations as a warmup' % flip)
        train(train_fn, val_fn, predict_fn, x_train, y_train, x_val, y_val, flip, show_validation=True)

    # After the warmup, reload and train on the full training set
    train_images = load_images(train_dir, is_train=True, permute=False)
    if augment:
        train_images = augmentation(train_images)
    postprocess(train_images, size=input_size, grayscale=gray)
    x_train = images_to_vectors(train_images, input_size, num_dimensions=1 if gray else 3)
    y_train = np.array([class_to_index[img.label] for img in train_images], dtype=np.uint8)

    print('Training %d iterations on full set' % (num_epochs if flip <= 0 else num_epochs - flip))
    train(train_fn, val_fn, predict_fn, x_train, y_train,
          None if flip <= 0 else x_val, None if flip <= 0 else y_val,
          num_epochs if flip <= 0 else num_epochs - flip,
          show_validation=False)

    # Predict
    print('Predicting testset')
    test_images = sorted(load_images(test_dir, is_train=False, permute=False), key=lambda x: x.identifier)
    postprocess(test_images, size=input_size, grayscale=gray)
    x_test = images_to_vectors(test_images, input_size, num_dimensions=1 if gray else 3)
    predictions = predict(predict_fn, x_test)
    print('Writing to CSV...')
    write_csv(test_images, predictions, classes_set, filename='result_warmup.csv')
    print('Finished.')
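
A minimal usage sketch (not from the original source): the directory paths below are assumptions; the remaining arguments are the function's own defaults.

# Hypothetical call: warm up for 200 epochs on a train/validation split,
# then train the remaining 200 epochs on the full augmented training set.
train_single_with_warmup('data/train', 'data/test',
                         network=build_rgb_cnn,
                         num_epochs=400, flip=200,
                         input_size=45, learning_rate=0.005,
                         gray=False, augment=True)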
Example #2
# Assumed imports, as in Example #1; the helper functions come from the same
# surrounding project module.
import numpy as np
import theano.tensor as T
def cross_validate_neural(train_dir, networks, weights, epochs, input_sizes,
                          learning_rates, grays, num_folds=5, augment=True):
    print('Cross-validation using %d folds' % num_folds)
    if isinstance(weights, tuple) and len(weights) > 1:
        print('Using weights %s' % ','.join(str(x) for x in weights))

    train_images = load_images(train_dir, is_train=True, permute=False)
    training_labels = [img.label for img in train_images]
    classes_set = sorted(set(training_labels))

    # Wrap single values in one-element tuples; note that (x) alone is not a
    # tuple, the trailing comma is required.
    if not isinstance(networks, tuple):
        networks = (networks,)
    if not isinstance(weights, tuple):
        weights = (weights,)
    if not isinstance(epochs, tuple):
        epochs = (epochs,)
    if not isinstance(input_sizes, tuple):
        input_sizes = (input_sizes,)
    if not isinstance(learning_rates, tuple):
        learning_rates = (learning_rates,)
    if not isinstance(grays, tuple):
        grays = (grays,)

    # Create validation and training set
    val_losses = []
    train_losses = []
    val_accs = []

    for trainset, valset in split_special(train_images, num_folds, num_folds == 2):

        # For each network in ensemble (if available)
        predictions = []
        print('Evaluating fold... (%d models in ensemble)' % len(weights))
        if augment:
            trainset = augmentation(trainset)
            print("Augmented to %d images" % len(trainset))

        # Convert class labels to uint8 indices
        y_train = np.array([class_to_index[img.label] for img in trainset], dtype=np.uint8)
        y_val = np.array([class_to_index[img.label] for img in valset], dtype=np.uint8)

        for input_size, network, num_epochs, gray, learning_rate in zip(
                input_sizes, networks, epochs, grays, learning_rates):
            # Postprocess images
            postprocess(trainset, size=input_size, grayscale=gray)
            postprocess(valset, size=input_size, grayscale=gray)

            # Get their numpy vectors to feed to lasagne
            x_train = images_to_vectors(trainset, input_size, num_dimensions=1 if gray else 3)
            x_val = images_to_vectors(valset, input_size, num_dimensions=1 if gray else 3)

            # Prepare theano tensors for classes and inputs
            input_var = T.tensor4('inputs')
            target_var = T.ivector('targets')

            neural_network = network(input_size, input_var)
            train_fn, val_fn, predict_fn = build_network(neural_network, input_var, target_var)
            training_loss, val_loss, val_acc = train(train_fn, val_fn, predict_fn, x_train, y_train, x_val, y_val,
                                                     num_epochs)

            if len(weights) <= 1:
                val_losses.append(val_loss)
                train_losses.append(training_loss)
                val_accs.append(val_acc)
            else:
                # Collect this model's validation predictions; the weighted
                # log loss is computed once per fold below.
                predictions.append(predict(predict_fn, x_val))
        # Calculate weighted logloss for this fold
        if len(weights) > 1:
            val_losses.append(logloss(predictions, weights, y_val, num_classes=81))

    # Process metrics
    if len(weights) <= 1:
        mean_val_loss = np.mean(val_losses)
        mean_train_loss = np.mean(train_losses)
        mean_accuracy = np.mean(val_accs)
        std_val_loss = np.std(val_losses)
        std_val_acc = np.std(val_accs)
        std_train_loss = np.std(train_losses)

        print('Mean validation loss: %f (std: %f)' % (mean_val_loss, std_val_loss))
        print('Mean training loss: %f (std: %f)' % (mean_train_loss, std_train_loss))
        print('Mean validation accuracy: %f (std: %f)' % (mean_accuracy, std_val_acc))
    else:
        print('Weighted logloss: %f (std: %f)' % (np.mean(val_losses), np.std(val_losses)))
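
A hedged usage sketch (not from the original source): build_gray_cnn and all hyperparameter values below are assumptions for illustration; only build_rgb_cnn appears in the examples above.

# Hypothetical call: 5-fold cross-validation of a two-model ensemble,
# mixing an RGB network and an (assumed) grayscale network with fixed weights.
cross_validate_neural('data/train',
                      networks=(build_rgb_cnn, build_gray_cnn),
                      weights=(0.6, 0.4),
                      epochs=(400, 400),
                      input_sizes=(45, 45),
                      learning_rates=(0.005, 0.005),
                      grays=(False, True),
                      num_folds=5)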