Example #1
def predict(filename):
	with open(filename, 'rb') as f:
		model = pickle.load(f)

	print "Loading test images..."

	labels = ImageIO().load_labels()
	X,images = ImageIO().load_test_full()

	print "Performing diversive augmentation and prediction..."

	y_pred = diversive_augment(X, model)

	print "Writing to file..."

	df = DataFrame(np.round(y_pred, 8), index = images, columns = labels)
	df.index.name = 'image'
	df = df[labels]  # keep column order consistent with the loaded label names
	df.to_csv('out.csv')

	print "Gzipping..."

	call("gzip -c out.csv > out.csv.gz", shell=True)

	print "Done! File saved to out.csv and out.csv.gz"
Example #2
def compute_validation_predictions(model_id, validation_set):
	d = importlib.import_module("nets.net_" + model_id)
	model, X, y = d.define_net()

	model.load_params_from(params.SAVE_URL + "/" + model_id + "/best_weights")

	# Lower batch size since TTA multiplies batch size by 16
	params.BATCH_SIZE = 32

	io = ImageIO()
	mean, std = io.load_mean_std()

	# Read training labels for the keys
	y = util.load_labels()
	keys = y.index.values

	model.batch_iterator_predict = TTABatchIterator(keys, params.BATCH_SIZE, std, mean, cv = True)
	print "TTAs per image: %i, augmented batch size: %i" % (model.batch_iterator_predict.ttas, model.batch_iterator_predict.ttas * params.BATCH_SIZE)

	padded_batches = int(ceil(validation_set.shape[0] / float(params.BATCH_SIZE)))

	pred = model.predict_proba(validation_set)
	pred = pred.reshape(padded_batches, model.batch_iterator_predict.ttas, params.BATCH_SIZE)
	pred = np.mean(pred, axis = 1)
	pred = pred.reshape(padded_batches * params.BATCH_SIZE)

	# Remove padded lines
	pred = pred[:validation_set.shape[0]]

	return pred
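# Toy, self-contained illustration of the reshape-and-mean bookkeeping above
# (sizes are made up). Assuming the TTA iterator emits, within each padded
# batch, all images for one augmentation before the next, the TTA copies of an
# image line up along axis 1, so averaging over that axis collapses them; the
# padding rows are dropped at the end.
import numpy as np

n_batches, n_ttas, batch_size, n_images = 2, 4, 3, 5
pred = np.random.rand(n_batches * n_ttas * batch_size)
pred = pred.reshape(n_batches, n_ttas, batch_size)
pred = pred.mean(axis=1)                     # average the TTA copies per image
pred = pred.reshape(n_batches * batch_size)  # one value per (possibly padded) slot
pred = pred[:n_images]                       # drop the padding rows
print(pred.shape)                            # (5,)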
Example #3
def fit(output):
    X, y = ImageIO().load_train_full()
    mean, std = ImageIO().load_mean_std()

    with open(output, 'rb') as f:
        net = pickle.load(f)

    net.on_epoch_finished = [
        # Learning rate reduced 10x for this step
        AdjustVariable('update_learning_rate', start=START_LEARNING_RATE * 1e-1),
        EarlyStoppingNoValidation(training_loss_threshold=TRAINING_LOSS),
    ]

    net.eval_size = None

    net.fit(X, y)

    with open(output + "_full", 'wb') as f:
        pickle.dump(net, f, -1)
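# AdjustVariable and EarlyStoppingNoValidation are project-specific callbacks.
# As an assumption-laden sketch, a linear learning-rate schedule in the usual
# nolearn on_epoch_finished style could look like the class below (not
# necessarily the version used in this project):
import numpy as np

class AdjustVariableSketch(object):
    def __init__(self, name, start=0.03, stop=0.0001):
        self.name = name
        self.start, self.stop = start, stop
        self.ls = None

    def __call__(self, nn, train_history):
        if self.ls is None:
            # One value per epoch, decayed linearly from start to stop
            self.ls = np.linspace(self.start, self.stop, nn.max_epochs)
        epoch = train_history[-1]['epoch']
        new_value = np.cast['float32'](self.ls[epoch - 1])
        # nn.<name> is assumed to be a theano shared variable,
        # e.g. update_learning_rate as defined in define_net()
        getattr(nn, self.name).set_value(new_value)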
def define_net():
    define_net_specific_parameters()
    io = ImageIO()

    # Read pandas csv labels
    y = util.load_labels()

    if params.SUBSET != 0:
        y = y[:params.SUBSET]

    X = np.arange(y.shape[0])

    mean, std = io.load_mean_std(circularized=params.CIRCULARIZED_MEAN_STD)
    keys = y.index.values

    if params.AUGMENT:
        train_iterator = AugmentingParallelBatchIterator(keys, params.BATCH_SIZE, std, mean, y_all = y)
    else:
        train_iterator = ParallelBatchIterator(keys, params.BATCH_SIZE, std, mean, y_all = y)

    test_iterator = ParallelBatchIterator(keys, params.BATCH_SIZE, std, mean, y_all = y)

    if params.REGRESSION:
        y = util.float32(y)
        y = y[:, np.newaxis]

    if 'gpu' in theano.config.device:
        # Half of the coma cluster does not support cuDNN; check whether it is
        # available on this node and fall back to the cuda_convnet bindings if not
        from theano.sandbox.cuda.dnn import dnn_available
        if dnn_available():
            from lasagne.layers import dnn
            Conv2DLayer = dnn.Conv2DDNNLayer
            MaxPool2DLayer = dnn.MaxPool2DDNNLayer
        else:
            from lasagne.layers import cuda_convnet
            Conv2DLayer = cuda_convnet.Conv2DCCLayer
            MaxPool2DLayer = cuda_convnet.MaxPool2DCCLayer
    else:
        Conv2DLayer = layers.Conv2DLayer
        MaxPool2DLayer = layers.MaxPool2DLayer

    Maxout = layers.pool.FeaturePoolLayer

    net = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('conv0', Conv2DLayer),
            ('pool0', MaxPool2DLayer),
            ('conv1', Conv2DLayer),
            ('pool1', MaxPool2DLayer),
            ('conv2', Conv2DLayer),
            ('pool2', MaxPool2DLayer),
            ('conv3', Conv2DLayer),
            ('pool3', MaxPool2DLayer),
            ('conv4', Conv2DLayer),
            ('pool4', MaxPool2DLayer),
            ('dropouthidden1', layers.DropoutLayer),
            ('hidden1', layers.DenseLayer),
            ('maxout1', Maxout),
            ('dropouthidden2', layers.DropoutLayer),
            ('hidden2', layers.DenseLayer),
            ('maxout2', Maxout),
            ('dropouthidden3', layers.DropoutLayer),
            ('output', layers.DenseLayer),
        ],

        input_shape=(None, params.CHANNELS, params.PIXELS, params.PIXELS),

        conv0_num_filters=32, conv0_filter_size=(5, 5), conv0_stride=(2, 2), pool0_pool_size=(2, 2), pool0_stride=(2, 2),
        conv1_num_filters=64, conv1_filter_size=(5, 5), conv1_border_mode = 'same', pool1_pool_size=(2, 2), pool1_stride=(2, 2),
        conv2_num_filters=128, conv2_filter_size=(3, 3), conv2_border_mode = 'same', pool2_pool_size=(2, 2), pool2_stride=(2, 2),
        conv3_num_filters=192, conv3_filter_size=(3, 3), conv3_border_mode = 'same', pool3_pool_size=(2, 2), pool3_stride=(2, 2),
        conv4_num_filters=256, conv4_filter_size=(3, 3), conv4_border_mode = 'same', pool4_pool_size=(2, 2), pool4_stride=(2, 2),

        hidden1_num_units=1024,
        hidden2_num_units=1024,

        dropouthidden1_p=0.5,
        dropouthidden2_p=0.5,
        dropouthidden3_p=0.5,

        maxout1_pool_size=2,
        maxout2_pool_size=2,

        output_num_units=1 if params.REGRESSION else 5,
        output_nonlinearity=None if params.REGRESSION else nonlinearities.softmax,

        update_learning_rate=theano.shared(util.float32(params.START_LEARNING_RATE)),
        update_momentum=theano.shared(util.float32(params.MOMENTUM)),
        custom_score=('kappa', quadratic_kappa),

        regression=params.REGRESSION,
        batch_iterator_train=train_iterator,
        batch_iterator_test=test_iterator,
        on_epoch_finished=[
            AdjustVariable('update_learning_rate', start=params.START_LEARNING_RATE),
            stats.Stat(),
            ModelSaver()
        ],
        max_epochs=500,
        verbose=1,

        # Only relevant when create_validation_split = True
        eval_size=0.1,

        # Need to specify splits manually as indicated below!
        create_validation_split=params.SUBSET>0,
    )

    # It is recommended to use the same training/validation split for every model when ensembling and optimizing thresholds
    #
    # To set specific training/validation split:
    net.X_train = np.load(params.IMAGE_SOURCE + "/X_train.npy")
    net.X_valid = np.load(params.IMAGE_SOURCE + "/X_valid.npy")
    net.y_train = np.load(params.IMAGE_SOURCE + "/y_train.npy")
    net.y_valid = np.load(params.IMAGE_SOURCE + "/y_valid.npy")

    return net, X, y
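# custom_score above refers to quadratic_kappa, the competition metric. A
# self-contained sketch of the quadratic weighted kappa over integer class
# labels 0..n_classes-1 follows (assumption: the project's quadratic_kappa may
# first round or clip regression outputs before scoring):
import numpy as np

def quadratic_weighted_kappa_sketch(y_true, y_pred, n_classes=5):
    y_true = np.asarray(y_true, dtype=int)
    y_pred = np.asarray(y_pred, dtype=int)
    # Observed confusion matrix
    O = np.zeros((n_classes, n_classes))
    for t, p in zip(y_true, y_pred):
        O[t, p] += 1
    # Quadratic disagreement weights
    i, j = np.indices((n_classes, n_classes))
    W = ((i - j) ** 2) / float((n_classes - 1) ** 2)
    # Expected confusion matrix under independence, scaled to the same total
    E = np.outer(np.bincount(y_true, minlength=n_classes),
                 np.bincount(y_pred, minlength=n_classes)) / float(len(y_true))
    return 1.0 - (W * O).sum() / (W * E).sum()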
def predict(model_id, raw, validation, train, n_eyes, average_over_eyes):
    params.DISABLE_CUDNN = True
    params.MULTIPROCESS = False

    d = importlib.import_module("nets.net_" + model_id)
    model, X, y = d.define_net()
    model.load_params_from(params.SAVE_URL + "/" + model_id + "/best_weights")

    f = get_iter_func(model)

    # Decrease batch size because TTA increases it 16-fold
    # Uses too much memory otherwise
    params.BATCH_SIZE = 8

    io = ImageIO()
    mean, std = io.load_mean_std()

    if validation or train:
        y = util.load_labels()
    else:
        y = util.load_sample_submission()

    keys = y.index.values

    tta_bi = TTABatchIterator(keys,
                              params.BATCH_SIZE,
                              std,
                              mean,
                              cv=validation or train,
                              n_eyes=n_eyes)
    print "TTAs per image: %i, augmented batch size: %i" % (
        tta_bi.ttas, tta_bi.ttas * params.BATCH_SIZE * n_eyes)

    if validation:
        X_test = np.load(params.IMAGE_SOURCE + "/X_valid.npy")
    elif train:
        X_test = np.load(params.IMAGE_SOURCE + "/X_train.npy")
    else:
        X_test = np.arange(y.shape[0])

    padded_batches = int(ceil(X_test.shape[0] / float(params.BATCH_SIZE)))

    pred = get_activations(X_test, tta_bi, f)

    concat_preds = []

    for batch_pred in pred:
        hidden = batch_pred[0]
        output = batch_pred[1]

        concat = np.concatenate([output, hidden], axis=1)

        #if average_over_eyes:
        #means = concat.reshape(concat.shape[0] / 2, 2, concat.shape[1])
        #means = means.mean(axis = 1)
        #means = np.repeat(means, 2, axis = 0)

        concat_preds.append(concat)

    pred = np.vstack(concat_preds)
    output_units = pred.shape[1]

    #pred = model.predict_proba(X_test)
    pred = pred.reshape(padded_batches, tta_bi.ttas, params.BATCH_SIZE,
                        output_units)
    pred = np.mean(pred, axis=1)
    pred = pred.reshape(padded_batches * params.BATCH_SIZE, output_units)

    # Remove padded lines
    pred = pred[:X_test.shape[0]]

    # Save unrounded
    #y.loc[keys] = pred

    if validation:
        filename = params.SAVE_URL + "/" + model_id + "/raw_predictions_validation.npy"
    elif train:
        filename = params.SAVE_URL + "/" + model_id + "/raw_predictions_train.npy"
    else:
        filename = params.SAVE_URL + "/" + model_id + "/raw_predictions_test.npy"

    np.save(filename, pred)
    #y.to_csv(filename)
    print "Saved raw predictions to " + filename

    if not raw and not validation and not train:
        W = np.load(params.SAVE_URL + "/" + model_id +
                    "/optimal_thresholds.npy")

        pred = weighted_round(pred, W)

        pred = pred[:, np.newaxis]  # add axis for pd compatibility

        hist, _ = np.histogram(pred, bins=5)
        print "Distribution over class predictions on test set: ", hist / float(
            y.shape[0])

        y.loc[keys] = pred

        y.to_csv(params.SAVE_URL + "/" + model_id + "/submission.csv")

        print "Gzipping..."

        if not params.ON_COMA:
            call("gzip -c " + params.SAVE_URL + "/" + model_id +
                 "/submission.csv > " + params.SAVE_URL + "/" + model_id +
                 "/submission.csv.gz",
                 shell=True)

        print "Done! File saved to models/" + model_id + "/submission.csv"
def define_net():
    define_net_specific_parameters()
    io = ImageIO()

    # Read pandas csv labels
    y = util.load_labels()

    if params.SUBSET != 0:
        y = y[:params.SUBSET]

    X = np.arange(y.shape[0])

    mean, std = io.load_mean_std(circularized=params.CIRCULARIZED_MEAN_STD)
    keys = y.index.values

    if params.AUGMENT:
        train_iterator = AugmentingParallelBatchIterator(keys,
                                                         params.BATCH_SIZE,
                                                         std,
                                                         mean,
                                                         y_all=y)
    else:
        train_iterator = ParallelBatchIterator(keys,
                                               params.BATCH_SIZE,
                                               std,
                                               mean,
                                               y_all=y)

    test_iterator = ParallelBatchIterator(keys,
                                          params.BATCH_SIZE,
                                          std,
                                          mean,
                                          y_all=y)

    if params.REGRESSION:
        y = util.float32(y)
        y = y[:, np.newaxis]

    if 'gpu' in theano.config.device:
        # Half of the coma cluster does not support cuDNN; check whether it is
        # available on this node and fall back to the cuda_convnet bindings if not
        from theano.sandbox.cuda.dnn import dnn_available
        if dnn_available() and not params.DISABLE_CUDNN:
            from lasagne.layers import dnn
            Conv2DLayer = dnn.Conv2DDNNLayer
            MaxPool2DLayer = dnn.MaxPool2DDNNLayer
        else:
            from lasagne.layers import cuda_convnet
            Conv2DLayer = cuda_convnet.Conv2DCCLayer
            MaxPool2DLayer = cuda_convnet.MaxPool2DCCLayer
    else:
        Conv2DLayer = layers.Conv2DLayer
        MaxPool2DLayer = layers.MaxPool2DLayer

    Maxout = layers.pool.FeaturePoolLayer

    net = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('conv0', Conv2DLayer),
            ('pool0', MaxPool2DLayer),
            ('conv1', Conv2DLayer),
            ('pool1', MaxPool2DLayer),
            ('conv2', Conv2DLayer),
            ('pool2', MaxPool2DLayer),
            ('conv3', Conv2DLayer),
            ('pool3', MaxPool2DLayer),
            ('conv4', Conv2DLayer),
            ('pool4', MaxPool2DLayer),
            ('dropouthidden1', layers.DropoutLayer),
            ('hidden1', layers.DenseLayer),
            ('maxout1', Maxout),
            ('dropouthidden2', layers.DropoutLayer),
            ('hidden2', layers.DenseLayer),
            ('maxout2', Maxout),
            ('dropouthidden3', layers.DropoutLayer),
            ('output', layers.DenseLayer),
        ],
        input_shape=(None, params.CHANNELS, params.PIXELS, params.PIXELS),
        conv0_num_filters=32,
        conv0_filter_size=(5, 5),
        conv0_stride=(2, 2),
        pool0_pool_size=(2, 2),
        pool0_stride=(2, 2),
        conv1_num_filters=64,
        conv1_filter_size=(3, 3),
        conv1_border_mode='same',
        pool1_pool_size=(2, 2),
        pool1_stride=(2, 2),
        conv2_num_filters=128,
        conv2_filter_size=(3, 3),
        conv2_border_mode='same',
        pool2_pool_size=(2, 2),
        pool2_stride=(2, 2),
        conv3_num_filters=192,
        conv3_filter_size=(3, 3),
        conv3_border_mode='same',
        pool3_pool_size=(2, 2),
        pool3_stride=(2, 2),
        conv4_num_filters=256,
        conv4_filter_size=(3, 3),
        conv4_border_mode='same',
        pool4_pool_size=(2, 2),
        pool4_stride=(2, 2),
        hidden1_num_units=1024,
        hidden2_num_units=1024,
        dropouthidden1_p=0.5,
        dropouthidden2_p=0.5,
        dropouthidden3_p=0.5,
        maxout1_pool_size=2,
        maxout2_pool_size=2,
        output_num_units=1 if params.REGRESSION else 5,
        output_nonlinearity=None
        if params.REGRESSION else nonlinearities.softmax,
        update_learning_rate=theano.shared(
            util.float32(params.START_LEARNING_RATE)),
        update_momentum=theano.shared(util.float32(params.MOMENTUM)),
        custom_score=('kappa', quadratic_kappa),
        regression=params.REGRESSION,
        batch_iterator_train=train_iterator,
        batch_iterator_test=test_iterator,
        on_epoch_finished=[
            AdjustVariable('update_learning_rate',
                           start=params.START_LEARNING_RATE),
            stats.Stat(),
            ModelSaver()
        ],
        max_epochs=500,
        verbose=1,

        # Only relevant when create_validation_split = True
        eval_size=0.1,

        # Need to specify splits manually as indicated below!
        create_validation_split=params.SUBSET > 0,
    )

    # It is recommended to use the same training/validation split for every model when ensembling and optimizing thresholds
    #
    # To set specific training/validation split:
    net.X_train = np.load(params.IMAGE_SOURCE + "/X_train.npy")
    net.X_valid = np.load(params.IMAGE_SOURCE + "/X_valid.npy")
    net.y_train = np.load(params.IMAGE_SOURCE + "/y_train.npy")
    net.y_valid = np.load(params.IMAGE_SOURCE + "/y_valid.npy")

    return net, X, y
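# The X_train.npy / X_valid.npy / y_train.npy / y_valid.npy files loaded above
# encode a fixed split that is reused across models. As a hypothetical example
# (the files under params.IMAGE_SOURCE may be produced differently), such a
# split could be generated once with a fixed seed:
import numpy as np

def make_fixed_split_sketch(y, valid_fraction=0.1, seed=42, out_dir="."):
    rng = np.random.RandomState(seed)
    idx = rng.permutation(len(y))
    n_valid = int(len(y) * valid_fraction)
    valid_idx, train_idx = idx[:n_valid], idx[n_valid:]
    # X_* hold image indices (matching X = np.arange(y.shape[0]) above),
    # y_* hold the corresponding labels
    np.save(out_dir + "/X_train.npy", train_idx)
    np.save(out_dir + "/X_valid.npy", valid_idx)
    np.save(out_dir + "/y_train.npy", np.asarray(y)[train_idx])
    np.save(out_dir + "/y_valid.npy", np.asarray(y)[valid_idx])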