Example No. 1
def build_model():
    metadata_dir = utils.get_dir_path('models', pathfinder.METADATA_PATH)
    metadata_path = utils.find_model_metadata(
        metadata_dir,
        patch_class_config.__name__.split('.')[-1])
    print('loading model', metadata_path)
    print('please check if model pkl is the correct one')
    metadata = utils.load_pkl(metadata_path)

    print('Build model')
    model = patch_class_config.build_model()
    all_layers = nn.layers.get_all_layers(model.l_out)
    num_params = nn.layers.count_params(model.l_out)
    print('  number of parameters: %d' % num_params)
    print('  layer output shapes:'.ljust(36), end='')
    print('#params:'.ljust(10), end='')
    print('output shape:')
    for layer in all_layers:
        name = layer.__class__.__name__.ljust(32)
        num_param = sum(
            [np.prod(p.get_value().shape) for p in layer.get_params()])
        num_param = str(num_param).ljust(10)
        print('    %s %s %s' % (name, num_param, layer.output_shape))

    nn.layers.set_all_param_values(model.l_out, metadata['param_values'])
    return model
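Every example in this listing resolves a checkpoint through utils.find_model_metadata, whose implementation is not shown. A minimal sketch of the lookup it plausibly performs (find a metadata file whose name starts with the config name) is below; the glob pattern and the most-recent-match tie-break are assumptions, not the repository's actual code.

import glob
import os


def find_model_metadata(metadata_dir, config_name):
    # Hypothetical sketch: assumes metadata files live in metadata_dir
    # with filenames that start with the config name.
    matches = glob.glob(os.path.join(metadata_dir, config_name + '*'))
    if not matches:
        raise ValueError('no metadata found for %s in %s'
                         % (config_name, metadata_dir))
    # Prefer the most recently modified match (assumption).
    return max(matches, key=os.path.getmtime)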
Example No. 2
def build_model():
    metadata_dir = utils.get_dir_path('models', pathfinder.METADATA_PATH)
    metadata_path = utils.find_model_metadata(
        metadata_dir,
        patch_config.__name__.split('.')[-1])
    metadata = utils.load_pkl(metadata_path)

    print('Build model')
    model = patch_config.build_model(patch_size=(window_size, window_size,
                                                 window_size))
    all_layers = nn.layers.get_all_layers(model.l_out)
    num_params = nn.layers.count_params(model.l_out)
    print('  number of parameters: %d' % num_params)
    print('  layer output shapes:'.ljust(36), end='')
    print('#params:'.ljust(10), end='')
    print('output shape:')
    for layer in all_layers:
        name = layer.__class__.__name__.ljust(32)
        num_param = sum(
            [np.prod(p.get_value().shape) for p in layer.get_params()])
        num_param = str(num_param).ljust(10)
        print('    %s %s %s' % (name, num_param, layer.output_shape))

    nn.layers.set_all_param_values(model.l_out, metadata['param_values'])
    return model
Example No. 3
def build_segmentation_model(l_in):
    metadata_dir = utils.get_dir_path('models', pathfinder.METADATA_PATH)
    metadata_path = utils.find_model_metadata(metadata_dir, patch_segmentation_config.__name__.split('.')[-1])
    metadata = utils.load_pkl(metadata_path)

    model = patch_segmentation_config.build_model(l_in=l_in, patch_size=p_transform['patch_size'])
    nn.layers.set_all_param_values(model.l_out, metadata['param_values'])
    return model
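A hedged usage sketch for the helper above; the input-layer shape is an assumption based on the (batch, channel) + p_transform['patch_size'] convention used in the other examples, not code from the repository.

# Hypothetical usage: shape assumed, not taken from the repository.
l_in = nn.layers.InputLayer((None, 1) + p_transform['patch_size'])
model = build_segmentation_model(l_in)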
Example No. 4
def process_config_data(config_name, n_points):
    save_dir = utils.find_model_metadata('metadata/', config_name)
    with open(save_dir + '/meta.pkl', 'rb') as f:
        d1 = pickle.load(f)

    l_train_iter1 = np.squeeze(d1['losses_train_iter'])
    print(len(l_train_iter1))
    l_train_iter1 = moving_average(l_train_iter1, n=n_points)
    return l_train_iter1
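moving_average is called above but not defined in this snippet. A minimal numpy sketch consistent with the call site (a trailing mean over windows of n points) might look like this; the repository's exact smoothing may differ.

import numpy as np


def moving_average(a, n=100):
    # Hypothetical sketch: trailing mean of a over windows of n points.
    csum = np.cumsum(a, dtype=np.float64)
    csum[n:] = csum[n:] - csum[:-n]
    return csum[n - 1:] / n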
Example No. 6
def build_model():
    l_in = nn.layers.InputLayer((
        None,
        n_candidates_per_patient,
        1,
    ) + p_transform['patch_size'])
    l_in_rshp = nn.layers.ReshapeLayer(l_in, (
        -1,
        1,
    ) + p_transform['patch_size'])
    l_target = nn.layers.InputLayer((batch_size, ))

    base_n_filters = 128
    l = conv_prelu_layer(l_in_rshp, n_filters=base_n_filters)
    l = conv_prelu_layer(l, n_filters=base_n_filters)
    l = conv_prelu_layer(l, n_filters=base_n_filters)

    l = max_pool3d(l)

    l = conv_prelu_layer(l, n_filters=base_n_filters)
    l = conv_prelu_layer(l, n_filters=base_n_filters)
    l = conv_prelu_layer(l, n_filters=base_n_filters)
    l_enc = conv_prelu_layer(l, n_filters=base_n_filters)

    num_units_dense = 512
    l_d01 = dense_prelu_layer(l, num_units=num_units_dense)
    l_d01 = nn.layers.ReshapeLayer(
        l_d01, (-1, n_candidates_per_patient, num_units_dense))
    l_d02 = dense_prelu_layer(l_d01, num_units=num_units_dense)
    l_out = nn.layers.DenseLayer(
        l_d02,
        num_units=2,
        W=nn.init.Constant(0.),
        b=np.array([np.log((1397. - 362) / 1398),
                    np.log(362. / 1397)],
                   dtype='float32'),
        nonlinearity=nn.nonlinearities.softmax)

    metadata_dir = utils.get_dir_path('models', pathfinder.METADATA_PATH)
    metadata_path = utils.find_model_metadata(metadata_dir, 'luna_p8a1')
    metadata = utils.load_pkl(metadata_path)
    for p, pv in zip(nn.layers.get_all_params(l_enc),
                     metadata['param_values']):
        if p.get_value().shape != pv.shape:
            raise ValueError("mismatch: parameter has shape %r but value to "
                             "set has shape %r" %
                             (p.get_value().shape, pv.shape))
        p.set_value(pv)

    return namedtuple('Model', ['l_in', 'l_out', 'l_target'])(l_in, l_out,
                                                              l_target)
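The output layer above zeroes its weights and sets the softmax bias to log class counts, so the untrained head starts out predicting (approximately) the class base rates. A quick self-contained check of that arithmetic, using the same constants:

import numpy as np

# With W = 0 the logits equal the bias, so softmax(b) recovers the priors.
b = np.array([np.log((1397. - 362) / 1398), np.log(362. / 1397)])
priors = np.exp(b) / np.exp(b).sum()
print(priors)  # approx. [0.74, 0.26]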
Example No. 7
def build_model():
    net = Net()

    config_name = "f87_pt"
    metadata_dir = utils.get_dir_path('models', pathfinder.METADATA_PATH)
    metadata_path = utils.find_model_metadata(metadata_dir,
                                              config_name,
                                              best=True)
    metadata = utils.load_pkl(metadata_path)
    net.load_state_dict(metadata['param_values'])

    net.densenet.classifier = nn.Linear(net.densenet.classifier.in_features,
                                        p_transform["n_labels"])
    net.densenet.classifier.weight.data.zero_()

    return namedtuple('Model', ['l_out'])(net)
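This example loads pretrained weights and then replaces the DenseNet classifier head for a new label set. A hedged sketch of how such a model might subsequently be fine-tuned; the optimizer, learning rate, and the choice to train all layers are assumptions, not this repository's settings.

import torch.optim as optim

model = build_model()
net = model.l_out
# Assumed fine-tuning setup: optimize every parameter, including the new head.
optimizer = optim.Adam(net.parameters(), lr=1e-4)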
Example No. 8
def build_model():
    metadata_dir = utils.get_dir_path('models', pathfinder.METADATA_PATH)
    metadata_path = utils.find_model_metadata(metadata_dir, patch_class_config.__name__.split('.')[-1])
    metadata = utils.load_pkl(metadata_path)

    print('Build model')
    model = patch_class_config.build_model()
    all_layers = nn.layers.get_all_layers(model.l_out)
    num_params = nn.layers.count_params(model.l_out)
    print('  number of parameters: %d' % num_params)
    print('  layer output shapes:'.ljust(36), end='')
    print('#params:'.ljust(10), end='')
    print('output shape:')
    for layer in all_layers:
        name = layer.__class__.__name__.ljust(32)
        num_param = sum([np.prod(p.get_value().shape) for p in layer.get_params()])
        num_param = str(num_param).ljust(10)
        print('    %s %s %s' % (name, num_param, layer.output_shape))

    nn.layers.set_all_param_values(model.l_out, metadata['param_values'])
    return model
Example No. 10
# -----------------------------------------------------------------------------
np.random.seed(seed=42)
tf.reset_default_graph()
tf.set_random_seed(args.tf_seed)

# config
configs_dir = __file__.split('/')[-2]
config = importlib.import_module('%s.%s' % (configs_dir, args.config_name))
if not args.resume:
    experiment_id = '%s-%s' % (args.config_name.split('.')[-1],
                               time.strftime("%Y_%m_%d", time.localtime()))
    utils.autodir('metadata')
    save_dir = 'metadata/' + experiment_id
    utils.autodir(save_dir)
else:
    save_dir = utils.find_model_metadata('metadata/', args.config_name)
    experiment_id = os.path.dirname(save_dir).split('/')[-1]
    with open(save_dir + '/meta.pkl', 'rb') as f:
        resumed_metadata = pickle.load(f)
        last_lr = resumed_metadata['lr']
        last_iteration = resumed_metadata['iteration']
        print('Last iteration', last_iteration)
        print('Last learning rate', last_lr)

# logs
utils.autodir('logs')
sys.stdout = logger.Logger('logs/%s.log' % experiment_id)
sys.stderr = sys.stdout

print('exp_id', experiment_id)
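Several of these scripts tee stdout into a log file via logger.Logger. That class is not included in the listing; a minimal tee-style sketch consistent with the sys.stdout = logger.Logger(path) usage might be:

import sys


class Logger(object):
    # Hypothetical sketch: duplicate every write to the terminal and a file.
    def __init__(self, log_path):
        self.terminal = sys.stdout
        self.log_file = open(log_path, 'a')

    def write(self, message):
        self.terminal.write(message)
        self.log_file.write(message)

    def flush(self):
        self.terminal.flush()
        self.log_file.flush()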
Example No. 11
from configuration import config, set_configuration
from utils_plots import plot_slice_3d_3
import utils_lung
import logger

theano.config.warn_float64 = 'raise'

if len(sys.argv) < 2:
    sys.exit("Usage: train.py <configuration_name>")

config_name = sys.argv[1]
set_configuration('configs_seg_patch', config_name)

# metadata
metadata_dir = utils.get_dir_path('models', pathfinder.METADATA_PATH)
metadata_path = utils.find_model_metadata(metadata_dir, config_name)

metadata = utils.load_pkl(metadata_path)
expid = metadata['experiment_id']

# logs
logs_dir = utils.get_dir_path('logs', pathfinder.METADATA_PATH)
sys.stdout = logger.Logger(logs_dir + '/%s-test.log' % expid)
sys.stderr = sys.stdout

# predictions path
predictions_dir = utils.get_dir_path('model-predictions', pathfinder.METADATA_PATH)
outputs_path = predictions_dir + '/' + expid
utils.auto_make_dir(outputs_path)

print('Build model')
Example No. 12
import os
import evaluate_submission

theano.config.warn_float64 = 'raise'

if len(sys.argv) < 2:
    sys.exit("Usage: test_class_dsb.py <configuration_name> <valid|test>")

config_name = sys.argv[1]
set_configuration('configs_class_dsb', config_name)

subset = sys.argv[2] if len(sys.argv) == 3 else 'test'

# metadata
metadata_dir = utils.get_dir_path('models', pathfinder.METADATA_PATH)
metadata_path = utils.find_model_metadata(metadata_dir, config_name)

metadata = utils.load_pkl(metadata_path)
expid = metadata['experiment_id']

# logs
logs_dir = utils.get_dir_path('logs', pathfinder.METADATA_PATH)
sys.stdout = logger.Logger(logs_dir + '/%s-%s.log' % (expid, subset))
sys.stderr = sys.stdout

# predictions path
predictions_dir = utils.get_dir_path('model-predictions', pathfinder.METADATA_PATH)
output_pkl_file = predictions_dir + '/%s-%s.pkl' % (expid, subset)

submissions_dir = utils.get_dir_path('submissions', pathfinder.METADATA_PATH)
output_csv_file = submissions_dir + '/%s-%s.csv' % (expid, subset)
Example No. 13
import numpy as np
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import arg_scope

import data_iter
import nn_extra_nvp
import nn_extra_student
import utils
from config_rnn import defaults

base_metadata_path = utils.find_model_metadata('metadata/', 'bn2_omniglot_tp')

sample_batch_size = 1
n_samples = 4
rng = np.random.RandomState(42)
rng_test = np.random.RandomState(317070)
seq_len = defaults.seq_len_few_shot  # 1-shot (2nd image is a test image)
batch_size = defaults.batch_size_few_shot  # 20-way
meta_batch_size = 8

nonlinearity = tf.nn.elu
weight_norm = True

train_data_iter = data_iter.OmniglotEpisodesDataIterator(
    seq_len=seq_len,
    batch_size=batch_size,
    meta_batch_size=meta_batch_size,
    set='train',
    rng=rng,
    augment=True)
Example No. 14
def plot_anomaly(config_name, n_sequences, n_ignore=8):
    configs_dir = __file__.split('/')[-2]
    config = importlib.import_module('%s.%s' % (configs_dir, config_name))

    # metadata
    save_dir = utils.find_model_metadata('metadata/', config_name)
    expid = os.path.dirname(save_dir).split('/')[-1]

    # samples
    target_path = save_dir + "/anomaly/"
    utils.autodir(target_path)

    print('Building the model', expid)
    model = tf.make_template('model', config.build_model)

    data_iter = config.test_data_iter
    data_iter.batch_size = 1

    x_in = tf.placeholder(tf.float32,
                          shape=(data_iter.batch_size, ) + config.obs_shape)
    model_output = model(x_in)
    latent_log_probs, latent_log_probs_prior = model_output[1], model_output[2]

    saver = tf.train.Saver()

    with tf.Session() as sess:
        ckpt_file = save_dir + 'params.ckpt'
        print('restoring parameters from', ckpt_file)
        saver.restore(sess, tf.train.latest_checkpoint(save_dir))

        for iteration, (x_batch, y_batch) in zip(range(n_sequences),
                                                 data_iter.generate_anomaly()):

            assert x_batch.shape[0] == data_iter.batch_size
            assert data_iter.batch_size == 1

            lp, lpp = sess.run([latent_log_probs, latent_log_probs_prior],
                               feed_dict={x_in: x_batch})
            lp = np.squeeze(lp)
            lpp = np.squeeze(lpp)
            scores = []
            for i in range(y_batch.shape[-1]):
                print(i, lp[i], lpp[i], lp[i] - lpp[i], y_batch[0, i])
                scores.append(lp[i] - lpp[i])
            print('-------------')

            quartile_1, quartile_3 = np.percentile(scores[n_ignore:], [25, 75])
            iqr = quartile_3 - quartile_1
            lower_bound = quartile_1 - (iqr * 1.5)
            anomaly_idxs = np.where(np.asarray(scores) < lower_bound)[0]
            # don't count the first n_ignore images as outliers
            for i in range(n_ignore):
                anomaly_idxs = np.delete(anomaly_idxs,
                                         np.argwhere(anomaly_idxs == i))
            print(anomaly_idxs)

            x_batch = np.squeeze(x_batch)
            x_batch = x_batch[anomaly_idxs]

            if len(anomaly_idxs) != 0:
                plt.figure(figsize=(4, 1.5))

                gs = gridspec.GridSpec(nrows=len(anomaly_idxs),
                                       ncols=6,
                                       hspace=0.1,
                                       wspace=0.1)

                ax0 = plt.subplot(gs[:, :-1])
                for i in anomaly_idxs:
                    plt.plot((i + 1, i + 1),
                             (min(scores) - 2, max(scores) + 2),
                             'r--',
                             linewidth=0.5,
                             dashes=(1, 0.5))
                plt.plot(range(1, args.seq_len + 1),
                         scores,
                         'black',
                         linewidth=1.)
                plt.gca().axes.set_ylim([min(scores) - 2, max(scores) + 2])
                plt.gca().axes.set_xlim([-0.2, args.seq_len + 0.2])
                plt.scatter(range(1, args.seq_len + 1),
                            scores,
                            c='black',
                            s=1.5)
                plt.xticks(fontsize=6)
                plt.yticks(fontsize=6)
                plt.tick_params(axis='both', which='major', labelsize=6)
                plt.xlabel('n', fontsize=8, labelpad=0)
                plt.ylabel('score', fontsize=8, labelpad=0)

                for i in range(len(anomaly_idxs)):
                    ax1 = plt.subplot(gs[i, -1])
                    plt.imshow(x_batch[i], cmap='gray', interpolation='None')
                    plt.xticks([])
                    plt.yticks([])
                    plt.axis('off')

                plt.savefig(target_path + '/anomaly_%s_%s.png' %
                            (iteration, args.mask_dims),
                            bbox_inches='tight',
                            dpi=600,
                            pad_inches=0)
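The outlier rule above is the standard Tukey fence: scores below Q1 - 1.5 * IQR are flagged as anomalies. A tiny self-contained illustration of the same computation on made-up scores:

import numpy as np

scores = np.array([0.9, 1.1, 1.0, 1.2, 0.95, -3.0])  # made-up scores
q1, q3 = np.percentile(scores, [25, 75])
lower_bound = q1 - 1.5 * (q3 - q1)
print(np.where(scores < lower_bound)[0])  # flags index 5, the -3.0 entry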
Example No. 15
def classify(config_name, seq_len, n_trials, batch_size):
    configs_dir = __file__.split('/')[-2]
    config = importlib.import_module('%s.%s' % (configs_dir, config_name))

    # metadata
    save_dir = utils.find_model_metadata('metadata/', config_name)
    expid = os.path.dirname(save_dir).split('/')[-1]

    assert seq_len == config.seq_len

    utils.autodir('logs')
    sys.stdout = logger.Logger(
        'logs/%s_test_class_%s_%s_%s.log' % (expid, n_trials, config.seq_len, batch_size))
    sys.stderr = sys.stdout

    print('Building the model', expid)
    model = tf.make_template('model', config.build_model)

    data_iter = config.test_data_iter2
    data_iter.batch_size = batch_size

    x_in = tf.placeholder(tf.float32, shape=(data_iter.batch_size,) + config.obs_shape)
    log_probs = model(x_in)[0]

    saver = tf.train.Saver()

    with tf.Session() as sess:
        ckpt_file = save_dir + 'params.ckpt'
        print('restoring parameters from', ckpt_file)
        saver.restore(sess, tf.train.latest_checkpoint(save_dir))

        trial_accuracies = []

        for trial in range(n_trials):

            generator = data_iter.generate(trial=trial)

            n_correct = 0
            n_total = 0

            x_number2scores = defaultdict(list)
            x_number2true_y = {}
            x_number2ys = {}
            for iteration, (x_batch, y_batch, x_number) in enumerate(generator):
                y_true = int(y_batch[0, -1])

                log_p = sess.run(log_probs, feed_dict={x_in: x_batch})
                log_p = log_p.reshape((data_iter.batch_size, config.seq_len))[:, -1]

                x_number2scores[x_number].append(log_p)
                x_number2true_y[x_number] = y_true
                x_number2ys[x_number] = y_batch[:, 0]
                if (iteration + 1) % 1000 == 0 or n_trials == 1:
                    print(x_number + 1)

            # average scores
            for k, v in x_number2scores.items():
                y_true = x_number2true_y[k]
                avg_score = np.mean(np.asarray(v), axis=0)
                max_idx = np.argmax(avg_score)
                if x_number2ys[k][max_idx] == y_true:
                    n_correct += 1
                n_total += 1

            acc = n_correct / n_total
            print(trial, 'accuracy', acc)
            print('n test examples', n_total)
            trial_accuracies.append(acc)
            print(trial_accuracies)

        print('---------------------------------------------')
        print(n_trials, config.seq_len)
        print(trial_accuracies)
        print('average accuracy over trials', np.mean(trial_accuracies))
        print('std accuracy over trials', np.std(trial_accuracies))