def __init__(self, batch_size = 128, iterations = 32):
     BatchIterator.__init__(self, batch_size)
     self.iterations = iterations
     self.X = None
     self.y = None
     self.cidx = 0
     self.midx = 0
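# The constructor above only stores state; in nolearn.lasagne the usual hook
# for custom behaviour is BatchIterator.transform(), which subclasses override
# to augment each mini-batch. A minimal, hedged sketch (the horizontal flip is
# illustrative only, not part of the snippet above):
import numpy as np
from nolearn.lasagne import BatchIterator

class FlipBatchIterator(BatchIterator):
    def transform(self, Xb, yb):
        Xb, yb = super(FlipBatchIterator, self).transform(Xb, yb)
        # Mirror a random half of the batch along its last axis (image width).
        half = Xb.shape[0] // 2
        idx = np.random.choice(Xb.shape[0], half, replace=False)
        Xb[idx] = Xb[idx, ..., ::-1]
        return Xb, yb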
Example #2
File: cnn.py  Project: EricDoug/tomb
 def __init__(self, batch_size = 128, iterations = 32):
     BatchIterator.__init__(self, batch_size)
Example #3
    update=adam,
    update_learning_rate=theano.shared(float32(0.0001), borrow=True),
    #                 update_momentum=theano.shared(float32(0.001), borrow=True),
    update_beta1=0.9,
    update_beta2=0.99,
    update_epsilon=1e-06,
    on_epoch_finished=[
        #                     AdjustVariable('update_learning_rate', start=0.3, stop=0.05),
        #                     AdjustVariable('update_momentum', start=0.001, stop=0.00299),
        #                     EarlyStopping(patience=200),
    ],
    regression=True,
    train_split=TrainSplit(eval_size=0.0),
    y_tensor_type=T.matrix,
    verbose=1,
    batch_iterator_train=BatchIterator(3200),
    max_epochs=230)
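# float32(...) used in the config above is not defined in this excerpt; a
# common helper in nolearn/Lasagne example code (an assumption here) is:
import numpy as np

def float32(k):
    return np.cast['float32'](k)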

#np.random.seed(7)
#net0_clone = clone(net0)
#net0_clone.fit(t1nn_conc_shared.get_value(), y)
#net0_clone.fit(X_encoded_shared.get_value(), y)

cv_by_hand = [(np.where(cvFolds != fold)[0], np.where(cvFolds == fold)[0])
              for fold in np.unique(cvFolds)]
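# Toy illustration of the hand-rolled CV split built above: each tuple pairs
# the training indices (fold != f) with the validation indices (fold == f).
# The values below are made up for the example.
_toy_folds = np.array([1, 1, 2, 2, 3, 3])
_toy_split = [(np.where(_toy_folds != f)[0], np.where(_toy_folds == f)[0])
              for f in np.unique(_toy_folds)]
# _toy_split[0] -> (array([2, 3, 4, 5]), array([0, 1]))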

foldPred = np.zeros((t1nn_conc_shared.get_value().shape[0], 1))
bags = 10
for iter in xrange(0, bags):
    for fold in xrange(0, np.max(cvFolds)):
        np.random.seed(iter + 56)
    nin2_nonlinearity=rectify,

    pool2_pool_size=(2, 2),
    pool2_stride=(2, 2),

    conv5_num_filters=50,
    conv5_filter_size=(3, 3),
    conv5_nonlinearity=rectify,

    output_num_units=32 * 32,
    output_nonlinearity=sigmoid,

    update_learning_rate=LearningRate,
    update_momentum=0.975,
    objective_loss_function=lasagne.objectives.binary_crossentropy,
    batch_iterator_train=BatchIterator(batch_size=100),
    batch_iterator_test=BatchIterator(batch_size=100),
    train_split=TrainSplit(eval_size=0.05),
    regression=True,
    max_epochs=1,
    verbose=1,
)
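# LearningRate referenced above is not defined in this excerpt; a plausible
# definition (an assumption) is a Theano shared float32 scalar, which would let
# an on_epoch_finished callback adjust it during training:
import numpy as np
import theano

LearningRate = theano.shared(np.float32(0.01), name='learning_rate')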


class Logger(object):
    def __init__(self):
        self.terminal = sys.stdout
        self.log = open("logfile_6.log", "a")

    def write(self, message):
        self.terminal.write(message)
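# The Logger excerpt above is cut off. A tee-style logger normally also writes
# the message to the log file, provides flush(), and is installed by
# reassigning sys.stdout. A hedged sketch of that pattern (not code from the
# original project):
import sys

class TeeLogger(object):
    def __init__(self, path="logfile_6.log"):
        self.terminal = sys.stdout
        self.log = open(path, "a")

    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)  # mirror everything into the log file

    def flush(self):
        self.terminal.flush()
        self.log.flush()

# typical installation: sys.stdout = TeeLogger()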
Example #5
    from lasagne.updates import nesterov_momentum

    clf = NeuralNet(layers=[
        ('input', InputLayer),
        ('hidden1', DenseLayer),
        ('output', DenseLayer),
    ],
                    input_shape=(None, 784),
                    output_num_units=10,
                    output_nonlinearity=softmax,
                    eval_size=0.0,
                    more_params=dict(hidden1_num_units=300, ),
                    update=nesterov_momentum,
                    update_learning_rate=0.02,
                    update_momentum=0.9,
                    batch_iterator_train=BatchIterator(batch_size=25),
                    max_epochs=10,
                    verbose=1)
    classifiers.append(('nolearn.lasagne', clf))

RUNS = 1

for name, orig in classifiers:
    times = []
    accuracies = []
    for i in range(RUNS):
        start = time.time()

        clf = clone(orig)
        clf.random_state = int(time.time())
        clf.fit(X_train, y_train)
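        # The benchmarking loop above is truncated in this excerpt; each run
        # presumably ends by recording elapsed time and test accuracy, roughly
        # (clf.score and the lists defined above are assumptions):
        #     times.append(time.time() - start)
        #     accuracies.append(clf.score(X_test, y_test))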
    loss = tf.reduce_mean(tf.square(predictions - tf_y_batch))
    #Optimizer
    optimizer = tf.train.AdamOptimizer(
        learning_rate=learning_rate).minimize(loss)

every_epoch_to_log = 1

with tf.Session(graph=graph) as session:
    session.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    train_loss_history = np.zeros(num_epochs)
    valid_loss_history = np.zeros(num_epochs)
    print("============ TRAINING =============")
    for epoch in range(num_epochs):
        current_epoch = epoch
        batch_iterator = BatchIterator(batch_size=batch_size, shuffle=True)
        for x_batch, y_batch in batch_iterator(voxels, labels):
            session.run(optimizer,
                        feed_dict={
                            tf_x_batch: x_batch,
                            tf_y_batch: y_batch,
                            is_training: True
                        })

        # To log the losses, get predictions on the entire training set
        p = []
        total_loss = 0
        if (epoch % every_epoch_to_log == 0):
            batch_iterator = BatchIterator(batch_size=10)  #128
            for x_batch, y_batch in batch_iterator(voxels, labels):
                #[p_batch]=session.run([logits],feed_dict={tf_x_batch:x_batch,is_training:False})
Example #7
    from lasagne.updates import nesterov_momentum

    clf = NeuralNet(layers=[
        ('input', InputLayer),
        ('hidden1', DenseLayer),
        ('output', DenseLayer),
    ],
                    input_shape=(None, 784),
                    output_num_units=10,
                    output_nonlinearity=softmax,
                    eval_size=0.0,
                    more_params=dict(hidden1_num_units=200, ),
                    update=nesterov_momentum,
                    update_learning_rate=0.02,
                    update_momentum=0.9,
                    batch_iterator_train=BatchIterator(batch_size=300),
                    max_epochs=10,
                    verbose=1)
    classifiers.append(('nolearn.lasagne', clf))

RUNS = 10

for name, orig in classifiers:
    times = []
    accuracies = []
    for i in range(RUNS):
        start = time.time()

        clf = clone(orig)
        clf.random_state = int(time.time())
        clf.fit(X_train, y_train)
Example #8
def build_net(randomize=False,loss=categorical_crossentropy,
        y_tensor_type=None,dropfactor=1.0,sizefactor=1):

    layers0=[('input',InputLayer),
            ('dropin',DropoutLayer),
            ('dense0',DenseLayer),
            ('dropout0',DropoutLayer),
            ('dense1',DenseLayer),
            ('dropout1',DropoutLayer),
            ('dense2',DenseLayer),
            ('dropout2',DropoutLayer),
            ('output',DenseLayer)]
    n=[int(512*sizefactor),int(800*sizefactor),int(1024*sizefactor)]
    leak=[0.3,0.0,0.0]
    drop=[0.1,0.2,0.3,0.4]
    if randomize:
        for i in range(3):
            n[i] += np.random.randint(low=-n[i]//15,high=n[i]//15)
        """
        for i in range(4):
            drop[i] *= np.random.uniform(0.8,1.2)
        leak[0]=np.random.uniform(0.2,0.3)
        leak[1]=np.random.uniform(0,0.1)
        leak[2]=np.random.uniform(0.0,0.05)
        """
        print "net: ", n,leak,drop

    net0=NeuralNet(layers=layers0,
        input_shape=(None,num_features),
        dropin_p=drop[0]*dropfactor,
        dense0_num_units=n[0],
        dense0_W=HeNormal(),
        dense0_nonlinearity=LeakyRectify(leak[0]),

        dropout0_p=drop[1]*dropfactor,
        dense1_num_units=n[1],
        dense1_nonlinearity=LeakyRectify(leak[1]),
        dense1_W=HeNormal(), 
 

        dropout1_p=drop[2]*dropfactor,
        dense2_num_units=n[2], # 1024
        dense2_nonlinearity=LeakyRectify(leak[2]),
        dense2_W=HeNormal(),
 
        dropout2_p=drop[3]*dropfactor, 
        
        output_num_units=num_classes,
        output_nonlinearity=softmax,

        update=nesterov_momentum,
        update_learning_rate = theano.shared(tfloat32(0.02)),
        update_momentum = theano.shared(tfloat32(0.9)),
        eval_size=0.0,
        verbose=1,
        max_epochs=150,
        on_epoch_finished=[
            AdjustVariable('update_learning_rate',
                epochs=[50,100],rates=[2e-3,2e-4])],
        regularization_rate=1e-5,
        batch_iterator_train=BatchIterator(batch_size=128),
        objective_loss_function= loss,
        y_tensor_type=y_tensor_type
        )

    return net0
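# Hedged usage sketch for build_net above. num_features, num_classes, tfloat32,
# HeNormal and AdjustVariable are defined elsewhere in the original project;
# X and y below are placeholder arrays, not taken from this excerpt.
net = build_net(randomize=True, dropfactor=1.0, sizefactor=1)
net.fit(X.astype('float32'), y.astype('int32'))  # X, y: training data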
Example #9
        'num_units': 1024
    }),

    # the output layer
    (layers.DenseLayer, {
        'num_units': 11,
        'nonlinearity': lasagne.nonlinearities.softmax
    }),
]
net0 = NeuralNet(
    layers=layers0,
    update_learning_rate=0.0001,
    max_epochs=100,
    update=adam,
    objective_l2=0.0025,
    batch_iterator_train=BatchIterator(
        path='/home/sharique/Documents/minor/dataset/', batch_size=60),
    batch_iterator_test=BatchIterator(
        path='/home/sharique/Documents/minor/dataset/', batch_size=29),
    train_split=TrainSplit(eval_size=0.02),
    verbose=1,
)
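# Note: the stock nolearn.lasagne BatchIterator takes no 'path' argument, so the
# iterator passed above is presumably a project-specific subclass that loads
# image batches from that directory. A minimal sketch of what such a subclass
# could look like (an assumption, not code from the original project):
from nolearn.lasagne import BatchIterator as NolearnBatchIterator

class PathBatchIterator(NolearnBatchIterator):
    def __init__(self, path, batch_size, **kwargs):
        super(PathBatchIterator, self).__init__(batch_size, **kwargs)
        self.path = path  # directory the batches are read from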

#################################################################
# bi = BatchIterator(batch_size=29)
#################################################################

# from nolearn.lasagne import PrintLayerInfo

# layer_info=PrintLayerInfo()
#################################################################
# type(Y_train)
softmax_cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=tf_y_batch, logits=logits)
loss = tf.reduce_mean(softmax_cross_entropy)

with tf.variable_scope('fc8', reuse=True):
    optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(
        loss, var_list=[tf.get_variable('weights'),
                        tf.get_variable('biases')])

with tf.Session() as session:
    session.run(tf.global_variables_initializer())

    print('Training...')
    for epoch in range(10):
        # Train on whole randomised dataset in batches
        batch_iterator = BatchIterator(batch_size=128, shuffle=True)
        for x_batch, y_batch in batch_iterator(X_train, y_train):
            session.run([optimizer],
                        feed_dict={
                            tf_x_batch: x_batch,
                            tf_y_batch: y_batch
                        })

        p = []
        sce = []
        batch_iterator = BatchIterator(batch_size=128)
        for x_batch, y_batch in batch_iterator(X_valid, y_valid):
            [p_batch,
             sce_batch] = session.run([predictions, softmax_cross_entropy],
                                      feed_dict={
                                          tf_x_batch: x_batch,
def make_grnn(
        batch_size,
        emb_size,
        g_hidden_size,
        word_n,
        wc_num,
        dence,
        wsm_num=1,
        rnn_type='LSTM',
        rnn_size=12,
        dropout_d=0.5,  # pooling='mean',
        quest_na=4,
        gradient_steps=-1,
        valid_indices=None,
        lr=0.05,
        grad_clip=10):
    def select_rnn(x):
        return {
            'RNN': LL.RecurrentLayer,
            'LSTM': LL.LSTMLayer,
            'GRU': LL.GRULayer,
        }.get(x, LL.LSTMLayer)

#    dence = dence + [1]

    RNN = select_rnn(rnn_type)
    #------------------------------------------------------------------input layers
    layers = [
        (LL.InputLayer, {
            'name': 'l_in_se_q',
            'shape': (None, word_n, emb_size)
        }),
        (LL.InputLayer, {
            'name': 'l_in_se_a',
            'shape': (None, quest_na, word_n, emb_size)
        }),
        (LL.InputLayer, {
            'name': 'l_in_mask_q',
            'shape': (None, word_n)
        }),
        (LL.InputLayer, {
            'name': 'l_in_mask_a',
            'shape': (None, quest_na, word_n)
        }),
        (LL.InputLayer, {
            'name': 'l_in_mask_ri_q',
            'shape': (None, word_n)
        }),
        (LL.InputLayer, {
            'name': 'l_in_mask_ri_a',
            'shape': (None, quest_na, word_n)
        }),
        (LL.InputLayer, {
            'name': 'l_in_wt_q',
            'shape': (None, word_n, word_n)
        }),
        (LL.InputLayer, {
            'name': 'l_in_wt_a',
            'shape': (None, word_n, quest_na, word_n)
        }),
        (LL.InputLayer, {
            'name': 'l_in_act_',
            'shape': (None, word_n, g_hidden_size)
        }),
        (LL.InputLayer, {
            'name': 'l_in_act__',
            'shape': (None, word_n, word_n, g_hidden_size)
        }),
    ]
    #------------------------------------------------------------------slice layers
    #    l_qs = []
    #    l_cas = []
    l_ase_names = ['l_ase_{}'.format(i) for i in range(quest_na)]
    l_amask_names = ['l_amask_{}'.format(i) for i in range(quest_na)]
    l_amask_ri_names = ['l_amask_ri_{}'.format(i) for i in range(quest_na)]
    l_awt_names = ['l_awt_{}'.format(i) for i in range(quest_na)]
    for i in range(quest_na):
        layers.extend([(LL.SliceLayer, {
            'name': l_ase_names[i],
            'incoming': 'l_in_se_a',
            'indices': i,
            'axis': 1
        })])
    for i in range(quest_na):
        layers.extend([(LL.SliceLayer, {
            'name': l_amask_names[i],
            'incoming': 'l_in_mask_a',
            'indices': i,
            'axis': 1
        })])
    for i in range(quest_na):
        layers.extend([(LL.SliceLayer, {
            'name': l_amask_ri_names[i],
            'incoming': 'l_in_mask_ri_a',
            'indices': i,
            'axis': 1
        })])
    for i in range(quest_na):
        layers.extend([(LL.SliceLayer, {
            'name': l_awt_names[i],
            'incoming': 'l_in_wt_a',
            'indices': i,
            'axis': 1
        })])
#-------------------------------------------------------------------GRNN layers
    WC = theano.shared(
        np.random.randn(wc_num, g_hidden_size,
                        g_hidden_size).astype('float32'))
    #    WC = LI.Normal(0.1)
    WSM = theano.shared(
        np.random.randn(emb_size, g_hidden_size).astype('float32'))
    b = theano.shared(np.ones(g_hidden_size).astype('float32'))
    #    b = lasagne.init.Constant(1.0)
    layers.extend([(GRNNLayer, {
        'name':
        'l_q_grnn',
        'incomings':
        ['l_in_se_q', 'l_in_mask_q', 'l_in_wt_q', 'l_in_act_', 'l_in_act__'],
        'emb_size':
        emb_size,
        'hidden_size':
        g_hidden_size,
        'word_n':
        word_n,
        'wc_num':
        wc_num,
        'wsm_num':
        wsm_num,
        'only_return_final':
        False,
        'WC':
        WC,
        'WSM':
        WSM,
        'b':
        b
    })])
    l_a_grnns_names = ['l_a_grnn_{}'.format(i) for i in range(quest_na)]
    for i, l_a_grnns_name in enumerate(l_a_grnns_names):
        layers.extend([(GRNNLayer, {
            'name':
            l_a_grnns_name,
            'incomings': [
                l_ase_names[i], l_amask_names[i], l_awt_names[i], 'l_in_act_',
                'l_in_act__'
            ],
            'emb_size':
            emb_size,
            'hidden_size':
            g_hidden_size,
            'word_n':
            word_n,
            'wc_num':
            wc_num,
            'wsm_num':
            wsm_num,
            'only_return_final':
            False,
            'WC':
            WC,
            'WSM':
            WSM,
            'b':
            b
        })])
#------------------------------------------------------------concatenate layers
    layers.extend([(LL.ConcatLayer, {
        'name': 'l_qa_concat',
        'incomings': ['l_q_grnn'] + l_a_grnns_names
    })])
    layers.extend([(LL.ConcatLayer, {
        'name': 'l_qamask_concat',
        'incomings': ['l_in_mask_ri_q'] + l_amask_ri_names
    })])
    #--------------------------------------------------------------------RNN layers
    layers.extend([(RNN, {
        'name': 'l_qa_rnn_f',
        'incoming': 'l_qa_concat',
        'mask_input': 'l_qamask_concat',
        'num_units': rnn_size,
        'backwards': False,
        'only_return_final': True,
        'grad_clipping': grad_clip
    })])
    layers.extend([(RNN, {
        'name': 'l_qa_rnn_b',
        'incoming': 'l_qa_concat',
        'mask_input': 'l_qamask_concat',
        'num_units': rnn_size,
        'backwards': True,
        'only_return_final': True,
        'grad_clipping': grad_clip
    })])
    layers.extend([(LL.ElemwiseSumLayer, {
        'name': 'l_qa_rnn_conc',
        'incomings': ['l_qa_rnn_f', 'l_qa_rnn_b']
    })])
    ##-----------------------------------------------------------------pooling layer
    ##    l_qa_pool = layers.extend([(LL.ExpressionLayer, {'name': 'l_qa_pool',
    ##                                                     'incoming': l_qa_rnn_conc,
    ##                                                     'function': lambda X: X.mean(-1),
    ##                                                     'output_shape'='auto'})])
    #------------------------------------------------------------------dence layers
    l_dence_names = ['l_dence_{}'.format(i) for i, _ in enumerate(dence)]
    if dropout_d:
        layers.extend([(LL.DropoutLayer, {
            'name': 'l_dence_do' + 'do',
            'p': dropout_d
        })])
    for i, d in enumerate(dence):
        if i < len(dence) - 1:
            nonlin = LN.tanh
        else:
            nonlin = LN.softmax
        layers.extend([(LL.DenseLayer, {
            'name': l_dence_names[i],
            'num_units': d,
            'nonlinearity': nonlin
        })])
        if i < len(dence) - 1 and dropout_d:
            layers.extend([(LL.DropoutLayer, {
                'name': l_dence_names[i] + 'do',
                'p': dropout_d
            })])

    def loss(x, t):
        return LO.aggregate(
            LO.categorical_crossentropy(T.clip(x, 1e-6, 1. - 1e-6), t))


#        return LO.aggregate(LO.squared_error(T.clip(x, 1e-6, 1. - 1e-6), t))

    if isinstance(valid_indices, np.ndarray) or isinstance(
            valid_indices, list):
        train_split = TrainSplit_indices(valid_indices=valid_indices)
    else:
        train_split = TrainSplit(eval_size=valid_indices, stratify=False)
    nnet = NeuralNet(
        y_tensor_type=T.ivector,
        layers=layers,
        update=LU.adagrad,
        update_learning_rate=lr,
        #            update_epsilon=1e-7,
        objective_loss_function=loss,
        regression=False,
        verbose=2,
        batch_iterator_train=PermIterator(batch_size=batch_size),
        batch_iterator_test=BatchIterator(batch_size=batch_size // 2),
        #            batch_iterator_train=BatchIterator(batch_size=batch_size),
        #            batch_iterator_test=BatchIterator(batch_size=batch_size),
        #train_split=TrainSplit(eval_size=eval_size)
        train_split=train_split)
    nnet.initialize()
    PrintLayerInfo()(nnet)
    return nnet
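# Hedged usage sketch for make_grnn above. GRNNLayer, PermIterator and
# TrainSplit_indices are project-specific classes defined elsewhere; the
# argument values here are placeholders chosen only to illustrate the call.
nnet = make_grnn(batch_size=32, emb_size=100, g_hidden_size=64, word_n=40,
                 wc_num=3, dence=[64, 4], valid_indices=0.1)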
Example #12
def train_model(params, X_train, y_train, X_valid, y_valid, X_test, y_test):
    
    # Initialisation routines: generate variable scope, create logger, note start time.
    paths = Paths(params)
    start = time.time()
    model_variable_scope = paths.var_scope

    
    # Build the graph
    graph = tf.Graph()
    with graph.as_default():
        # Input data. For the training data, we use a placeholder that will be fed at run time with a training minibatch.
        tf_x_batch = tf.placeholder(tf.float32, shape = (None, params.image_size[0], params.image_size[1], 1))
        tf_y_batch = tf.placeholder(tf.float32, shape = (None, params.num_classes))
        is_training = tf.placeholder(tf.bool)
        current_epoch = tf.Variable(0, trainable=False)  # count the number of epochs

        # Model parameters.
        if params.learning_rate_decay:
            learning_rate = tf.train.exponential_decay(params.learning_rate, current_epoch, decay_steps = params.max_epochs, decay_rate = 0.01)
        else:
            learning_rate = params.learning_rate
            
        # Training computation.
        with tf.variable_scope(model_variable_scope):
            logits = model_pass(tf_x_batch, params, is_training)
            if params.l2_reg_enabled:
                with tf.variable_scope('fc4', reuse = True):
                    l2_loss = tf.nn.l2_loss(tf.get_variable('weights'))
            else:
                l2_loss = 0

        predictions = tf.nn.softmax(logits)
        softmax_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=tf_y_batch, logits=logits)
        loss = tf.reduce_mean(softmax_cross_entropy) + params.l2_lambda * l2_loss  

        # Optimizer.
        optimizer = tf.train.AdamOptimizer(
            learning_rate = learning_rate
        ).minimize(loss)

    with tf.Session(graph = graph) as session:
        session.run(tf.global_variables_initializer())

        # A routine for evaluating current model parameters
        def get_accuracy_and_loss_in_batches(X, y, is_test=False):
            p = []
            sce = []
            batch_iterator = BatchIterator(batch_size = 128)
            for x_batch, y_batch in batch_iterator(X, y):
                [p_batch, sce_batch] = session.run([predictions, softmax_cross_entropy], feed_dict = {
                        tf_x_batch : x_batch, 
                        tf_y_batch : y_batch,
                        is_training : False
                    }
                )
                p.extend(p_batch)
                sce.extend(sce_batch)                
            p = np.array(p)
            sce = np.array(sce)
            accuracy = 100.0 * np.sum(np.argmax(p, 1) == np.argmax(y, 1)) / p.shape[0]
            loss = np.mean(sce)
            if is_test:
                print ("test accuracy ", accuracy, " test loss ", loss)
            return (accuracy, loss)
        
        # If we chose to keep training previously trained model, restore session.
        if params.resume_training: 
            try:
                tf.train.Saver().restore(session, paths.model_path)
            except Exception as e:
                print("Failed restoring previously trained model: file does not exist.")
                pass
        
        saver = tf.train.Saver()
#         early_stopping = EarlyStopping(tf.train.Saver(), session, patience = params.early_stopping_patience, minimize = True)
        train_loss_history = np.empty([0], dtype = np.float32)
        train_accuracy_history = np.empty([0], dtype = np.float32)
        valid_loss_history = np.empty([0], dtype = np.float32)
        valid_accuracy_history = np.empty([0], dtype = np.float32)
        if params.max_epochs > 0:
            print("================= TRAINING ==================")
        else:
            print("================== TESTING ==================")
        
        for epoch in range(params.max_epochs):
            current_epoch = epoch
            print ("current epoch ", current_epoch)
            # Train on whole randomised dataset in batches
            batch_iterator = BatchIterator(batch_size = params.batch_size, shuffle = True)
            batch_cnt=0
            for x_batch, y_batch in batch_iterator(X_train, y_train):
                batch_cnt+=1
#                 print ("batch ", batch_cnt)
                session.run([optimizer], feed_dict = {
                        tf_x_batch : x_batch, 
                        tf_y_batch : y_batch,
                        is_training : True
                    }
                )

            # If another significant epoch ended, we log our losses.
            if (epoch % params.log_epoch == 0):
                # Get validation data predictions and log validation loss:
                valid_accuracy, valid_loss = get_accuracy_and_loss_in_batches(X_valid, y_valid)

                # Get training data predictions and log training loss:
                train_accuracy, train_loss = get_accuracy_and_loss_in_batches(X_train, y_train)
                print ("train accuracy ", train_accuracy, " train loss ", train_loss)
                print ("valid accuracy ", valid_accuracy, " valid loss ", valid_loss)
            else:
                valid_loss = 0.
                valid_accuracy = 0.
                train_loss = 0.
                train_accuracy = 0.
                
            valid_loss_history = np.append(valid_loss_history, [valid_loss])
            valid_accuracy_history = np.append(valid_accuracy_history, [valid_accuracy])
            train_loss_history = np.append(train_loss_history, [train_loss])
            train_accuracy_history = np.append(train_accuracy_history, [train_accuracy])
            

        # Evaluate on test dataset.
        test_accuracy, test_loss = get_accuracy_and_loss_in_batches(X_test, y_test)
#         valid_accuracy, valid_loss = get_accuracy_and_loss_in_batches(X_valid, y_valid)
        # Save model weights for future use.
        saved_model_path = saver.save(session, paths.model_path)

        np.savez(paths.train_history_path, train_loss_history = train_loss_history, train_accuracy_history = train_accuracy_history, valid_loss_history = valid_loss_history, valid_accuracy_history = valid_accuracy_history)
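# The params object passed to train_model above needs at least the fields read
# inside the function; a hedged sketch using a namedtuple (the values are
# illustrative, and Paths(params) may require further fields not shown here):
from collections import namedtuple

Params = namedtuple('Params', [
    'image_size', 'num_classes', 'learning_rate', 'learning_rate_decay',
    'max_epochs', 'l2_reg_enabled', 'l2_lambda', 'resume_training',
    'batch_size', 'log_epoch'])
params = Params(image_size=(32, 32), num_classes=43, learning_rate=0.001,
                learning_rate_decay=False, max_epochs=100, l2_reg_enabled=True,
                l2_lambda=0.0001, resume_training=False, batch_size=256,
                log_epoch=1)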
Example #13
def main():
    c = color_codes()
    patch_size = (15, 15, 15)
    dir_name = '/home/sergivalverde/w/CNN/images/CH16'
    patients = [
        f for f in sorted(os.listdir(dir_name))
        if os.path.isdir(os.path.join(dir_name, f))
    ]
    names = np.stack([
        name for name in
        [[
            os.path.join(dir_name, patient, 'FLAIR_preprocessed.nii.gz')
            for patient in patients
        ],
         [
             os.path.join(dir_name, patient, 'DP_preprocessed.nii.gz')
             for patient in patients
         ],
         [
             os.path.join(dir_name, patient, 'T2_preprocessed.nii.gz')
             for patient in patients
         ],
         [
             os.path.join(dir_name, patient, 'T1_preprocessed.nii.gz')
             for patient in patients
         ]] if name is not None
    ],
                     axis=1)
    seed = np.random.randint(np.iinfo(np.int32).max)
    ''' Here we create an initial net to find conflictive voxels '''
    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Running iteration ' + c['b'] + '1>' + c['nc'])
    net_name = '/home/sergivalverde/w/CNN/code/CNN1/miccai_challenge2016/deep-challenge2016.init.'
    net = NeuralNet(
        layers=[
            (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
            (Conv3DDNNLayer,
             dict(name='conv1_1',
                  num_filters=32,
                  filter_size=(5, 5, 5),
                  pad='same')),
            (Pool3DDNNLayer,
             dict(name='avgpool_1',
                  pool_size=2,
                  stride=2,
                  mode='average_inc_pad')),
            (Conv3DDNNLayer,
             dict(name='conv2_1',
                  num_filters=64,
                  filter_size=(5, 5, 5),
                  pad='same')),
            (Pool3DDNNLayer,
             dict(name='avgpool_2',
                  pool_size=2,
                  stride=2,
                  mode='average_inc_pad')),
            (DropoutLayer, dict(name='l2drop', p=0.5)),
            (DenseLayer, dict(name='l1', num_units=256)),
            (DenseLayer,
             dict(name='out', num_units=2,
                  nonlinearity=nonlinearities.softmax)),
        ],
        objective_loss_function=objectives.categorical_crossentropy,
        update=updates.adam,
        update_learning_rate=0.0001,
        on_epoch_finished=[
            SaveWeights(net_name + 'model_weights.pkl',
                        only_best=True,
                        pickle=False),
            SaveTrainingHistory(net_name + 'model_history.pkl'),
            PlotTrainingHistory(net_name + 'training_history.png'),
            EarlyStopping(patience=10)
        ],
        verbose=10,
        max_epochs=50,
        train_split=TrainSplit(eval_size=0.25),
        custom_scores=[('dsc', lambda p, t: 2 * np.sum(p * t[:, 1]) / np.sum(
            (p + t[:, 1])))],
    )

    try:
        net.load_params_from(net_name + 'model_weights.pkl')
    except IOError:
        print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
              'Loading the data for ' + c['b'] + 'iteration 1' + c['nc'])
        # Create the data
        (x, y, _) = load_patches(dir_name=dir_name,
                                 use_flair=True,
                                 use_pd=True,
                                 use_t2=True,
                                 use_t1=True,
                                 use_gado=False,
                                 flair_name='FLAIR_preprocessed.nii.gz',
                                 pd_name='DP_preprocessed.nii.gz',
                                 t2_name='T2_preprocessed.nii.gz',
                                 t1_name='T1_preprocessed.nii.gz',
                                 gado_name=None,
                                 mask_name='Consensus.nii.gz',
                                 size=patch_size)

        print('-- Permuting the data')
        np.random.seed(seed)
        x_train = np.random.permutation(
            np.concatenate(x).astype(dtype=np.float32))
        print('-- Permuting the labels')
        np.random.seed(seed)
        y_train = np.random.permutation(
            np.concatenate(y).astype(dtype=np.int32))
        y_train = y_train[:, y_train.shape[1] / 2 + 1,
                          y_train.shape[2] / 2 + 1, y_train.shape[3] / 2 + 1]
        print('-- Training vector shape = (' +
              ','.join([str(length) for length in x_train.shape]) + ')')
        print('-- Training labels shape = (' +
              ','.join([str(length) for length in y_train.shape]) + ')')

        print c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +\
            'Training (' + c['b'] + 'initial' + c['nc'] + c['g'] + ')' + c['nc']
        # We try to get the last weights to keep improving the net over and over
        net.fit(x_train, y_train)
    ''' Here we get the seeds '''
    print c['c'] + '[' + strftime(
        "%H:%M:%S") + '] ' + c['g'] + '<Looking for seeds>' + c['nc']
    for patient in names:
        output_name = os.path.join('/'.join(patient[0].rsplit('/')[:-1]),
                                   'test.iter1.nii.gz')
        try:
            load_nii(output_name)
            print c['c'] + '[' + strftime("%H:%M:%S") + '] ' \
                + c['g'] + '-- Patient ' + patient[0].rsplit('/')[-2] + ' already done' + c['nc']
        except IOError:
            print c['c'] + '[' + strftime("%H:%M:%S") + '] '\
                  + c['g'] + '-- Testing with patient ' + c['b'] + patient[0].rsplit('/')[-2] + c['nc']
            image_nii = load_nii(patient[0])
            image = np.zeros_like(image_nii.get_data())
            for batch, centers in load_patch_batch(patient, 100000,
                                                   patch_size):
                y_pred = net.predict_proba(batch)
                [x, y, z] = np.stack(centers, axis=1)
                image[x, y, z] = y_pred[:, 1]

            print c['g'] + '-- Saving image ' + c['b'] + output_name + c['nc']
            image_nii.get_data()[:] = image
            image_nii.to_filename(output_name)
    ''' Here we perform the last iteration '''
    print c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c[
        'g'] + '<Running iteration ' + c['b'] + '2>' + c['nc']
    net_name = '/home/sergivalverde/w/CNN/code/CNN1/miccai_challenge2016/deep-challenge2016.final.'
    net = NeuralNet(
        layers=[
            (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
            (Conv3DDNNLayer,
             dict(name='conv1_1',
                  num_filters=32,
                  filter_size=(5, 5, 5),
                  pad='same')),
            (Pool3DDNNLayer,
             dict(name='avgpool_1',
                  pool_size=2,
                  stride=2,
                  mode='average_inc_pad')),
            (Conv3DDNNLayer,
             dict(name='conv2_1',
                  num_filters=64,
                  filter_size=(5, 5, 5),
                  pad='same')),
            (Pool3DDNNLayer,
             dict(name='avgpool_2',
                  pool_size=2,
                  stride=2,
                  mode='average_inc_pad')),
            (DropoutLayer, dict(name='l2drop', p=0.5)),
            (DenseLayer, dict(name='l1', num_units=256)),
            (DenseLayer,
             dict(name='out', num_units=2,
                  nonlinearity=nonlinearities.softmax)),
        ],
        objective_loss_function=objectives.categorical_crossentropy,
        update=updates.adam,
        update_learning_rate=0.0001,
        on_epoch_finished=[
            SaveWeights(net_name + 'model_weights.pkl',
                        only_best=True,
                        pickle=False),
            SaveTrainingHistory(net_name + 'model_history.pkl'),
            PlotTrainingHistory(net_name + 'training_history.png'),
        ],
        batch_iterator_train=BatchIterator(batch_size=4096),
        verbose=10,
        max_epochs=2000,
        train_split=TrainSplit(eval_size=0.25),
        custom_scores=[('dsc', lambda p, t: 2 * np.sum(p * t[:, 1]) / np.sum(
            (p + t[:, 1])))],
    )

    try:
        net.load_params_from(net_name + 'model_weights.pkl')
    except IOError:
        pass
    print c['c'] + '[' + strftime("%H:%M:%S") + '] '\
        + c['g'] + 'Loading the data for ' + c['b'] + 'iteration 2' + c['nc']
    (x, y,
     names) = load_patches(dir_name='/home/sergivalverde/w/CNN/images/CH16',
                           use_flair=True,
                           use_pd=True,
                           use_t2=True,
                           use_t1=True,
                           use_gado=False,
                           flair_name='FLAIR_preprocessed.nii.gz',
                           pd_name='DP_preprocessed.nii.gz',
                           t2_name='T2_preprocessed.nii.gz',
                           gado_name=None,
                           t1_name='T1_preprocessed.nii.gz',
                           mask_name='Consensus.nii.gz',
                           size=patch_size,
                           roi_name='test.iter1.nii.gz')

    print '-- Permuting the data'
    np.random.seed(seed)
    x_train = np.random.permutation(np.concatenate(x).astype(dtype=np.float32))
    print '-- Permuting the labels'
    np.random.seed(seed)
    y_train = np.random.permutation(np.concatenate(y).astype(dtype=np.int32))
    y_train = y_train[:, y_train.shape[1] / 2 + 1, y_train.shape[2] / 2 + 1,
                      y_train.shape[3] / 2 + 1]
    print '-- Training vector shape = (' + ','.join(
        [str(length) for length in x_train.shape]) + ')'
    print '-- Training labels shape = (' + ','.join(
        [str(length) for length in y_train.shape]) + ')'
    print c['c'] + '[' + strftime("%H:%M:%S") + '] '\
        + c['g'] + 'Training (' + c['b'] + 'final' + c['nc'] + c['g'] + ')' + c['nc']
    net.fit(x_train, y_train)
Example #14
File: convnet.py  Project: myinxd/agn-ae
 def gen_BatchIterator(self, batch_size=100, shuffle=True):
     """Generate the batch iterator"""
     B = BatchIterator(batch_size=batch_size, shuffle=shuffle)
     return B
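# Hedged usage of gen_BatchIterator above ('net' stands for the convnet wrapper
# instance from the original project; X and y are placeholder arrays):
#     it = net.gen_BatchIterator(batch_size=64, shuffle=True)
#     for Xb, yb in it(X, y):
#         ...  # each (Xb, yb) pair is one mini-batch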
Example #15
 def __init__(self, batch_size = 128):
     BatchIterator.__init__(self, batch_size)
Example #16
 def test_shuffle_no_copy(self, BatchIterator, X, y):
     bi = BatchIterator(2, shuffle=True)(X, y)
     X0, y0 = list(bi)[0]
     assert X0.base is X  # make sure X0 is a view
Example #17
def test():

	if not args.no_predict_test:
		print "Reading in the test files before starting the loop"
		preprocess_test_data()

	print("Validating...")
	for ch in range(args.no_channels):
		print 'Prediction for channel', ch
		print "Changing batch iterator test:"
		from batch_iterators import BI_test_sch_sch
		netSpec.batch_iterator_test = BI_test_sch_sch(batch_size=256,channel=ch)
		if include_userdata:
			prediction = predict(netSpec, {'sensors':xVal,'user':udVal})
			probabilities = netSpec.predict_proba({'sensors':xVal,'user':udVal})
			print "probabilities.shape", probabilities.shape
		else:
			prediction = predict(netSpec, xVal)
			print "xVal.shape", xVal.shape
			probabilities = netSpec.predict_proba(xVal)
			print "probabilities.shape", probabilities.shape


		print("Showing last 30 test samples..")
		print("Predictions:")
		print(prediction[-30:])
		print("Ground Truth:")
		print(yVal[-30:])
		print("Performance on relevant data")
		print 'yVal.shape', yVal.shape
		print 'prediction.shape', prediction.shape
		result = yVal==prediction
		faults = yVal!=prediction
		acc_val = float(np.sum(result))/float(len(result))
		print "Accuracy validation: ", acc_val
		print "Error rate (%): ", 100*(1-acc_val)
		#print np.nonzero(faults)

		from sklearn.metrics import confusion_matrix
		cm =  confusion_matrix(yVal,prediction)
		print cm
		
		from sklearn.metrics import roc_auc_score,log_loss
		print "roc_auc:", roc_auc_score(yVal, probabilities[:,1])
		print "log_loss:", log_loss(yVal, probabilities[:,1])

		print "Changing batch iterator test:"
		from nolearn.lasagne import BatchIterator
		netSpec.batch_iterator_test = BatchIterator(batch_size=256)
		print "Calculating final prediction for the hour long sessions"


		print "magnitudes_normal_val.shape", g.magnitudes_normal_val.shape
		probabilities_hour = []
		for mag_hour in g.magnitudes_normal_val:
			m_hour = mag_hour[ch]
			patches = rolling_window_ext(m_hour,(magnitude_window,ceil-floor))
			predictions_patches = netSpec.predict_proba(patches)
			prediction_hour = np.sum(predictions_patches,axis=0)/predictions_patches.shape[0]
			probabilities_hour.append(prediction_hour[1])

		print "magnitudes_seizure_val.shape", g.magnitudes_seizure_val.shape
		for mag_hour in g.magnitudes_seizure_val:
			m_hour = mag_hour[ch]
			patches = rolling_window_ext(m_hour,(magnitude_window,ceil-floor))
			predictions_patches = netSpec.predict_proba(patches)
			prediction_hour = np.sum(predictions_patches,axis=0)/predictions_patches.shape[0]
			probabilities_hour.append(prediction_hour[1])

		yVal_hour = np.hstack((np.zeros(g.magnitudes_normal_val.shape[0]),np.ones(g.magnitudes_seizure_val.shape[0])))
		print "roc_auc for the hours:", roc_auc_score(yVal_hour, probabilities_hour)
		print "log_loss for the hours:", log_loss(yVal_hour, probabilities_hour)

		print "saving predictions to csv file" 
		patient_str = '-'.join(args.patients)
		csv_filename = 'hours'+patient_str+'_'+cfg['training']['model']+'_'+datetime.now().strftime("%m-%d-%H-%M-%S")+'.csv'
		print csv_filename
		csv=open('./results/'+csv_filename, 'w+')
		for i in range(yVal_hour.shape[0]):
			csv.write(str(yVal_hour[i])+','+str(probabilities_hour[i])+'\n')
		csv.close()
		
		predictions_hour = np.round(probabilities_hour)
		result_hour = yVal_hour==predictions_hour
		acc_val_hour = float(np.sum(result_hour))/float(len(result_hour))
		print "Accuracy validation for the hours: ", acc_val_hour

		if not args.no_predict_test:
			print "Calculating the predictions for the test files"
			probabilities_test = []
			for mag_test in magnitudes_test:
				m_test = mag_test[ch]
				patches = rolling_window_ext(m_test,(magnitude_window,ceil-floor))
				predictions_patches = netSpec.predict_proba(patches)
				prediction_hour = np.sum(predictions_patches,axis=0)/predictions_patches.shape[0]
				probabilities_test.append(prediction_hour[1])

			print "saving predictions to csv file" 
			csv_filename = patient_str+'_'+str(ch)+'_'+cfg['training']['model']+'_'+datetime.now().strftime("%m-%d-%H-%M-%S")+'.csv'
			print csv_filename
			csv=open('./results/'+csv_filename, 'w+')
			counter = 0
			for dataset in datasets.all:
				if dataset.enabled and not dataset.trainset:
					for i in range(int(dataset.no_files * args.debug_sub_ratio)):
						filename = dataset.base_name+str(i+1)+'.mat'
						csv.write(filename+','+str(probabilities_test[counter+i])+'\n')
			csv.close()