Example #1
def create_model():
    from elektronn2 import neuromancer as nm

    in_sh = (None, 1, 22, 140, 140)
    inp = nm.Input(in_sh, 'b,f,z,x,y', name='raw')  # high res

    # Convolution, downsampling of intermediate features
    conv0 = nm.Conv(inp, 20, (1, 3, 3))
    conv1 = nm.Conv(conv0, 20, (1, 3, 3))
    down0 = nm.Pool(conv1, (1, 2, 2), mode='max')  # mid res
    conv2 = nm.Conv(down0, 30, (1, 3, 3))
    conv3 = nm.Conv(conv2, 30, (1, 3, 3))
    down1 = nm.Pool(conv3, (1, 2, 2), mode='max')  # low res
    conv4 = nm.Conv(down1, 35, (1, 3, 3))
    conv5 = nm.Conv(conv4, 35, (1, 3, 3))
    down2 = nm.Pool(conv5, (1, 2, 2), mode='max')  # very low res
    conv6 = nm.Conv(down2, 42, (3, 3, 3))
    down2b = nm.Pool(conv6, (1, 2, 2), mode='max')  # lowest res
    conv7 = nm.Conv(down2b, 42, (3, 3, 3))

    # Merging very low-res features with low-res features
    mrg0 = nm.UpConvMerge(conv5, conv7, 45)
    mconv0 = nm.Conv(mrg0, 42, (1, 3, 3))
    mconv1 = nm.Conv(mconv0, 42, (1, 3, 3))

    # Merging low-res with mid-res features
    mrg1 = nm.UpConvMerge(conv3, mconv1, 42)
    mconv2 = nm.Conv(mrg1, 35, (3, 3, 3))
    mconv3 = nm.Conv(mconv2, 35, (3, 3, 3))

    # Merging mid-res with high-res features
    mrg2 = nm.UpConvMerge(conv1, mconv3, 30)
    mconv4 = nm.Conv(mrg2, 20, (3, 3, 3))
    mconv5 = nm.Conv(mconv4, 20, (3, 3, 3))

    barr = nm.Conv(mconv5, 2, (1, 1, 1), activation_func='lin', name='barr')
    probs = nm.Softmax(barr)

    target = nm.Input_like(mconv5, override_f=1, name='target')

    loss_pix = nm.MultinoulliNLL(probs,
                                 target,
                                 target_is_sparse=True,
                                 name='nll_barr')

    loss = nm.AggregateLoss(loss_pix, name='loss')
    errors = nm.Errors(probs, target, target_is_sparse=True)

    model = nm.model_manager.getmodel()
    model.designate_nodes(input_node=inp,
                          target_node=target,
                          loss_node=loss,
                          prediction_node=probs,
                          prediction_ext=[loss, errors, probs])
    return model
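
A note on the shape arithmetic in this example: the convolutions run in
"valid" mode, so each (1, 3, 3) filter shrinks x and y by 2 while leaving z
untouched, and each (1, 2, 2) max-pool halves x and y. A minimal plain-Python
sketch (illustrative helper names, not elektronn2 API) replaying the first
encoder stage:

def valid_conv(shape, filt):
    # A "valid" convolution shrinks each spatial axis by (filter size - 1).
    return tuple(s - (f - 1) for s, f in zip(shape, filt))

def max_pool(shape, factor):
    # Pooling divides each spatial axis by its pooling factor.
    return tuple(s // p for s, p in zip(shape, factor))

sh = (22, 140, 140)             # z, x, y of the 'raw' input
sh = valid_conv(sh, (1, 3, 3))  # conv0 -> (22, 138, 138)
sh = valid_conv(sh, (1, 3, 3))  # conv1 -> (22, 136, 136)
sh = max_pool(sh, (1, 2, 2))    # down0 -> (22, 68, 68)
print(sh)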
Example #2
from elektronn2 import neuromancer as nm

# The input node is not defined in this snippet; the shape below is an
# assumption inferred from the 'affins' target at the bottom (same z, x, y,
# one input channel).
inp = nm.Input((None, 1, 16, 128, 128), 'b,f,z,x,y', name='raw')

# Convolution, downsampling of intermediate features

conv0 = nm.Conv(inp, 8, (15, 15, 15), (1, 1, 1), "same", name="c0")
down0 = nm.Pool(conv0, (1, 4, 4), mode="max", name="d0")  # 1/4 res in x, y

conv1 = nm.Conv(down0, 32, (15, 15, 15), (1, 1, 1), "same", name="c1")
down1 = nm.Pool(conv1, (1, 2, 2), mode="max", name="d1")  # 1/8 res

conv2 = nm.Conv(down1, 64, (15, 15, 15), (1, 1, 1), "same", name="c2")  # mid res
down2 = nm.Pool(conv2, (1, 2, 2), mode="max", name="d2")  # 1/16 res

conv3 = nm.Conv(down2, 64, (15, 15, 15), (1, 1, 1), "same", name="c3")  # low res

# merge c3 into c2 and convolve
mrg0 = nm.UpConvMerge(conv2, conv3, 64, name="m0", merge_mode="add")
mconv0 = nm.Conv(mrg0, 32, (15, 15, 15), (1, 1, 1), "same", name="mc0")

# merge mc0 into c1 and convolve (high-res target node first, as in mrg0)
mrg1 = nm.UpConvMerge(conv1, mconv0, 32, name="m1", merge_mode="add")
mconv1 = nm.Conv(mrg1, 8, (15, 15, 15), (1, 1, 1), "same", name="mc1")

# merge mc1 into c0 and convolve
mrg2 = nm.UpConvMerge(conv0, mconv1, 8, name="m2", merge_mode="add")
mconv2 = nm.Conv(mrg2, 3, (15, 15, 15), (1, 1, 1), "same", name="mc2")

affins = nm.Input((None, 3, 16, 128, 128), 'b,f,z,x,y', name='affins')

loss_node = nm.loss.BinaryNLL(mconv2, affins)
loss = nm.AggregateLoss(loss_node)
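
Unlike Example #1, the merges here use merge_mode="add": the upconverted
low-res features are summed element-wise with the high-res features instead
of being concatenated, so both sides must carry the same number of feature
maps. A toy numpy illustration (not elektronn2 code) of the difference:

import numpy as np

high = np.zeros((1, 32, 16, 32, 32))  # b, f, z, x, y: high-res features
up = np.zeros((1, 32, 16, 32, 32))    # upconverted low-res features
merged_concat = np.concatenate([high, up], axis=1)  # 64 feature maps
merged_add = high + up                              # still 32 feature maps
print(merged_concat.shape, merged_add.shape)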
Example #3
def create_model():
    from elektronn2 import neuromancer
    import theano.tensor as T
    import numpy as np

    in_sh = (None, 1, 572-32*9, 572-32*9)  # = (None, 1, 284, 284)
    img = neuromancer.Input(in_sh, 'b,f,x,y', name='raw')

    out0  = neuromancer.Conv(img,  64,  (3,3), (1,1))
    out1  = neuromancer.Conv(out0, 64,  (3,3), (1,1))
    out2  = neuromancer.Pool(out1, (2,2))

    out3  = neuromancer.Conv(out2, 128,  (3,3), (1,1))
    out4  = neuromancer.Conv(out3, 128,  (3,3), (1,1))
    out5  = neuromancer.Pool(out4, (2,2))

    out6  = neuromancer.Conv(out5, 256,  (3,3), (1,1))
    out7  = neuromancer.Conv(out6, 256,  (3,3), (1,1))
    out8  = neuromancer.Pool(out7, (2,2))

    out9  = neuromancer.Conv(out8, 512,  (3,3), (1,1))
    out10 = neuromancer.Conv(out9, 512,  (3,3), (1,1))
    out11 = neuromancer.Pool(out10, (2,2))

    out12 = neuromancer.Conv(out11, 1024,  (3,3), (1,1))
    out13 = neuromancer.Conv(out12, 1024,  (3,3), (1,1))
    out14 = neuromancer.Pool(out13, (2,2))

    # Decoder: upconvolve and merge the encoder features back in

    up0 = neuromancer.UpConvMerge(out10, out14, 1024)
    up1 = neuromancer.Conv(up0, 512,  (3,3), (1,1))
    up2 = neuromancer.Conv(up1, 512,  (3,3), (1,1))

    up3 = neuromancer.UpConvMerge(out7, up2, 512)
    up4 = neuromancer.Conv(up3, 256,  (3,3), (1,1))
    up5 = neuromancer.Conv(up4, 256,  (3,3), (1,1))

    up6 = neuromancer.UpConvMerge(out4, up5, 256)
    up7 = neuromancer.Conv(up6, 128,  (3,3), (1,1))
    up8 = neuromancer.Conv(up7, 128,  (3,3), (1,1))

    up9 = neuromancer.UpConvMerge(out1, up8, 128)
    up10 = neuromancer.Conv(up9, 64,  (3,3), (1,1))
    top_feat = neuromancer.Conv(up10, 64,  (3,3), (1,1))


    # Target outputs
    barr_out = neuromancer.Conv(top_feat,  3, (1,1), (1,1), activation_func='lin', name='barr')
    obj_out  = neuromancer.Conv(top_feat,  4, (1,1), (1,1), activation_func='lin', name='obj')
    my_out   = neuromancer.Conv(top_feat,  3, (1,1), (1,1), activation_func='lin', name='my')
    barr_out = neuromancer.Softmax(barr_out)
    obj_out  = neuromancer.Softmax(obj_out)
    my_out   = neuromancer.Softmax(my_out)

    target   = neuromancer.Input_like(top_feat, dtype='int16', override_f=3, name='target')
    barr, obj, my = neuromancer.split(target, 'f', n_out=3, name=['barr_t', 'obj_t', 'my_t'])

    # Target loss
    barr_loss_pix = neuromancer.MultinoulliNLL(barr_out, barr, target_is_sparse=True, name='nll_barr')
    obj_loss_pix  = neuromancer.MultinoulliNLL(obj_out, obj, target_is_sparse=True, name='nll_obj')
    my_loss_pix   = neuromancer.MultinoulliNLL(my_out, my, target_is_sparse=True, name='nll_my')
    pred          = neuromancer.Concat([barr_out, obj_out, my_out], axis='f')
    pred.feature_names = ['barrier_bg', 'barr_mem', 'barr_ecs', 'obj_bg',
                          'obj_mito', 'obj_ves', 'obj_syn', 'my_bg', 'my_out', 'my_in']

    # Objective
    weights = np.array([2.154, 0.42, 0.42])
    weights *= len(weights) / weights.sum()
    loss = neuromancer.AggregateLoss([barr_loss_pix,
                                      obj_loss_pix,
                                      my_loss_pix],
                                     mixing_weights=weights)
    # Monitoring / debug outputs
    nll_barr = neuromancer.ApplyFunc(barr_loss_pix, T.mean, name='mnll_barr')
    nll_obj  = neuromancer.ApplyFunc(obj_loss_pix, T.mean, name='mnll_obj')
    nll_my   = neuromancer.ApplyFunc(my_loss_pix, T.mean, name='mnll_my')
    errors = neuromancer.Errors(barr_out, barr, target_is_sparse=True)

    model = neuromancer.model_manager.getmodel()
    model.designate_nodes(input_node=img, target_node=target, loss_node=loss,
                          prediction_node=pred,
                          prediction_ext=[loss, errors, pred],
                          debug_outputs=[nll_barr, errors, nll_obj, nll_my])

    return model
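
The in-place update of the mixing weights in this example rescales them so
that they sum to len(weights), i.e. average to 1. This keeps the magnitude of
the aggregated loss roughly unchanged while still weighting the barrier task
more heavily than the object and myelin tasks. A quick standalone check:

import numpy as np

weights = np.array([2.154, 0.42, 0.42])
weights *= len(weights) / weights.sum()
print(weights)        # approx. [2.158, 0.421, 0.421]
print(weights.sum())  # 3.0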
Example #4
def create_model():
    import numpy as np
    from elektronn2 import neuromancer as nm

    # 'trainee_dict' (holding the trainee model factory), 'act' (activation
    # function) and 'dr' (dropout rate) are expected to be supplied by the
    # surrounding training config.
    trainee = trainee_dict["create_model"]()
    inp = trainee.input_node
    trainee_gt = trainee.target_node
    trainee_out = trainee.prediction_node
    trainee_loss = trainee.loss_node
    adv_input, adv_target = nm.advmerge(trainee_out, trainee_gt)

    # raw data: crop the input symmetrically to the spatial shape of the
    # trainee's prediction (the shape difference must be even).
    diff = np.array(inp.shape.spatial_shape, dtype=np.int32) - np.array(
        trainee_out.shape.spatial_shape, dtype=np.int32)
    assert not np.any(diff % 2)
    raw_inp = nm.Crop(inp, list(diff // 2))
    conv0 = nm.Conv(raw_inp,
                    5, (2, 3, 3),
                    activation_func=act,
                    dropout_rate=dr,
                    name="conv_adv")
    conv1 = nm.Conv(conv0,
                    10, (2, 3, 3),
                    activation_func=act,
                    dropout_rate=dr,
                    name="conv_adv")
    down0 = nm.Pool(conv1, (1, 2, 2), mode='max')  # mid res
    conv2 = nm.Conv(down0,
                    10, (2, 3, 3),
                    activation_func=act,
                    dropout_rate=dr,
                    name="conv_adv")
    conv3 = nm.Conv(conv2,
                    15, (2, 3, 3),
                    activation_func=act,
                    dropout_rate=dr,
                    name="conv_adv")
    # Merging low-res with mid-res features
    mrg1 = nm.UpConvMerge(conv1, conv3, 20, name="upconv_adv")
    mconv2 = nm.Conv(mrg1,
                     15, (2, 3, 3),
                     activation_func=act,
                     dropout_rate=dr,
                     name="conv_adv")
    raw_out = nm.Conv(mconv2,
                      15, (2, 3, 3),
                      activation_func=act,
                      dropout_rate=dr,
                      name="conv_adv")

    # segmentation branch: the same architecture applied to the adversarial input
    conv0 = nm.Conv(adv_input,
                    5, (2, 3, 3),
                    activation_func=act,
                    dropout_rate=dr,
                    name="conv_adv")
    conv1 = nm.Conv(conv0,
                    10, (2, 3, 3),
                    activation_func=act,
                    dropout_rate=dr,
                    name="conv_adv")
    down0 = nm.Pool(conv1, (1, 2, 2), mode='max')  # mid res
    conv2 = nm.Conv(down0,
                    10, (2, 3, 3),
                    activation_func=act,
                    dropout_rate=dr,
                    name="conv_adv")
    conv3 = nm.Conv(conv2,
                    15, (2, 3, 3),
                    activation_func=act,
                    dropout_rate=dr,
                    name="conv_adv")
    # Merging low-res with mid-res features
    mrg1 = nm.UpConvMerge(conv1, conv3, 20, name="upconv_adv")
    mconv2 = nm.Conv(mrg1,
                     15, (2, 3, 3),
                     activation_func=act,
                     dropout_rate=dr,
                     name="conv_adv")
    seg_out = nm.Conv(mconv2,
                      15, (2, 3, 3),
                      activation_func=act,
                      dropout_rate=dr,
                      name="conv_adv")

    out = nm.Concat([raw_out, seg_out], axis="f")
    out = nm.Conv(out,
                  20, (2, 5, 5),
                  pool_shape=(1, 2, 2),
                  activation_func=act,
                  dropout_rate=dr,
                  name="conv_adv")
    out = nm.Conv(out,
                  40, (2, 3, 3),
                  pool_shape=(1, 2, 2),
                  activation_func=act,
                  dropout_rate=dr,
                  name="conv_adv")
    out = nm.Conv(out,
                  60, (2, 2, 2),
                  pool_shape=(1, 2, 2),
                  activation_func=act,
                  dropout_rate=dr,
                  name="conv_adv")
    dec = nm.Perceptron(out,
                        2,
                        flatten=True,
                        activation_func='lin',
                        name="perc_adv")
    adv_out = nm.Softmax(dec)

    # As in the original GAN paper by Goodfellow et al., only the positive
    # label (i.e. the adversarial network predicting that its input is ground
    # truth) contributes to the adversarial prediction loss when training the
    # trainee. When training the adversarial network itself, only the binary
    # cross-entropy of the adversarial prediction is backpropagated.
    adv_loss = nm.MultinoulliNLL(adv_out,
                                 adv_target,
                                 target_is_sparse=True,
                                 name='nll_adversarial',
                                 class_weights=None)
    loss = nm.AggregateLoss([adv_loss, trainee_loss],
                            mixing_weights=None,
                            name='loss_adversarial')

    model = nm.model_manager.getmodel()
    model.designate_nodes(
        input_node=inp,
        target_node=trainee.target_node,
        loss_node=loss,
        prediction_node=trainee.prediction_node,
        prediction_ext=trainee.prediction_ext,
    )
    return model
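
The centered crop near the top of this example relies on the shape difference
between the raw input and the trainee's prediction being even on every axis
(hence the assert), so the same offset can be removed from both sides. A toy
numpy version of that cropping logic, with hypothetical shapes not taken from
the example:

import numpy as np

inp_sh = np.array([22, 140, 140])   # hypothetical input z, x, y
out_sh = np.array([16, 128, 128])   # hypothetical prediction z, x, y
diff = inp_sh - out_sh
assert not np.any(diff % 2)         # offsets must be symmetric
off = diff // 2
x = np.zeros((1, 1, *inp_sh))       # b, f, z, x, y
cropped = x[:, :,
            off[0]:inp_sh[0] - off[0],
            off[1]:inp_sh[1] - off[1],
            off[2]:inp_sh[2] - off[2]]
print(cropped.shape)  # (1, 1, 16, 128, 128)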