Example #1
def create_model():
    from elektronn2 import neuromancer
    in_sh = (None, 1, 111+16*5, 111+16*5, 13+4*5)
    inp = neuromancer.Input(in_sh, 'b,f,x,y,z', name='raw')

    out = neuromancer.Conv(inp, 12, (6, 6, 1), (2, 2, 1))
    out = neuromancer.Conv(out, 24, (4, 4, 1), (2, 2, 1))
    out = neuromancer.Conv(out, 36, (4, 4, 4), (2, 2, 2))
    out = neuromancer.Conv(out, 48, (4, 4, 2), (2, 2, 2))
    out = neuromancer.Conv(out, 48, (4, 4, 2), (1, 1, 1))
    out = neuromancer.Conv(out, 4, (1, 1, 1), (1, 1, 1),
                           activation_func='lin', name='sy')
    probs = neuromancer.Softmax(out)

    target = neuromancer.Input_like(probs, override_f=1, name='target')
    loss_pix = neuromancer.MultinoulliNLL(probs, target, target_is_sparse=True)

    loss = neuromancer.AggregateLoss(loss_pix, name='loss')
    errors = neuromancer.Errors(probs, target, target_is_sparse=True)

    model = neuromancer.model_manager.getmodel()
    model.designate_nodes(
        input_node=inp,
        target_node=target,
        loss_node=loss,
        prediction_node=probs,
        prediction_ext=[loss, errors, probs]
    )

    return model
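The odd-looking extents in in_sh (111 + 16*5 and 13 + 4*5) are chosen so that the stack of valid convolutions above ends on whole-numbered spatial outputs. A minimal sketch of that bookkeeping, assuming standard valid-convolution arithmetic (out = (in - kernel) // stride + 1); plain Python, independent of elektronn2:

def valid_out(size, kernels, strides):
    # Output extent of stacked valid convolutions: out = (in - k) // s + 1
    for k, s in zip(kernels, strides):
        size = (size - k) // s + 1
    return size

# x/y extent of Example #1's Conv stack:
print(valid_out(111 + 16 * 5, (6, 4, 4, 4, 4, 1), (2, 2, 2, 2, 1, 1)))  # -> 6
# z extent:
print(valid_out(13 + 4 * 5, (1, 1, 4, 2, 2, 1), (1, 1, 2, 2, 1, 1)))    # -> 6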
Example #2
def create_model():
    from elektronn2 import neuromancer as nm
    in_sh = (None, 1, 11, 155, 155)
    inp = nm.Input(in_sh, 'b,f,z,x,y', name='raw')

    out = nm.Conv(inp, 20, (1, 4, 4), (1, 2, 2))
    out = nm.Conv(out, 40, (3, 3, 3), (1, 2, 2))

    out = nm.Conv(out, 150, (2, 4, 4), (2, 1, 1))
    out = nm.Conv(out, 200, (1, 3, 3))
    out = nm.Conv(out, 200, (1, 3, 3))

    out = nm.Conv(out, 200, (1, 1, 1))
    out = nm.Conv(out, 2, (1, 1, 1), activation_func='lin')
    probs = nm.Softmax(out)

    target = nm.Input_like(probs, override_f=1, name='target')
    loss_pix = nm.MultinoulliNLL(probs, target, target_is_sparse=True)

    loss = nm.AggregateLoss(loss_pix, name='loss')
    errors = nm.Errors(probs, target, target_is_sparse=True)

    model = nm.model_manager.getmodel()
    model.designate_nodes(input_node=inp,
                          target_node=target,
                          loss_node=loss,
                          prediction_node=probs,
                          prediction_ext=[loss, errors, probs])
    return model
Example #3
def create_model():
    from elektronn2 import neuromancer as nm

    in_sh = (None, 1, 26, 26)
    inp = nm.Input(in_sh, 'b,f,y,x', name='raw')

    out = nm.Conv(inp, 12, (3, 3), (2, 2), batch_normalisation='train')
    out = nm.Conv(out, 36, (3, 3), (2, 2), batch_normalisation='train')
    out = nm.Conv(out, 64, (3, 3), (1, 1), batch_normalisation='train')
    out = nm.Perceptron(out, 200, flatten=True)
    out = nm.Perceptron(out, 10, activation_func='lin')
    out = nm.Softmax(out)
    target = nm.Input_like(out, override_f=1, name='target')
    loss = nm.MultinoulliNLL(out, target, name='nll_', target_is_sparse=True)
    # Objective
    loss = nm.AggregateLoss(loss)
    # Monitoring / Debug outputs
    errors = nm.Errors(out, target, target_is_sparse=True)

    model = nm.model_manager.getmodel()
    model.designate_nodes(input_node=inp,
                          target_node=target,
                          loss_node=loss,
                          prediction_node=out,
                          prediction_ext=[loss, errors, out])
    return model
Example #4
def create_model():
    from elektronn2 import neuromancer
    in_sh = (20, 20 * 58)
    inp = neuromancer.Input(in_sh, 'b,f', name='raw')
    out = neuromancer.Perceptron(inp, 700, 'lin')
    out = neuromancer.Perceptron(out, 500, 'lin')
    out = neuromancer.Perceptron(out, 300, 'lin')
    out = neuromancer.Perceptron(out, 2*58, 'lin')
    out = neuromancer.Softmax(out, n_indep=58)
    target = neuromancer.Input_like(out, override_f=1, name='target')
    weights = neuromancer.ValueNode((116, ), 'f', value=[0.2, 1.8]*58)
    loss = neuromancer.MultinoulliNLL(
        out, target, target_is_sparse=True, class_weights=weights, name='nll'
    )
    # Objective
    loss = neuromancer.AggregateLoss(loss)
    # Monitoring  / Debug outputs
    errors = neuromancer.Errors(out, target, target_is_sparse=True)

    model = neuromancer.model_manager.getmodel()
    model.designate_nodes(
        input_node=inp,
        target_node=target,
        loss_node=loss,
        prediction_node=out,
        prediction_ext=[loss, errors, out]
    )
    return model
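With n_indep=58, the Softmax treats its 2*58 inputs as 58 independent two-way groups, which is why the class-weight list alternates 0.2, 1.8 fifty-eight times. A small numpy sketch of that layout (an illustration of the grouping only, not of elektronn2 internals):

import numpy as np

logits = np.random.rand(2 * 58).astype(np.float32)
groups = logits.reshape(58, 2)                      # one row per independent softmax
e = np.exp(groups - groups.max(axis=1, keepdims=True))
probs = e / e.sum(axis=1, keepdims=True)            # softmax within each group
weights = np.array([0.2, 1.8] * 58).reshape(58, 2)  # per-class weight in each group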
Example #5
def create_model():
    import numpy as np
    from elektronn2 import neuromancer
    from elektronn2.neuromancer import graph_manager
    from elektronn2.model import Model

    graph_manager.reset()

    inp = neuromancer.Input((1, 1, 1, 59, 59), 'b,f,x,y,z', name='raw')
    out = neuromancer.Conv(inp, 2, (1, 6, 6), (1, 2, 2), dropout_rate=0.8)
    out = neuromancer.Conv(out, 3, (1, 6, 6), (1, 2, 2), dropout_rate=0.5)
    out = neuromancer.Conv(out, 4, (1, 4, 4), (1, 1, 1))
    out = neuromancer.Conv(out, 5, (1, 1, 1), (1, 1, 1))
    out = neuromancer.Conv(out, 2, (1, 1, 1), (1, 1, 1), activation_func='lin')

    out = neuromancer.Softmax(out, n_indep=1, name='probs')

    l_sh = out.shape.copy()
    l_sh.updateshape('f', 1)
    lab = neuromancer.Input_like(l_sh, dtype='int16', name='labels')
    loss_pix = neuromancer.MultinoulliNLL(out, lab, target_is_sparse=True)
    loss = neuromancer.AggregateLoss(loss_pix, name='nll')

    graph_manager.designate_nodes(input=inp,
                                  target=lab,
                                  loss=loss,
                                  prediction=out,
                                  prediction_ext=[loss, loss, out])
    m = Model(graph_manager)
    return m
Example #6
def create_model():
    from elektronn2 import neuromancer
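    # Note: `batch_size`, `data_init_kwargs`, `nb_views`, `x`, `y` and `dr`
    # are expected to be defined in the enclosing config/module scope (an
    # elektronn2 convention for model config files).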

    act = 'relu'
    in_sh = (batch_size, 1 if data_init_kwargs["raw_only"] else 4, nb_views,
             int(x), int(y))
    inp = neuromancer.Input(in_sh, 'b,f,z,y,x', name='raw')
    out0 = neuromancer.Conv(inp,
                            13, (1, 5, 5), (1, 2, 2),
                            activation_func=act,
                            dropout_rate=dr)
    out0 = neuromancer.Conv(out0,
                            19, (1, 5, 5), (1, 2, 2),
                            activation_func=act,
                            dropout_rate=dr)
    out0 = neuromancer.Conv(out0,
                            25, (1, 4, 4), (1, 2, 2),
                            activation_func=act,
                            dropout_rate=dr)
    out0 = neuromancer.Conv(out0,
                            25, (1, 4, 4), (1, 2, 2),
                            activation_func=act,
                            dropout_rate=dr)
    out0 = neuromancer.Conv(out0,
                            30, (1, 2, 2), (1, 2, 2),
                            activation_func=act,
                            dropout_rate=dr)
    out0 = neuromancer.Conv(out0,
                            30, (1, 1, 1), (1, 2, 2),
                            activation_func=act,
                            dropout_rate=dr)
    out = neuromancer.Conv(out0,
                           31, (1, 1, 1), (1, 1, 1),
                           activation_func=act,
                           dropout_rate=dr)

    out = neuromancer.Perceptron(out, 50, flatten=True, dropout_rate=dr)
    out = neuromancer.Perceptron(out, 30, flatten=True, dropout_rate=dr)
    out = neuromancer.Perceptron(out, 4, activation_func='lin')
    out = neuromancer.Softmax(out)
    target = neuromancer.Input_like(out, override_f=1, name='target')
    weights = neuromancer.ValueNode((4, ), 'f', value=(1, 1, 2, 2))
    loss = neuromancer.MultinoulliNLL(out,
                                      target,
                                      name='nll_',
                                      target_is_sparse=True,
                                      class_weights=weights)

    # Objective
    loss = neuromancer.AggregateLoss(loss)
    # Monitoring  / Debug outputs
    errors = neuromancer.Errors(out, target, target_is_sparse=True)

    model = neuromancer.model_manager.getmodel()
    model.designate_nodes(input_node=inp,
                          target_node=target,
                          loss_node=loss,
                          prediction_node=out,
                          prediction_ext=[loss, errors, out])
    return model
Example #7
def demo_new_split():
    import numpy as np
    from elektronn2 import neuromancer

    act = 'tanh'

    data = neuromancer.Input((30, 10, 20), 'r,b,f', name='data')
    a, b = neuromancer.split(data, axis='r', index=1, name=['a', 'b'])
    c, d = neuromancer.split(data,
                             axis='r',
                             index=1,
                             strip_singleton_dims=True)

    x = np.random.rand(30, 10, 20).astype(np.float32)

    aa, bb = a(x), b(x)
    cc, dd = c(x), d(x)

    print(aa.shape, bb.shape, cc.shape, dd.shape)
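    # Likely shapes, assuming the split at index=1 along 'r' and that
    # strip_singleton_dims drops the resulting singleton axis:
    #   aa: (1, 10, 20), bb: (29, 10, 20), cc: (10, 20), dd: (29, 10, 20)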

    model = neuromancer.model_manager.current
    model.designate_nodes(input_node=data)

    # , target_node=target, loss_node=loss,
    #                                  prediction_node=pred,
    #                                  prediction_ext=[loss, nll_barr, pred],
    #                                  debug_outputs =[nll_barr, nll_obj, nll_branch, mse_skel])

    model.save("/tmp/test.mdl")
    model2 = neuromancer.model.modelload("/tmp/test.mdl")
Example #8
def create_model():
    from elektronn2 import neuromancer as nm

    in_sh = (None, 1, 22, 140, 140)
    inp = nm.Input(in_sh, 'b,f,z,x,y', name='raw')  # high res

    # Convolution, downsampling of intermediate features
    conv0 = nm.Conv(inp, 20, (1, 3, 3))
    conv1 = nm.Conv(conv0, 20, (1, 3, 3))
    down0 = nm.Pool(conv1, (1, 2, 2), mode='max')  # mid res
    conv2 = nm.Conv(down0, 30, (1, 3, 3))
    conv3 = nm.Conv(conv2, 30, (1, 3, 3))
    down1 = nm.Pool(conv3, (1, 2, 2), mode='max')  # low res
    conv4 = nm.Conv(down1, 35, (1, 3, 3))
    conv5 = nm.Conv(conv4, 35, (1, 3, 3))
    down2 = nm.Pool(conv5, (1, 2, 2), mode='max')  # very low res
    conv6 = nm.Conv(down2, 42, (3, 3, 3))
    down2b = nm.Pool(conv6, (1, 2, 2), mode='max')  # very low res, even lower
    conv7 = nm.Conv(down2b, 42, (3, 3, 3))

    # Merging very low-res features with low-res features
    mrg0 = nm.UpConvMerge(conv5, conv7, 45)
    mconv0 = nm.Conv(mrg0, 42, (1, 3, 3))
    mconv1 = nm.Conv(mconv0, 42, (1, 3, 3))

    # Merging low-res with mid-res features
    mrg1 = nm.UpConvMerge(conv3, mconv1, 42)
    mconv2 = nm.Conv(mrg1, 35, (3, 3, 3))
    mconv3 = nm.Conv(mconv2, 35, (3, 3, 3))

    # Merging mid-res with high-res features
    mrg2 = nm.UpConvMerge(conv1, mconv3, 30)
    mconv4 = nm.Conv(mrg2, 20, (3, 3, 3))
    mconv5 = nm.Conv(mconv4, 20, (3, 3, 3))

    barr = nm.Conv(mconv5, 2, (1, 1, 1), activation_func='lin', name='barr')
    probs = nm.Softmax(barr)

    target = nm.Input_like(probs, override_f=1, name='target')

    loss_pix = nm.MultinoulliNLL(probs,
                                 target,
                                 target_is_sparse=True,
                                 name='nll_barr')

    loss = nm.AggregateLoss(loss_pix, name='loss')
    errors = nm.Errors(probs, target, target_is_sparse=True)

    model = nm.model_manager.getmodel()
    model.designate_nodes(input_node=inp,
                          target_node=target,
                          loss_node=loss,
                          prediction_node=probs,
                          prediction_ext=[loss, errors, probs])
    return model
Example #9
def demo_restore():
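    # Note: this fragment assumes `numpy as np` is imported and that `gr`,
    # `Conv` and `UpConv` are bound in the surrounding module (presumably to
    # elektronn2's neuromancer namespace).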
    # records = ut.pickleload('/tmp/model.pkl')
    # graph_manager.restore(records)
    # print "="*50
    # print "="*50
    # out = graph_manager.sinks
    # print out
    # x_val = np.linspace(0,1, num=64*784).astype(np.float32).reshape((64, 784))
    # loss = graph_manager.nodes['loss']
    # enc_mu = graph_manager.nodes['enc mu']
    # print loss(x_val)
    # print enc_mu(x_val)

    # pack = False
    # if pack:
    #    inp = gr.Input((2,6,20,20), 'b,f,x,y')
    #    out1, out2 = gr.split(inp, 'f', 2, name='test_split')
    #    x_val = np.random.rand(2,6,20,20).astype(np.float32)
    #    print out1(x_val).shape, out2(x_val).shape
    #    ut.picklesave(graph_manager.get_records(), '/tmp/model2.pkl')
    #    print graph_manager.nodes.keys()
    #
    # if not pack:
    #    graph_manager.reset()
    #    records = ut.pickleload('/tmp/model2.pkl')
    #    graph_manager.restore(records)
    #    print "="*50
    #    print "="*50
    #    out = graph_manager.sinks
    #    print out
    #    out1 = graph_manager.nodes['test_split1']
    #    out2 = graph_manager.nodes['test_split2']
    #    x_val = np.random.rand(2,6,20,20).astype(np.float32)
    #    print out1(x_val).shape, out2(x_val).shape
    #    print graph_manager.nodes.keys()

    in_sh = (1, 1, 183, 183, 31)
    x = gr.Input(in_sh, 'b,f,z,x,y')
    in_val = np.random.rand(*in_sh).astype(np.float32)

    y1 = Conv(x, 12, (1, 6, 6)[::-1], (1, 2, 2)[::-1], mfp=False)
    y2 = Conv(y1, 24, (4, 4, 4)[::-1], (2, 2, 2)[::-1], mfp=False)
    y3 = Conv(y2, 64, (4, 4, 4)[::-1], (1, 2, 2)[::-1], mfp=False)
    y4 = Conv(y3, 64, (4, 4, 4)[::-1], (1, 1, 1)[::-1], mfp=False)
    # z4 = Perceptron(y4, 12)
    s4 = UpConv(y4, 64, (4, 4, 4), (2, 2, 2))
    d4 = y4.make_dual(y4, activation_func='lin')
    p4 = gr.Softmax(d4)

    lab = gr.Input_like(p4, name='lab')
    loss = gr.loss.MultinoulliNLL(p4, lab)
Example #10
def demo_pooling_2d():
    import os
    import numpy as np
    import matplotlib.pyplot as plt
    from elektronn2 import neuromancer

    img = plt.imread(os.path.expanduser('~/devel/Lichtenstein.png')).transpose(
        (2, 0, 1))[None, ]
    sig_shape = img.shape
    # x_val = np.random.rand(*sig_shape).astype(np.float32)
    inp = neuromancer.Input(sig_shape, 'b,f,x,y')
    conv = neuromancer.Conv(inp, 3, (3, 3), (3, 3), activation_func='tanh')
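    # Build a channel-diagonal weight tensor: each w[i, i] is an all-ones
    # (3, 3) kernel, nudging the upconv towards a per-channel identity map.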
    w = np.zeros((3, 3, 3, 3), dtype=np.float32)
    w[[0, 1, 2], [0, 1, 2]] = 1
    upconv = neuromancer.UpConv(conv, 3, (3, 3), activation_func='tanh')
    upconv.w.set_value(upconv.w.get_value() * 0.07 + w)

    y0 = conv(img)
    y2 = upconv(img)

    plt.imshow(y0[0].transpose((1, 2, 0)), interpolation='none')
    plt.show()
    plt.figure()
    plt.imshow(y2[0].transpose((1, 2, 0)), interpolation='none')
    plt.show()
Example #11
def create_model():
    from elektronn2 import neuromancer

    in_sh = (40, 40, 58)
    inp = neuromancer.Input(in_sh, 'r,b,f', name='raw')
    inp0, _ = neuromancer.split(inp, 'r', index=1, strip_singleton_dims=True)
    inp_mem = neuromancer.InitialState_like(inp0,
                                            override_f=750,
                                            init_kwargs={
                                                'mode': 'fix-uni',
                                                'scale': 0.1
                                            })
    out = neuromancer.GRU(inp0, inp_mem, 750)
    out = neuromancer.Scan(out,
                           inp_mem,
                           in_iterate=inp,
                           in_iterate_0=inp0,
                           n_steps=40,
                           last_only=True)
    out = neuromancer.Perceptron(out, 2 * 58, 'lin')
    out = neuromancer.Softmax(out, n_indep=58)
    target = neuromancer.Input_like(out, override_f=1, name='target')
    weights = neuromancer.ValueNode((116, ), 'f', value=[0.2, 1.8] * 58)
    loss = neuromancer.MultinoulliNLL(out,
                                      target,
                                      name='nll',
                                      target_is_sparse=True,
                                      class_weights=weights)
    # Objective
    loss = neuromancer.AggregateLoss(loss)
    # Monitoring  / Debug outputs
    errors = neuromancer.Errors(out, target, target_is_sparse=True)

    model = neuromancer.model_manager.getmodel()
    model.designate_nodes(input_node=inp,
                          target_node=target,
                          loss_node=loss,
                          prediction_node=out,
                          prediction_ext=[loss, errors, out])
    return model
Example #12
def create_model():
    from elektronn2 import neuromancer
    import theano.tensor as T
    import numpy as np

    in_sh = (None, 1, 572 - 32 * 9, 572 - 32 * 9)
    img = neuromancer.Input(in_sh, 'b,f,x,y', name='raw')

    out0  = neuromancer.Conv(img,  64,  (3,3), (1,1))
    out1  = neuromancer.Conv(out0, 64,  (3,3), (1,1))
    out2  = neuromancer.Pool(out1, (2,2))

    out3  = neuromancer.Conv(out2, 128,  (3,3), (1,1))
    out4  = neuromancer.Conv(out3, 128,  (3,3), (1,1))
    out5  = neuromancer.Pool(out4, (2,2))

    out6  = neuromancer.Conv(out5, 256,  (3,3), (1,1))
    out7  = neuromancer.Conv(out6, 256,  (3,3), (1,1))
    out8  = neuromancer.Pool(out7, (2,2))

    out9  = neuromancer.Conv(out8, 512,  (3,3), (1,1))
    out10 = neuromancer.Conv(out9, 512,  (3,3), (1,1))
    out11 = neuromancer.Pool(out10, (2,2))

    out12 = neuromancer.Conv(out11, 1024,  (3,3), (1,1))
    out13 = neuromancer.Conv(out12, 1024,  (3,3), (1,1))
    out14 = neuromancer.Pool(out13, (2,2))

    ####

    up0 = neuromancer.UpConvMerge(out10, out14, 1024)
    up1 = neuromancer.Conv(up0, 512,  (3,3), (1,1))
    up2 = neuromancer.Conv(up1, 512,  (3,3), (1,1))

    up3 = neuromancer.UpConvMerge(out7, up2, 512)
    up4 = neuromancer.Conv(up3, 256,  (3,3), (1,1))
    up5 = neuromancer.Conv(up4, 256,  (3,3), (1,1))

    up6 = neuromancer.UpConvMerge(out4, up5, 256)
    up7 = neuromancer.Conv(up6, 128,  (3,3), (1,1))
    up8 = neuromancer.Conv(up7, 128,  (3,3), (1,1))

    up9 = neuromancer.UpConvMerge(out1, up8, 128)
    up10 = neuromancer.Conv(up9, 64,  (3,3), (1,1))
    top_feat = neuromancer.Conv(up10, 64,  (3,3), (1,1))


    # Target outputs
    barr_out = neuromancer.Conv(top_feat,  3, (1,1), (1,1), activation_func='lin', name='barr')
    obj_out  = neuromancer.Conv(top_feat,  4, (1,1), (1,1), activation_func='lin', name='obj')
    my_out   = neuromancer.Conv(top_feat,  3, (1,1), (1,1), activation_func='lin', name='my')
    barr_out = neuromancer.Softmax(barr_out)
    obj_out  = neuromancer.Softmax(obj_out)
    my_out   = neuromancer.Softmax(my_out)

    target   = neuromancer.Input_like(top_feat, dtype='int16', override_f=3, name='target')
    barr, obj, my = neuromancer.split(target, 'f', n_out=3, name=['barr_t', 'obj_t', 'my_t'])

    # Target loss
    barr_loss_pix = neuromancer.MultinoulliNLL(barr_out, barr, target_is_sparse=True, name='nll_barr')
    obj_loss_pix  = neuromancer.MultinoulliNLL(obj_out, obj, target_is_sparse=True, name='nll_obj')
    my_loss_pix   = neuromancer.MultinoulliNLL(my_out, my, target_is_sparse=True, name='nll_my')
    pred          = neuromancer.Concat([barr_out, obj_out, my_out], axis='f')
    pred.feature_names = ['barrier_bg', 'barr_mem', 'barr_ecs', 'obj_bg',
                          'obj_mito', 'obj_ves', 'obj_syn', 'my_bg', 'my_out', 'my_in']

    # Objective
    weights = np.array([2.154, 0.42, 0.42])
    weights *= len(weights) / weights.sum()
    loss = neuromancer.AggregateLoss([barr_loss_pix,
                                      obj_loss_pix,
                                      my_loss_pix],
                                     mixing_weights=weights)
    # Monitoring  / Debug outputs
    nll_barr   = neuromancer.ApplyFunc(barr_loss_pix, T.mean, name='mnll_barr')
    nll_obj    = neuromancer.ApplyFunc(obj_loss_pix, T.mean, name='mnll_obj')
    nll_my   = neuromancer.ApplyFunc(my_loss_pix, T.mean, name='mnll_my')
    errors = neuromancer.Errors(barr_out, barr, target_is_sparse=True)

    model = neuromancer.model_manager.getmodel()
    model.designate_nodes(input_node=img, target_node=target, loss_node=loss,
                          prediction_node=pred,
                          prediction_ext=[loss, errors, pred],
                          debug_outputs=[nll_barr, errors, nll_obj, nll_my])

    return model
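The input extent 572 - 32*9 = 284 steps the classic 572-pixel U-Net input down in multiples of 32: with the five 2x2 pools above, shrinking the input by 32 shrinks the deepest feature map by exactly 1 while keeping every pre-pool extent even. A quick sketch verifying this for the down path (plain Python; the up path is omitted):

def down_path_sizes(in_px):
    # Trace an x/y extent through Example #12's encoder: per stage, two valid
    # 3x3 convs (each -2), then a 2x2 max pool (pre-pool extent must be even).
    sizes = [in_px]
    s = in_px
    for _ in range(5):      # five Conv-Conv-Pool stages
        s -= 4              # two valid 3x3 convolutions
        assert s % 2 == 0, 'pre-pool extent must be even'
        s //= 2             # 2x2 max pool
        sizes.append(s)
    return sizes

print(down_path_sizes(572 - 32 * 9))  # [284, 140, 68, 32, 14, 5]
print(down_path_sizes(572))           # [572, 284, 140, 68, 32, 14]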
Example #13
def create_model():
    from elektronn2 import neuromancer
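    # Note: `batch_size` and `dr` are expected to be defined in the enclosing
    # config/module scope (an elektronn2 convention for model config files).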

    act = 'relu'
    in_sh = (batch_size, 4, 3, 128, 256)
    inp = neuromancer.Input(in_sh, 'b,f,z,y,x', name='raw')

    out0 = neuromancer.Conv(inp,
                            13, (1, 5, 5), (1, 2, 2),
                            activation_func=act,
                            dropout_rate=dr)
    out0 = neuromancer.Conv(out0,
                            19, (1, 5, 5), (1, 2, 2),
                            activation_func=act,
                            dropout_rate=dr)
    out0 = neuromancer.Conv(out0,
                            25, (1, 4, 4), (1, 2, 2),
                            activation_func=act,
                            dropout_rate=dr)
    out0 = neuromancer.Conv(out0,
                            25, (1, 4, 4), (1, 2, 2),
                            activation_func=act,
                            dropout_rate=dr)
    out0 = neuromancer.Conv(out0,
                            30, (1, 2, 2), (1, 2, 2),
                            activation_func=act,
                            dropout_rate=dr)
    out0 = neuromancer.Conv(out0,
                            30, (1, 1, 1), (1, 2, 2),
                            activation_func=act,
                            dropout_rate=dr)
    out = neuromancer.Conv(out0,
                           31, (1, 1, 1), (1, 1, 1),
                           activation_func=act,
                           dropout_rate=dr)
    out0, out1, out2 = neuromancer.split(out, axis="z", n_out=3)
    out0 = neuromancer.Reshape(out0,
                               shape=(inp.shape[0], out0.shape.stripbatch_prod,
                                      1),
                               tags="b,f,z")
    out1 = neuromancer.Reshape(out1,
                               shape=(inp.shape[0], out1.shape.stripbatch_prod,
                                      1),
                               tags="b,f,z")
    out2 = neuromancer.Reshape(out2,
                               shape=(inp.shape[0], out2.shape.stripbatch_prod,
                                      1),
                               tags="b,f,z")
    out = neuromancer.Concat([out0, out1, out2], axis="z")
    out = neuromancer.Perceptron(out, 40, flatten=False, dropout_rate=dr)
    out = neuromancer.Perceptron(out, 10, flatten=False, dropout_rate=dr)
    out0, out1, out2 = neuromancer.split(out, axis="z", n_out=3)
    d_small = neuromancer.EuclideanDistance(out0, out1)
    d_big = neuromancer.EuclideanDistance(out0, out2)
    loss = neuromancer.RampLoss(d_small, d_big, margin=0.2)
    loss = neuromancer.AggregateLoss(loss)
    model = neuromancer.model_manager.getmodel()
    # model = neuromancer.model.modelload("/wholebrain/scratch/pschuber/CNN_Training/nupa_cnn/t_net/ssv6_tripletnet_v9/ssv6_tripletnet_v9-FINAL.mdl")
    model.designate_nodes(input_node=inp,
                          target_node=None,
                          loss_node=loss,
                          prediction_node=out,
                          prediction_ext=[loss, loss, out])

    # params = neuromancer.model.params_from_model_file("/wholebrain/scratch/pschuber/CNN_Training/nupa_cnn/t_net/ssv6_tripletnet_v9/ssv6_tripletnet_v9-FINAL.mdl")
    # params = dict(filter(lambda x: x[0].startswith('conv'), params.items()))
    # model.set_param_values(params)
    return model


# if __name__ == "__main__":
# model = create_model()
# "Test" if model is saveable
# model.save("/tmp/"+save_name)
Example #14
    ])
    f_grad = theano.function([x, skel_var], grad_var + [loss_var, pred_var])

    test_vals = np.array([1, 2, 3], dtype=np.float32)
    skel = DummySkel()

    print(f(test_vals, skel))
    print(f_grad(test_vals, skel))
    theano.printing.pydotprint(f_grad, '/tmp/step_result.svg')

    ###########################################################################

    from elektronn2 import neuromancer
    import theano
    act = 'tanh'
    data = neuromancer.Input((1, 20), 'b,f', name='data')
    mem_0 = neuromancer.Input((1, 120), 'b,f', name='mem')
    mlp1 = neuromancer.Dot(data, 120, activation_func=act)
    join = neuromancer.Concat([mlp1, mem_0])
    out = neuromancer.Dot(join, 120, activation_func=act)
    out2 = neuromancer.Dot(out, 13, activation_func='lin')
    # recurrent    = neuromancer.Scan(out, in_memory=mem_0, n_steps=10)
    recurrent, out2r = neuromancer.Scan([out, out2],
                                        out_memory=out,
                                        in_memory=mem_0,
                                        n_steps=7)
    loss = neuromancer.AggregateLoss(recurrent)
    loss = neuromancer.AggregateLoss(out2r)

    grad = theano.grad(loss.output,
                       loss.all_trainable_params.values(),
Example #15
    "/Users/liammcgoldrick/Code/ELEKTRONN2/examples/affinities.npy")

raw_data = np.asarray(raw_data)
print(np.shape(affinities))

# Data format:
#   raw data   = (1, 16, 128, 128) as np array
#   affinities = (3, 16, 128, 128) as np array

###################
#   BUILD GRAPH   #
###################

in_sh = (None, 1, 16, 128, 128)

inp = nm.Input(in_sh, 'b,f,z,x,y', name='raw')  # high res

# Convolution, downsampling of intermediate features

conv0 = nm.Conv(inp, 8, (15, 15, 15), (1, 1, 1), "same", name="c0")
down0 = nm.Pool(conv0, (1, 4, 4), mode="max", name="d0")  # full

conv1 = nm.Conv(down0, 32, (15, 15, 15), (1, 1, 1), "same", name="c1")
down1 = nm.Pool(conv1, (1, 2, 2), mode="max", name="d1")  # high

conv2 = nm.Conv(down1, 64, (15, 15, 15), (1, 1, 1), "same", name="c2")  # mid
down2 = nm.Pool(conv2, (1, 2, 2), mode="max", name="d2")  # high

conv3 = nm.Conv(down2, 64, (15, 15, 15), (1, 1, 1), "same", name="c3")  # low

# merge C2 and C3 and convolve
Example #16
def demo_new_gm():
    import theano.tensor as T
    from elektronn2 import neuromancer
    from elektronn2.neuromancer.model import modelload

    mfp = False
    in_sh = (1, 1, 15, 198, 198) if mfp else (1, 1, 25, 171, 171)
    inp = neuromancer.Input(in_sh, 'b,f,z,x,y', name='raw')

    out = neuromancer.Conv(inp,
                           20, (1, 6, 6), (1, 1, 1),
                           mfp=mfp,
                           batch_normalisation='train')
    out = neuromancer.Conv(out, 40, (1, 5, 5), (1, 2, 2), mfp=mfp)
    out = neuromancer.Conv(out, 50, (1, 4, 4), (1, 2, 2), mfp=mfp)
    out = neuromancer.Conv(out, 80, (1, 4, 4), (1, 1, 1), mfp=mfp)

    out = neuromancer.Conv(out, 80, (4, 1, 1), (2, 1, 1),
                           mfp=mfp)  # first z kernel, 2 pool
    out = neuromancer.Conv(out, 80, (3, 4, 4), (1, 1, 1), mfp=mfp)
    out = neuromancer.Conv(out, 80, (3, 4, 4), (1, 1, 1), mfp=mfp)
    out = neuromancer.Conv(out, 100, (2, 4, 4), (1, 1, 1), mfp=mfp)

    out = neuromancer.Conv(out, 120, (2, 4, 4), (1, 1, 1), mfp=mfp)
    out = neuromancer.Conv(out, 120, (1, 2, 2), (1, 1, 1), mfp=mfp)

    out = neuromancer.Conv(out, 120, (1, 1, 1), (1, 1, 1), mfp=mfp)
    out1, out2 = neuromancer.split(out, 1, n_out=2)

    probs = neuromancer.Conv(out1,
                             2, (1, 1, 1), (1, 1, 1),
                             mfp=mfp,
                             activation_func='lin')
    probs = neuromancer.Softmax(probs, name='probs')
    discard, mode = neuromancer.split(probs, 1, n_out=2)

    concentration = neuromancer.Conv(out2,
                                     1, (1, 1, 1), (1, 1, 1),
                                     mfp=mfp,
                                     activation_func='lin',
                                     name='concentration')
    t_sh = probs.shape.copy()
    t_sh.updateshape('f', 1)
    target = neuromancer.Input_like(t_sh, dtype='float32', name='target')

    loss_pix = neuromancer.BetaNLL(mode, concentration, target)
    loss = neuromancer.AggregateLoss(loss_pix)
    errors = neuromancer.Errors(probs, target, target_is_sparse=True)
    prediction = neuromancer.Concat([mode, concentration],
                                    axis=1,
                                    name='prediction')

    loss_std = neuromancer.ApplyFunc(loss_pix, T.std)

    model = neuromancer.model_manager.getmodel()
    model.designate_nodes(input_node=inp,
                          target_node=target,
                          loss_node=loss,
                          prediction_node=prediction,
                          prediction_ext=[loss, errors, prediction])

    ### --- ###

    model2 = neuromancer.model_manager.newmodel("second")
    inp2 = neuromancer.Input(in_sh, 'b,f,z,x,y', name='raw')

    out2 = neuromancer.Conv(inp2, 20, (1, 6, 6), (1, 1, 1), mfp=mfp)
    out2 = neuromancer.Conv(out2, 40, (1, 5, 5), (1, 2, 2), mfp=mfp)
    out2 = neuromancer.Conv(out2, 50, (1, 4, 4), (1, 2, 2), mfp=mfp)

    out2 = neuromancer.Conv(out2, 120, (2, 4, 4), (1, 1, 1), mfp=mfp)
    out2 = neuromancer.Conv(out2, 120, (1, 2, 2), (1, 1, 1), mfp=mfp)

    out2 = neuromancer.Conv(out2, 120, (1, 1, 1), (1, 1, 1), mfp=mfp)

    probs2 = neuromancer.Conv(out2,
                              2, (1, 1, 1), (1, 1, 1),
                              mfp=mfp,
                              activation_func='lin')
    probs2 = neuromancer.Softmax(probs2, name='probs')
    t_sh = probs2.shape.copy()
    t_sh.updateshape('f', 1)
    target2 = neuromancer.Input_like(t_sh, dtype='float32', name='target')

    loss_pix2 = neuromancer.MultinoulliNLL(probs2, target2)
    loss2 = neuromancer.AggregateLoss(loss_pix2)
    errors2 = neuromancer.Errors(probs2, target2, target_is_sparse=True)
    model2.designate_nodes(input_node=inp2,
                           target_node=target2,
                           loss_node=loss2,
                           prediction_node=probs2,
                           prediction_ext=[loss2, errors2, probs2])

    model.save('/tmp/test.pkl')
    model2.save('/tmp/test2.pkl')
    model2_reloaded = modelload('/tmp/test2.pkl')
    model2_reloaded.save('/tmp/test2_reloaded.pkl')

    print(neuromancer.model_manager)