def create_model():
    from elektronn2 import neuromancer
    in_sh = (20, 20 * 58)
    inp = neuromancer.Input(in_sh, 'b,f', name='raw')
    out = neuromancer.Perceptron(inp, 700, 'lin')
    out = neuromancer.Perceptron(out, 500, 'lin')
    out = neuromancer.Perceptron(out, 300, 'lin')
    out = neuromancer.Perceptron(out, 2 * 58, 'lin')
    out = neuromancer.Softmax(out, n_indep=58)

    target = neuromancer.Input_like(out, override_f=1, name='target')
    weights = neuromancer.ValueNode((116,), 'f', value=[0.2, 1.8] * 58)
    loss = neuromancer.MultinoulliNLL(out, target, target_is_sparse=True,
                                      class_weights=weights, name='nll')
    # Objective
    loss = neuromancer.AggregateLoss(loss)
    # Monitoring / Debug outputs
    errors = neuromancer.Errors(out, target, target_is_sparse=True)

    model = neuromancer.model_manager.getmodel()
    model.designate_nodes(
        input_node=inp, target_node=target, loss_node=loss,
        prediction_node=out, prediction_ext=[loss, errors, out]
    )
    return model
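# The classifier above makes 58 independent binary decisions (n_indep=58), so
# the softmax layer has 2 * 58 = 116 output units and the class-weight vector
# repeats the pair (0.2, 1.8) once per decision. A minimal sketch of that
# weight layout in plain NumPy (the names below are only illustrative and not
# part of the model definition above):
import numpy as np

class_weights = np.array([0.2, 1.8] * 58, dtype=np.float32)
assert class_weights.shape == (116,)       # one (0.2, 1.8) pair per decision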
def create_model():
    from elektronn2 import neuromancer
    act = 'relu'
    # batch_size, data_init_kwargs, nb_views, x, y and dr (dropout rate) are
    # expected to be defined in the surrounding config.
    in_sh = (batch_size, 1 if data_init_kwargs["raw_only"] else 4,
             nb_views, int(x), int(y))
    inp = neuromancer.Input(in_sh, 'b,f,z,y,x', name='raw')

    out0 = neuromancer.Conv(inp, 13, (1, 5, 5), (1, 2, 2),
                            activation_func=act, dropout_rate=dr)
    out0 = neuromancer.Conv(out0, 19, (1, 5, 5), (1, 2, 2),
                            activation_func=act, dropout_rate=dr)
    out0 = neuromancer.Conv(out0, 25, (1, 4, 4), (1, 2, 2),
                            activation_func=act, dropout_rate=dr)
    out0 = neuromancer.Conv(out0, 25, (1, 4, 4), (1, 2, 2),
                            activation_func=act, dropout_rate=dr)
    out0 = neuromancer.Conv(out0, 30, (1, 2, 2), (1, 2, 2),
                            activation_func=act, dropout_rate=dr)
    out0 = neuromancer.Conv(out0, 30, (1, 1, 1), (1, 2, 2),
                            activation_func=act, dropout_rate=dr)
    out = neuromancer.Conv(out0, 31, (1, 1, 1), (1, 1, 1),
                           activation_func=act, dropout_rate=dr)
    out = neuromancer.Perceptron(out, 50, flatten=True, dropout_rate=dr)
    out = neuromancer.Perceptron(out, 30, flatten=True, dropout_rate=dr)
    out = neuromancer.Perceptron(out, 4, activation_func='lin')
    out = neuromancer.Softmax(out)

    target = neuromancer.Input_like(out, override_f=1, name='target')
    weights = neuromancer.ValueNode((4,), 'f', value=(1, 1, 2, 2))
    loss = neuromancer.MultinoulliNLL(out, target, name='nll_',
                                      target_is_sparse=True,
                                      class_weights=weights)
    # Objective
    loss = neuromancer.AggregateLoss(loss)
    # Monitoring / Debug outputs
    errors = neuromancer.Errors(out, target, target_is_sparse=True)

    model = neuromancer.model_manager.getmodel()
    model.designate_nodes(input_node=inp, target_node=target, loss_node=loss,
                          prediction_node=out,
                          prediction_ext=[loss, errors, out])
    return model
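# Rough sanity check for the spatial footprint of the conv stack above along
# the y/x axes, assuming 'valid' convolutions followed by max-pooling with the
# given pool shapes (ELEKTRONN2's default conv_mode is 'valid'). The input
# edge length 128 is only an illustrative stand-in for whatever x, y the
# surrounding config uses; helper name and values are assumptions, not part of
# the model above.
def valid_conv_pool_sizes(size, filters_and_pools):
    sizes = [size]
    for f, p in filters_and_pools:
        size = (size - f + 1) // p   # valid conv, then non-overlapping pool
        sizes.append(size)
    return sizes

# (filter, pool) along y/x for the seven Conv layers above
conv_yx = [(5, 2), (5, 2), (4, 2), (4, 2), (2, 2), (1, 2), (1, 1)]
print(valid_conv_pool_sizes(128, conv_yx))  # [128, 62, 29, 13, 5, 2, 1, 1]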
def create_model():
    from elektronn2 import neuromancer
    in_sh = (40, 40, 58)
    inp = neuromancer.Input(in_sh, 'r,b,f', name='raw')
    inp0, _ = neuromancer.split(inp, 'r', index=1, strip_singleton_dims=True)
    inp_mem = neuromancer.InitialState_like(
        inp0, override_f=750, init_kwargs={'mode': 'fix-uni', 'scale': 0.1})
    out = neuromancer.GRU(inp0, inp_mem, 750)
    out = neuromancer.Scan(out, inp_mem, in_iterate=inp, in_iterate_0=inp0,
                           n_steps=40, last_only=True)
    out = neuromancer.Perceptron(out, 2 * 58, 'lin')
    out = neuromancer.Softmax(out, n_indep=58)

    target = neuromancer.Input_like(out, override_f=1, name='target')
    weights = neuromancer.ValueNode((116,), 'f', value=[0.2, 1.8] * 58)
    loss = neuromancer.MultinoulliNLL(out, target, name='nll',
                                      target_is_sparse=True,
                                      class_weights=weights)
    # Objective
    loss = neuromancer.AggregateLoss(loss)
    # Monitoring / Debug outputs
    errors = neuromancer.Errors(out, target, target_is_sparse=True)

    model = neuromancer.model_manager.getmodel()
    model.designate_nodes(input_node=inp, target_node=target, loss_node=loss,
                          prediction_node=out,
                          prediction_ext=[loss, errors, out])
    return model
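# Conceptually, the Scan node above unrolls the GRU step over the 40 entries
# of the recurrent 'r' axis of the (40, 40, 58) input and, because
# last_only=True, only the final hidden state (750 features per sample)
# reaches the classifier. A minimal sketch of that control flow in NumPy;
# scan_last_only and the dummy step function are illustrative stand-ins, not
# the ELEKTRONN2 implementation:
import numpy as np

def scan_last_only(x_seq, h0, gru_step):
    # x_seq: (n_steps, batch, in_features), h0: (batch, hidden_features)
    h = h0
    for x_t in x_seq:          # iterate over the recurrent axis
        h = gru_step(x_t, h)   # GRU update producing the next hidden state
    return h                   # last_only: only the final state is kept

x_seq = np.zeros((40, 40, 58), dtype=np.float32)    # matches in_sh above
h0 = np.zeros((40, 750), dtype=np.float32)          # initial GRU state
h_last = scan_last_only(x_seq, h0, lambda x, h: h)  # dummy step function
assert h_last.shape == (40, 750)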