def __init__(self, input_shapes, input_type='yx', **optsettings):
    lr = optsettings.get('learning_rate') or 0.01
    optimizer = optsettings.get('optimizer') or 'adam'
    loss_fn = optsettings.get('loss_fn') or 'categorical_crossentropy'
    builder = builders.GraphBuilder()
    shape = input_shapes[input_type]
    in_name = builder.add_input_layer(shape, input_type, name='input')
    builder.start_new_path()
    builder.add_reshape_layer((*shape, 1))
    # two conv/pool/LRN blocks followed by two dropout-regularized FC layers
    builder.add_conv2d_layer(32, 3, activation='relu', regularizer='L2')
    builder.add_maxpool2d_layer(2)
    builder.add_lrn_layer()
    builder.add_conv2d_layer(64, 3, activation='relu', regularizer='L2')
    builder.add_maxpool2d_layer(2)
    builder.add_lrn_layer()
    builder.add_fc_layer(128, activation='tanh')
    builder.add_dropout_layer(0.8)
    builder.add_fc_layer(256, activation='tanh')
    builder.add_dropout_layer(0.8)
    out_name = builder.add_fc_layer(2, activation='softmax')
    builder.end_current_path()
    builder.finalize(out_name, name='target', learning_rate=lr,
                     optimizer=optimizer, loss=loss_fn)
    super().__init__(builder)
    self.input_type = input_type
    self.input_shape = input_shapes[input_type]
    self.in_name, self.out_name = in_name, out_name
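# A minimal usage sketch of the classifier built above. The wrapping class name
# (Conv2DNetwork), the example shape, and the downstream training call are
# illustrative assumptions, not part of this module:
#
#     shapes = {'yx': (48, 48)}
#     net = Conv2DNetwork(shapes, input_type='yx', learning_rate=0.005)
#     # net is then trained/evaluated through whatever API the shared base class
#     # (which receives the finalized builder via super().__init__) exposes.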
def __init__(self):
    builder = builders.GraphBuilder()
    input_layer = builder.add_input_layer((3, 3, 1), 'test')
    conv1 = builder.add_conv2d_layer(10, 3, filter_strides=1,
                                     weights_init='zeros', bias_init='zeros')
    fc1 = builder.add_fc_layer(10, weights_init='zeros', bias_init='zeros')
    fc2 = builder.add_fc_layer(3, weights_init='zeros', bias_init='zeros')
    builder.finalize(fc2)
    super(MockNeuralNetwork, self).__init__(builder)
    all_layers = builder.layers_dict
    self._in, self._out = input_layer, fc2
    self.exp_inputs = {input_layer: all_layers[input_layer]}
    self.exp_output = {fc2: all_layers[fc2]}
    self.exp_trainable_layers = {
        conv1: all_layers[conv1],
        fc1: all_layers[fc1],
        fc2: all_layers[fc2],
    }
    self.exp_hidden_layers = {
        conv1: all_layers[conv1],
        fc1: all_layers[fc1],
    }
    self.exp_paths = {input_layer: [conv1, fc1, fc2]}
    example_input = nprand.randint(low=0, high=255, size=3 * 3)
    self.example_input = {self._in: example_input.reshape((1, 3, 3, 1))}
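# A sketch of how this mock might be exercised in a test. `nprand` is assumed to be
# numpy.random imported under that alias; the property names being compared against
# the exp_* attributes are illustrative assumptions about the base class:
#
#     net = MockNeuralNetwork()
#     # e.g. assert net.trainable_layers == net.exp_trainable_layers   # hypothetical property
#     # e.g. feed net.example_input through the graph and check the output shape is (1, 3)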
def __init__(self, input_shapes, input_type='yx', **optsettings):
    lr = optsettings.get('learning_rate') or 0.001
    optimizer = optsettings.get('optimizer') or 'adam'
    loss_fn = optsettings.get('loss_fn') or 'categorical_crossentropy'
    builder = builders.GraphBuilder()
    shape = input_shapes[input_type]
    in_name = builder.add_input_layer(shape, input_type, name='input')
    builder.start_new_path()
    builder.add_reshape_layer((*shape, 1))
    builder.add_conv2d_layer(10, 3, filter_strides=1, activation='relu',
                             regularizer='L2', padding='same')
    encoder = builder.add_conv2d_layer(10, 3, filter_strides=1, activation='relu',
                                       regularizer='L2', padding='same')
    builder.add_conv2d_layer(1, 3, filter_strides=1, activation='relu',
                             regularizer='L2', padding='same')
    decoder = builder.add_reshape_layer(shape)
    builder.end_current_path()
    builder.finalize(decoder, name='target', learning_rate=lr,
                     optimizer=optimizer, loss=loss_fn)
    super().__init__(builder, encoder)
    self.input_type = input_type
    self.input_shape = input_shapes[input_type]
    self.in_name, self.out_name = in_name, decoder
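# A hedged sketch of constructing the autoencoder above. The wrapping class name
# (Conv2DAutoencoder) and the example arguments are illustrative assumptions:
#
#     shapes = {'yx': (48, 48)}
#     ae = Conv2DAutoencoder(shapes, input_type='yx', learning_rate=0.001)
#     # the middle conv layer (`encoder`) is passed to super().__init__ alongside the
#     # builder, so its activations can be read out as the compressed representation.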
def __init__(self, input_shapes, **optsettings):
    lr = optsettings.get('learning_rate') or 0.001
    optimizer = optsettings.get('optimizer') or 'adam'
    loss_fn = optsettings.get('loss_fn') or 'categorical_crossentropy'
    builder = builders.GraphBuilder()

    # yx branch: two conv/pool/LRN blocks, flattened for merging
    shape = input_shapes['yx']
    in_yx = builder.add_input_layer(shape, name='input_yx')
    builder.start_new_path()
    builder.add_reshape_layer((*shape, 1))
    builder.add_conv2d_layer(32, 3, filter_strides=1, activation='relu',
                             regularizer='L2', padding='same')
    builder.add_maxpool2d_layer(2)
    builder.add_lrn_layer()
    builder.add_conv2d_layer(64, 3, filter_strides=1, activation='relu',
                             regularizer='L2', padding='same')
    builder.add_maxpool2d_layer(2)
    builder.add_lrn_layer()
    yx_out = builder.add_flatten_layer()
    builder.end_current_path()

    # gtux branch: same architecture as the yx branch
    shape = input_shapes['gtux']
    in_gtux = builder.add_input_layer(shape, name='input_gtux')
    builder.start_new_path()
    builder.add_reshape_layer((*shape, 1))
    builder.add_conv2d_layer(32, 3, filter_strides=1, activation='relu',
                             regularizer='L2', padding='same')
    builder.add_maxpool2d_layer(2)
    builder.add_lrn_layer()
    builder.add_conv2d_layer(64, 3, filter_strides=1, activation='relu',
                             regularizer='L2', padding='same')
    builder.add_maxpool2d_layer(2)
    builder.add_lrn_layer()
    gtux_out = builder.add_flatten_layer()
    builder.end_current_path()

    # gtuy branch: same architecture as the yx branch
    shape = input_shapes['gtuy']
    in_gtuy = builder.add_input_layer(shape, name='input_gtuy')
    builder.start_new_path()
    builder.add_reshape_layer((*shape, 1))
    builder.add_conv2d_layer(32, 3, filter_strides=1, activation='relu',
                             regularizer='L2', padding='same')
    builder.add_maxpool2d_layer(2)
    builder.add_lrn_layer()
    builder.add_conv2d_layer(64, 3, filter_strides=1, activation='relu',
                             regularizer='L2', padding='same')
    builder.add_maxpool2d_layer(2)
    builder.add_lrn_layer()
    gtuy_out = builder.add_flatten_layer()
    builder.end_current_path()

    # merge the three flattened branches and classify via dropout-regularized FC layers
    builder.add_merge_layer((yx_out, gtux_out, gtuy_out), 'concat')
    builder.start_new_path()
    builder.add_fc_layer(128, activation='relu')
    builder.add_dropout_layer(0.5)
    builder.add_fc_layer(50, activation='relu')
    builder.add_dropout_layer(0.5)
    out_name = builder.add_fc_layer(2, activation='softmax')
    builder.end_current_path()
    builder.finalize(out_name, name='target', learning_rate=lr,
                     optimizer=optimizer, loss=loss_fn)
    super().__init__(builder)
    self.input_shapes = input_shapes.copy()
    self.in_yx, self.in_gtux, self.in_gtuy = in_yx, in_gtux, in_gtuy
    self.out_name = out_name
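# A hedged sketch of constructing the three-input network above. The wrapping class
# name (MultiInputConv2DNetwork) and the example shape values are illustrative
# assumptions; only the 'yx'/'gtux'/'gtuy' keys are required by this constructor:
#
#     shapes = {'yx': (48, 48), 'gtux': (20, 48), 'gtuy': (20, 48)}
#     net = MultiInputConv2DNetwork(shapes, learning_rate=0.0005)
#     # inputs are fed via the layer names stored in net.in_yx, net.in_gtux and
#     # net.in_gtuy; predictions come from the softmax layer named by net.out_name.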