Example #1
def block8(net, scale=1.0, activation="relu"):
    tower_conv = relu(batch_normalization(conv_2d(net, 192, 1, bias=False, activation=None, name='Conv2d_1x1')))
    tower_conv1_0 = relu(batch_normalization(conv_2d(net, 192, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 224, [1,3], bias=False, activation=None, name='Conv2d_0b_1x3')))
    tower_conv1_2 = relu(batch_normalization(conv_2d(tower_conv1_1, 256, [3,1], bias=False, name='Conv2d_0c_3x1')))
    tower_mixed = merge([tower_conv,tower_conv1_2], mode='concat', axis=3)
    tower_out = relu(batch_normalization(conv_2d(tower_mixed, net.get_shape()[3], 1, bias=False, activation=None, name='Conv2d_1x1')))
    net += scale * tower_out
    if activation:
        if isinstance(activation, str):
            net = activations.get(activation)(net)
        elif hasattr(activation, '__call__'):
            net = activation(net)
        else:
            raise ValueError("Invalid Activation.")
    return net
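The block helpers in these examples rely on a handful of TFLearn imports that the snippets themselves omit. A minimal sketch of the assumed imports and of how block8 might be exercised on a dummy feature map; the 8x8x192 shape is an arbitrary placeholder, not taken from the original project:

import tflearn
from tflearn import activations
from tflearn.activations import relu
from tflearn.layers.conv import conv_2d
from tflearn.layers.merge_ops import merge
from tflearn.layers.normalization import batch_normalization

net = tflearn.input_data(shape=[None, 8, 8, 192])  # placeholder feature map
net = block8(net, scale=0.2)                       # one residual refinement step
net = block8(net, activation=None)                 # a final call often skips the activation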
Example #2
File: train.py  Project: Tairy/python-lab
def cnn():
    network = input_data(shape=[None, IMAGE_HEIGHT, IMAGE_WIDTH, 1], name='input')
    network = conv_2d(network, 8, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = batch_normalization(network)
    network = conv_2d(network, 16, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = batch_normalization(network)
    network = conv_2d(network, 16, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = batch_normalization(network)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, CODE_LEN * MAX_CHAR, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    return network
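A hedged sketch of how this captcha network might be trained, assuming the TFLearn layer imports that train.py already has; the image dimensions, code length, and the zero-filled arrays below are placeholders standing in for what train.py actually loads:

import numpy as np
import tflearn

IMAGE_HEIGHT, IMAGE_WIDTH = 60, 160   # assumed captcha image size
CODE_LEN, MAX_CHAR = 4, 36            # assumed code length and alphabet size

model = tflearn.DNN(cnn(), tensorboard_verbose=0)
X = np.zeros((32, IMAGE_HEIGHT, IMAGE_WIDTH, 1), dtype=np.float32)   # placeholder images
Y = np.zeros((32, CODE_LEN * MAX_CHAR), dtype=np.float32)            # placeholder labels
model.fit({'input': X}, {'target': Y}, n_epoch=1, batch_size=16, show_metric=True)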
Example #3
def _model5():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    def block35(net, scale=1.0, activation="relu"):
        tower_conv = relu(batch_normalization(conv_2d(net, 32, 1, bias=False, activation=None, name='Conv2d_1x1')))
        tower_conv1_0 = relu(batch_normalization(conv_2d(net, 32, 1, bias=False, activation=None,name='Conv2d_0a_1x1')))
        tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 32, 3, bias=False, activation=None,name='Conv2d_0b_3x3')))
        tower_conv2_0 = relu(batch_normalization(conv_2d(net, 32, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
        tower_conv2_1 = relu(batch_normalization(conv_2d(tower_conv2_0, 48,3, bias=False, activation=None, name='Conv2d_0b_3x3')))
        tower_conv2_2 = relu(batch_normalization(conv_2d(tower_conv2_1, 64,3, bias=False, activation=None, name='Conv2d_0c_3x3')))
        tower_mixed = merge([tower_conv, tower_conv1_1, tower_conv2_2], mode='concat', axis=3)
        tower_out = relu(batch_normalization(conv_2d(tower_mixed, net.get_shape()[3], 1, bias=False, activation=None, name='Conv2d_1x1')))
        net += scale * tower_out
        if activation:
            if isinstance(activation, str):
                net = activations.get(activation)(net)
            elif hasattr(activation, '__call__'):
                net = activation(net)
            else:
                raise ValueError("Invalid Activation.")
        return net

    def block17(net, scale=1.0, activation="relu"):
        tower_conv = relu(batch_normalization(conv_2d(net, 192, 1, bias=False, activation=None, name='Conv2d_1x1')))
        tower_conv_1_0 = relu(batch_normalization(conv_2d(net, 128, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
        tower_conv_1_1 = relu(batch_normalization(conv_2d(tower_conv_1_0, 160,[1,7], bias=False, activation=None,name='Conv2d_0b_1x7')))
        tower_conv_1_2 = relu(batch_normalization(conv_2d(tower_conv_1_1, 192, [7,1], bias=False, activation=None,name='Conv2d_0c_7x1')))
        tower_mixed = merge([tower_conv,tower_conv_1_2], mode='concat', axis=3)
        tower_out = relu(batch_normalization(conv_2d(tower_mixed, net.get_shape()[3], 1, bias=False, activation=None, name='Conv2d_1x1')))
        net += scale * tower_out
        if activation:
            if isinstance(activation, str):
                net = activations.get(activation)(net)
            elif hasattr(activation, '__call__'):
                net = activation(net)
            else:
                raise ValueError("Invalid Activation.")
        return net


    def block8(net, scale=1.0, activation="relu"):
        tower_conv = relu(batch_normalization(conv_2d(net, 192, 1, bias=False, activation=None, name='Conv2d_1x1')))
        tower_conv1_0 = relu(batch_normalization(conv_2d(net, 192, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
        tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 224, [1,3], bias=False, activation=None, name='Conv2d_0b_1x3')))
        tower_conv1_2 = relu(batch_normalization(conv_2d(tower_conv1_1, 256, [3,1], bias=False, name='Conv2d_0c_3x1')))
        tower_mixed = merge([tower_conv,tower_conv1_2], mode='concat', axis=3)
        tower_out = relu(batch_normalization(conv_2d(tower_mixed, net.get_shape()[3], 1, bias=False, activation=None, name='Conv2d_1x1')))
        net += scale * tower_out
        if activation:
            if isinstance(activation, str):
                net = activations.get(activation)(net)
            elif hasattr(activation, '__call__'):
                net = activation(net)
            else:
                raise ValueError("Invalid Activation.")
        return net


    num_classes = len(Y[0])
    dropout_keep_prob = 0.8

    network = input_data(shape=[None, inputSize, inputSize, dim],
             name='input',
             data_preprocessing=img_prep,
             data_augmentation=img_aug)
    conv1a_3_3 = relu(batch_normalization(conv_2d(network, 32, 3, strides=2, bias=False, padding='VALID',activation=None,name='Conv2d_1a_3x3')))
    conv2a_3_3 = relu(batch_normalization(conv_2d(conv1a_3_3, 32, 3, bias=False, padding='VALID',activation=None, name='Conv2d_2a_3x3')))
    conv2b_3_3 = relu(batch_normalization(conv_2d(conv2a_3_3, 64, 3, bias=False, activation=None, name='Conv2d_2b_3x3')))
    maxpool3a_3_3 = max_pool_2d(conv2b_3_3, 3, strides=2, padding='VALID', name='MaxPool_3a_3x3')
    conv3b_1_1 = relu(batch_normalization(conv_2d(maxpool3a_3_3, 80, 1, bias=False, padding='VALID',activation=None, name='Conv2d_3b_1x1')))
    conv4a_3_3 = relu(batch_normalization(conv_2d(conv3b_1_1, 192, 3, bias=False, padding='VALID',activation=None, name='Conv2d_4a_3x3')))
    maxpool5a_3_3 = max_pool_2d(conv4a_3_3, 3, strides=2, padding='VALID', name='MaxPool_5a_3x3')

    tower_conv = relu(batch_normalization(conv_2d(maxpool5a_3_3, 96, 1, bias=False, activation=None, name='Conv2d_5b_b0_1x1')))

    tower_conv1_0 = relu(batch_normalization(conv_2d(maxpool5a_3_3, 48, 1, bias=False, activation=None, name='Conv2d_5b_b1_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 64, 5, bias=False, activation=None, name='Conv2d_5b_b1_0b_5x5')))

    tower_conv2_0 = relu(batch_normalization(conv_2d(maxpool5a_3_3, 64, 1, bias=False, activation=None, name='Conv2d_5b_b2_0a_1x1')))
    tower_conv2_1 = relu(batch_normalization(conv_2d(tower_conv2_0, 96, 3, bias=False, activation=None, name='Conv2d_5b_b2_0b_3x3')))
    tower_conv2_2 = relu(batch_normalization(conv_2d(tower_conv2_1, 96, 3, bias=False, activation=None,name='Conv2d_5b_b2_0c_3x3')))

    tower_pool3_0 = avg_pool_2d(maxpool5a_3_3, 3, strides=1, padding='same', name='AvgPool_5b_b3_0a_3x3')
    tower_conv3_1 = relu(batch_normalization(conv_2d(tower_pool3_0, 64, 1, bias=False, activation=None,name='Conv2d_5b_b3_0b_1x1')))

    tower_5b_out = merge([tower_conv, tower_conv1_1, tower_conv2_2, tower_conv3_1], mode='concat', axis=3)

    net = repeat(tower_5b_out, 10, block35, scale=0.17)
    '''
    tower_conv = relu(batch_normalization(conv_2d(net, 384, 3, bias=False, strides=2,activation=None, padding='VALID', name='Conv2d_6a_b0_0a_3x3')))
    tower_conv1_0 = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, activation=None, name='Conv2d_6a_b1_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 256, 3, bias=False, activation=None, name='Conv2d_6a_b1_0b_3x3')))
    tower_conv1_2 = relu(batch_normalization(conv_2d(tower_conv1_1, 384, 3, bias=False, strides=2, padding='VALID', activation=None,name='Conv2d_6a_b1_0c_3x3')))
    tower_pool = max_pool_2d(net, 3, strides=2, padding='VALID',name='MaxPool_1a_3x3')
    net = merge([tower_conv, tower_conv1_2, tower_pool], mode='concat', axis=3)
    net = repeat(net, 20, block17, scale=0.1)

    tower_conv = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
    tower_conv0_1 = relu(batch_normalization(conv_2d(tower_conv, 384, 3, bias=False, strides=2, padding='VALID', activation=None,name='Conv2d_0a_1x1')))

    tower_conv1 = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, padding='VALID', activation=None,name='Conv2d_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1,288,3, bias=False, strides=2, padding='VALID',activation=None, name='COnv2d_1a_3x3')))

    tower_conv2 = relu(batch_normalization(conv_2d(net, 256,1, bias=False, activation=None,name='Conv2d_0a_1x1')))
    tower_conv2_1 = relu(batch_normalization(conv_2d(tower_conv2, 288,3, bias=False, name='Conv2d_0b_3x3',activation=None)))
    tower_conv2_2 = relu(batch_normalization(conv_2d(tower_conv2_1, 320, 3, bias=False, strides=2, padding='VALID',activation=None, name='Conv2d_1a_3x3')))
    
    tower_pool = max_pool_2d(net, 3, strides=2, padding='VALID', name='MaxPool_1a_3x3')
    '''
    tower_conv = relu(batch_normalization(conv_2d(net, 384, 1, bias=False, strides=2,activation=None, padding='VALID', name='Conv2d_6a_b0_0a_3x3')))
    tower_conv1_0 = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, activation=None, name='Conv2d_6a_b1_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 256, 1, bias=False, activation=None, name='Conv2d_6a_b1_0b_3x3')))
    tower_conv1_2 = relu(batch_normalization(conv_2d(tower_conv1_1, 384, 1, bias=False, strides=2, padding='VALID', activation=None,name='Conv2d_6a_b1_0c_3x3')))
    tower_pool = max_pool_2d(net, 1, strides=2, padding='VALID',name='MaxPool_1a_3x3')
    net = merge([tower_conv, tower_conv1_2, tower_pool], mode='concat', axis=3)
    net = repeat(net, 20, block17, scale=0.1)

    tower_conv = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
    tower_conv0_1 = relu(batch_normalization(conv_2d(tower_conv, 384, 1, bias=False, strides=2, padding='VALID', activation=None,name='Conv2d_0a_1x1')))

    tower_conv1 = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, padding='VALID', activation=None,name='Conv2d_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1,288,1, bias=False, strides=2, padding='VALID',activation=None, name='COnv2d_1a_3x3')))

    tower_conv2 = relu(batch_normalization(conv_2d(net, 256,1, bias=False, activation=None,name='Conv2d_0a_1x1')))
    tower_conv2_1 = relu(batch_normalization(conv_2d(tower_conv2, 288,1, bias=False, name='Conv2d_0b_3x3',activation=None)))
    tower_conv2_2 = relu(batch_normalization(conv_2d(tower_conv2_1, 320, 1, bias=False, strides=2, padding='VALID',activation=None, name='Conv2d_1a_3x3')))
    
    
    tower_pool = max_pool_2d(net, 1, strides=2, padding='VALID', name='MaxPool_1a_3x3')
    
    ####
    net = merge([tower_conv0_1, tower_conv1_1,tower_conv2_2, tower_pool], mode='concat', axis=3)

    net = repeat(net, 9, block8, scale=0.2)
    net = block8(net, activation=None)

    net = relu(batch_normalization(conv_2d(net, 1536, 1, bias=False, activation=None, name='Conv2d_7b_1x1')))
    net = avg_pool_2d(net, net.get_shape().as_list()[1:3],strides=2, padding='VALID', name='AvgPool_1a_8x8')
    net = flatten(net)
    net = dropout(net, dropout_keep_prob)
    loss = fully_connected(net, num_classes,activation='softmax')


    network = tflearn.regression(loss, optimizer='RMSprop',
                         loss='categorical_crossentropy',
                         learning_rate=0.0001)
    model = tflearn.DNN(network, checkpoint_path='inception_resnet_v2',
                        max_checkpoints=1, tensorboard_verbose=2, tensorboard_dir="./tflearn_logs/")

    model.fit(X, Y, n_epoch=epochNum, validation_set=(xTest, yTest), shuffle=True,
              show_metric=True, batch_size=batchNum, snapshot_step=2000,
              snapshot_epoch=False, run_id='inception_resnet_v2_oxflowers17')

    if modelStore: model.save(_id + '-model.tflearn')
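_model5() reads several module-level names (X, Y, xTest, yTest, inputSize, dim, epochNum, batchNum, modelStore, _id, img_aug) that the snippet does not show. A sketch of the kind of setup it assumes; every concrete value here is illustrative only:

import tensorflow as tf
import tflearn
from tflearn.data_augmentation import ImageAugmentation
from tflearn.data_preprocessing import ImagePreprocessing

inputSize, dim = 299, 3             # assumed input resolution and channel count
epochNum, batchNum = 10, 32         # assumed epoch count and batch size
modelStore, _id = False, 'model5'   # whether to save the model, and its name prefix
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
# X, Y and xTest, yTest are the training and validation arrays loaded elsewhere.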
Example #4
def encoder_with_convs_and_symmetry(in_signal, n_filters=[64, 128, 256, 1024], filter_sizes=[1], strides=[1],
                                        b_norm=True, non_linearity=tf.nn.relu, regularizer=None, weight_decay=0.001,
                                        symmetry=tf.reduce_max, dropout_prob=None, pool=avg_pool_1d, pool_sizes=None, scope=None,
                                        reuse=False, padding='same', verbose=False, closing=None, conv_op=conv_1d, plm=False):
    '''An Encoder (recognition network), which maps inputs onto a latent space.
    '''

    if verbose:
        print 'Building Encoder'

    n_layers = len(n_filters)
    filter_sizes = replicate_parameter_for_all_layers(filter_sizes, n_layers)
    strides = replicate_parameter_for_all_layers(strides, n_layers)
    dropout_prob = replicate_parameter_for_all_layers(dropout_prob, n_layers)

    if n_layers < 2:
        raise ValueError('More than one layer is expected.')

    _range = (n_layers - 1) if plm else n_layers
    for i in xrange(_range):
        if i == 0:
            layer = in_signal

        name = 'encoder_conv_layer_' + str(i)
        scope_i = expand_scope_by_name(scope, name)
        layer = conv_op(layer, nb_filter=n_filters[i], filter_size=filter_sizes[i], strides=strides[i], regularizer=regularizer,
                        weight_decay=weight_decay, name=name, reuse=reuse, scope=scope_i, padding=padding)

        if verbose:
            print name, 'conv params = ', np.prod(layer.W.get_shape().as_list()) + np.prod(layer.b.get_shape().as_list()),

        if b_norm:
            name += '_bnorm'
            scope_i = expand_scope_by_name(scope, name)
            layer = batch_normalization(layer, name=name, reuse=reuse, scope=scope_i)
            if verbose:
                print 'bnorm params = ', np.prod(layer.beta.get_shape().as_list()) + np.prod(layer.gamma.get_shape().as_list())

        if non_linearity is not None:
            layer = non_linearity(layer)

        if pool is not None and pool_sizes is not None:
            if pool_sizes[i] is not None:
                layer = pool(layer, kernel_size=pool_sizes[i])

        if dropout_prob is not None and dropout_prob[i] > 0:
            layer = dropout(layer, 1.0 - dropout_prob[i])

        if verbose:
            print layer
            print 'output size:', np.prod(layer.get_shape().as_list()[1:]), '\n'

    if symmetry is not None:
        layer = symmetry(layer, axis=1)
        if verbose:
            print layer

    if closing is not None:
        layer = closing(layer)
        print layer

    if not plm:
        return layer
    else:
        name = 'encoder_z_mean'
        scope = expand_scope_by_name(scope, name)
        z_mean = fully_connected(layer, n_filters[-1], activation='linear', weights_init='xavier', name=name, regularizer=None, weight_decay=weight_decay, reuse=reuse, scope=scope)
        name = 'encoder_z_log_sigma_sq'
        scope = expand_scope_by_name(scope, name)
        z_log_sigma_sq = fully_connected(layer, n_filters[-1], activation='softplus', weights_init='xavier', name=name, regularizer=None, weight_decay=weight_decay, reuse=reuse, scope=scope)
        return z_mean, z_log_sigma_sq
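A sketch of how this encoder is typically applied to a batch of point clouds; the 2048-point size and the filter list are illustrative, and helpers such as conv_1d, replicate_parameter_for_all_layers, and expand_scope_by_name are assumed to come from the surrounding project:

import tensorflow as tf

in_pc = tf.placeholder(tf.float32, shape=[None, 2048, 3])   # B x N x 3 point clouds
latent = encoder_with_convs_and_symmetry(in_pc, n_filters=[64, 128, 128, 256, 128])
# symmetry=tf.reduce_max pools over the point axis (axis=1), so latent is B x 128.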
Example #5
def decoder_with_fc_only(
    latent_signal,
    layer_sizes=[],
    b_norm=True,
    b_norm_decay=0.9,
    non_linearity=tf.nn.relu,
    regularizer=None,
    weight_decay=0.001,
    reuse=False,
    scope=None,
    dropout_prob=None,
    b_norm_finish=False,
    b_norm_decay_finish=0.9,
    verbose=False,
):
    """A decoding network which maps points from the latent space back onto the data space.
    """
    if verbose:
        print("Building Decoder")

    n_layers = len(layer_sizes)
    dropout_prob = replicate_parameter_for_all_layers(dropout_prob, n_layers)

    if n_layers < 2:
        raise ValueError(
            "For an FC decoder with single a layer use simpler code.")

    for i in range(0, n_layers - 1):
        name = "decoder_fc_" + str(i)
        scope_i = expand_scope_by_name(scope, name)

        if i == 0:
            layer = latent_signal

        layer = fully_connected(
            layer,
            layer_sizes[i],
            activation="linear",
            weights_init="xavier",
            name=name,
            regularizer=regularizer,
            weight_decay=weight_decay,
            reuse=reuse,
            scope=scope_i,
        )

        if verbose:
            print((
                name,
                "FC params = ",
                np.prod(layer.W.get_shape().as_list()) +
                np.prod(layer.b.get_shape().as_list()),
            ))

        if b_norm:
            name += "_bnorm"
            scope_i = expand_scope_by_name(scope, name)
            layer = batch_normalization(layer,
                                        decay=b_norm_decay,
                                        name=name,
                                        reuse=reuse,
                                        scope=scope_i)
            if verbose:
                print((
                    "bnorm params = ",
                    np.prod(layer.beta.get_shape().as_list()) +
                    np.prod(layer.gamma.get_shape().as_list()),
                ))

        if non_linearity is not None:
            layer = non_linearity(layer)

        if dropout_prob is not None and dropout_prob[i] > 0:
            layer = dropout(layer, 1.0 - dropout_prob[i])

        if verbose:
            print(layer)
            print(("output size:", np.prod(layer.get_shape().as_list()[1:]),
                   "\n"))

    # Last decoding layer never has a non-linearity.
    name = "decoder_fc_" + str(n_layers - 1)
    scope_i = expand_scope_by_name(scope, name)
    layer = fully_connected(
        layer,
        layer_sizes[n_layers - 1],
        activation="linear",
        weights_init="xavier",
        name=name,
        regularizer=regularizer,
        weight_decay=weight_decay,
        reuse=reuse,
        scope=scope_i,
    )
    if verbose:
        print((
            name,
            "FC params = ",
            np.prod(layer.W.get_shape().as_list()) +
            np.prod(layer.b.get_shape().as_list()),
        ))

    if b_norm_finish:
        name += "_bnorm"
        scope_i = expand_scope_by_name(scope, name)
        layer = batch_normalization(layer,
                                    decay=b_norm_decay_finish,
                                    name=name,
                                    reuse=reuse,
                                    scope=scope_i)
        if verbose:
            print((
                "bnorm params = ",
                np.prod(layer.beta.get_shape().as_list()) +
                np.prod(layer.gamma.get_shape().as_list()),
            ))

    if verbose:
        print(layer)
        print(("output size:", np.prod(layer.get_shape().as_list()[1:]), "\n"))

    return layer
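A matching decoder call, under the assumption of a 128-dimensional latent code and a 2048-point output cloud; the layer sizes are illustrative rather than the project's actual configuration:

import tensorflow as tf

latent = tf.placeholder(tf.float32, shape=[None, 128])      # assumed latent code
out = decoder_with_fc_only(latent, layer_sizes=[256, 256, 2048 * 3], b_norm=False)
point_cloud = tf.reshape(out, [-1, 2048, 3])                # back to B x 2048 x 3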
Example #6
def add_rnn_unit(prefix, in_data, in_hidden, is_last, unitID=None, isFirst=None):
    """Add a RNN-like unit"""
    # hidden_size: numpy array
    # in_data, in_hidden are lists
    if isFirst is None or isFirst==True:
        flagReuse = False
    else:
        flagReuse = True
#        tf.get_variable_scope().reuse_variables()

    f_size = res_f_size
    n_class = num_class
    num_samples = int(in_data.get_shape()[0])
    with tf.variable_scope(prefix) as scope_prefix:
        n_fc = np.prod(hidden_size)

        # Fully connect for U*x
        with tf.variable_scope("FC_U", reuse=flagReuse) as scope:
            U_mult_x = linear(tf.reshape(in_data, [num_samples, -1]), n_fc)
            U_mult_x = batch_normalization(incoming=U_mult_x)

        # Sum up predecessors of hidden
        #if len(in_hidden) == 1:
        #    h_sum = in_hidden[0]
        #else:
        #h_sum = tf.add_n(in_hidden)
        h_sum = in_hidden

        # Fully connect for W*h
        with tf.variable_scope("FC_W", reuse=flagReuse) as scope:
            W_mult_h = linear(tf.reshape(h_sum, [num_samples, -1]), n_fc)
            W_mult_h = batch_normalization(incoming=W_mult_h)

        with tf.variable_scope("FC_bias", reuse=flagReuse):
            bias = _variable_on_cpu('bias_FC_hid', W_mult_h.get_shape(), tf.constant_initializer(0.0))

        h = relu(tf.add(W_mult_h, U_mult_x) + bias)

        if not is_last:
            # ResNet block
            shape = [num_samples] + hidden_size
            if unitID is not None:
                print('Add block_resnet %s for unit %d' % (prefix, unitID))
            else:
                print('Add block_resnet %s' % prefix)

            if unitID is not None:
                h_res = add_block_res(tf.reshape(h, shape), 'Resnet_unit%d' % unitID, f_size)
            else:
                h_res = add_block_res(tf.reshape(h, shape), scope_prefix, f_size)
            h_res = tf.reshape(h_res, [num_samples, -1])
        else:
            h_res = h

        # Fully connect for V*h
        with tf.variable_scope("FC_V", reuse=flagReuse) as scope:
            shape = in_data.get_shape().as_list()
            n_pixel = shape[1] * shape[2]
            n_volume = n_pixel * n_class
            o = linear(h_res, n_volume)
            o = tf.reshape(o, [num_samples, shape[1], shape[2], n_class])


        return h_res, o
Example #7
def decoder_with_convs_only(in_signal,
                            n_filters,
                            filter_sizes,
                            strides,
                            padding='same',
                            b_norm=True,
                            non_linearity=tf.nn.relu,
                            conv_op=conv_1d_tranpose,
                            regularizer=None,
                            weight_decay=0.001,
                            dropout_prob=None,
                            upsample_sizes=None,
                            b_norm_finish=False,
                            scope=None,
                            reuse=False,
                            verbose=False):

    if verbose:
        print 'Building Decoder'

    n_layers = len(n_filters)
    filter_sizes = replicate_parameter_for_all_layers(filter_sizes, n_layers)
    strides = replicate_parameter_for_all_layers(strides, n_layers)
    dropout_prob = replicate_parameter_for_all_layers(dropout_prob, n_layers)

    for i in xrange(n_layers):
        if i == 0:
            layer = in_signal

        name = 'decoder_conv_layer_' + str(i)
        scope_i = expand_scope_by_name(scope, name)

        layer = conv_op(layer,
                        nb_filter=n_filters[i],
                        filter_size=filter_sizes[i],
                        strides=strides[i],
                        padding=padding,
                        regularizer=regularizer,
                        weight_decay=weight_decay,
                        name=name,
                        reuse=reuse,
                        scope=scope_i)

        if verbose:
            print name, 'conv params = ', np.prod(
                layer.W.get_shape().as_list()) + np.prod(
                    layer.b.get_shape().as_list()),

        if (b_norm and i < n_layers - 1) or (i == n_layers - 1
                                             and b_norm_finish):
            name += '_bnorm'
            scope_i = expand_scope_by_name(scope, name)
            layer = batch_normalization(layer,
                                        name=name,
                                        reuse=reuse,
                                        scope=scope_i)
            if verbose:
                print 'bnorm params = ', np.prod(
                    layer.beta.get_shape().as_list()) + np.prod(
                        layer.gamma.get_shape().as_list())

        if non_linearity is not None and i < n_layers - 1:  # Last layer doesn't have a non-linearity.
            layer = non_linearity(layer)

        if dropout_prob is not None and dropout_prob[i] > 0:
            layer = dropout(layer, 1.0 - dropout_prob[i])

        if upsample_sizes is not None and upsample_sizes[i] is not None:
            layer = tf.tile(layer, multiples=[1, upsample_sizes[i], 1])

        if verbose:
            print layer
            print 'output size:', np.prod(
                layer.get_shape().as_list()[1:]), '\n'

    return layer
Example #8
def model_fn(net, X_len, max_reach, block_size, out_classes, batch_size, dtype, **kwargs):
    """
        Args:
        net -> Input tensor shaped (batch_size, max_reach + block_size + max_reach, 3)
        Returns:
        logits -> Unscaled logits tensor in time_major form, (block_size, batch_size, out_classes)
    """

    for block in range(1, 3):
        with tf.variable_scope("block%d" % block):
            for layer in range(kwargs['num_layers']):
                with tf.variable_scope('layer_%d' % layer):
                    res = net
                    for sublayer in range(kwargs['num_sub_layers']):
                        res = batch_normalization(
                            res, scope='bn_%d' % sublayer)
                        res = tf.nn.relu(res)
                        res = conv_1d(
                            res,
                            64,
                            3,
                            scope="conv_1d_%d" % sublayer,
                            weights_init=variance_scaling_initializer(
                                dtype=dtype)
                        )
                    k = tf.get_variable(
                        "k", initializer=tf.constant_initializer(1.0), shape=[])
                    net = tf.nn.relu(k) * res + net
            net = max_pool_1d(net, 2)
        net = tf.nn.relu(net)

    net = central_cut(net, block_size, 4)
    net = tf.transpose(net, [1, 0, 2], name="Shift_to_time_major")
    # with tf.name_scope("RNN"):
    #     from tensorflow.contrib.cudnn_rnn import CudnnGRU, RNNParamsSaveable
    #     rnn_layer = CudnnGRU(
    #         num_layers=1,
    #         num_units=64,
    #         input_size=64,
    #     )
    #
    #     print(rnn_layer.params_size())
    #     import sys
    #     sys.exit(0)
    #     rnn_params = tf.get_variable("rnn_params", shape=[rnn_layer.params_size()], validate_shape=False)
    #     params_saveable = RNNParamsSaveable(
    #         rnn_layer.params_to_canonical, rnn_layer.canonical_to_params, [rnn_params])
    #     tf.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, params_saveable)
    #

    with tf.name_scope("RNN"):
        cell = tf.contrib.rnn.GRUCell(64)
        init_state = cell.zero_state(batch_size, dtype=tf.float32)
        outputs, final_state = tf.nn.dynamic_rnn(cell, net, initial_state=init_state, sequence_length=tf.div(X_len + 3, 4), time_major=True, parallel_iterations=128)

    net = conv_1d(outputs, 9, 1, scope='logits')
    return {
        'logits': net,
        'init_state': init_state,
        'final_state': final_state
    }
Example #9
def block35(net, scale=1.0, activation="relu"):
    tower_conv = relu(
        batch_normalization(
            conv_2d(net, 32, 1, bias=False, activation=None,
                    name='Conv2d_1x1')))
    tower_conv1_0 = relu(
        batch_normalization(
            conv_2d(net,
                    32,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_0a_1x1')))
    tower_conv1_1 = relu(
        batch_normalization(
            conv_2d(tower_conv1_0,
                    32,
                    3,
                    bias=False,
                    activation=None,
                    name='Conv2d_0b_3x3')))
    tower_conv2_0 = relu(
        batch_normalization(
            conv_2d(net,
                    32,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_0a_1x1')))
    tower_conv2_1 = relu(
        batch_normalization(
            conv_2d(tower_conv2_0,
                    48,
                    3,
                    bias=False,
                    activation=None,
                    name='Conv2d_0b_3x3')))
    tower_conv2_2 = relu(
        batch_normalization(
            conv_2d(tower_conv2_1,
                    64,
                    3,
                    bias=False,
                    activation=None,
                    name='Conv2d_0c_3x3')))
    tower_mixed = merge([tower_conv, tower_conv1_1, tower_conv2_2],
                        mode='concat',
                        axis=3)
    tower_out = relu(
        batch_normalization(
            conv_2d(tower_mixed,
                    net.get_shape()[3],
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_1x1')))
    net += scale * tower_out
    if activation:
        if isinstance(activation, str):
            net = activations.get(activation)(net)
        elif hasattr(activation, '__call__'):
            net = activation(net)
        else:
            raise ValueError("Invalid Activation.")
    return net
Example #10
# Load data
(train_x, train_y), (val_x,val_y), (test_x, test_y) = fer2013()


# Define number of output classes.
num_classes = 7

# Define padding scheme.
padding = 'VALID'

# Model Architecture
network = input_data(shape=[None, 48, 48, 1])
#change stride, decrease filter size, same padding instead of normalization (this run)
conv_1 = relu(conv_2d(network, 64, 7, strides=2, bias=True, padding=padding, activation=None, name='Conv2d_1'))
maxpool_1 = batch_normalization(max_pool_2d(conv_1, 3, strides=2, padding=padding, name='MaxPool_1'))
#LRN_1 = local_response_normalization(maxpool_1, name='LRN_1')
# FeatEX-1
conv_2a = relu(conv_2d(maxpool_1, 96, 1, strides=1, padding=padding, name='Conv_2a_FX1'))
maxpool_2a = max_pool_2d(maxpool_1, 3, strides=1, padding=padding, name='MaxPool_2a_FX1')
conv_2b = relu(conv_2d(conv_2a, 208, 3, strides=1, padding=padding, name='Conv_2b_FX1'))
conv_2c = relu(conv_2d(maxpool_2a, 64, 1, strides=1, padding=padding, name='Conv_2c_FX1'))
FX1_out = merge([conv_2b, conv_2c], mode='concat', axis=3, name='FX1_out')
# FeatEX-2
conv_3a = relu(conv_2d(FX1_out, 96, 1, strides=1, padding=padding, name='Conv_3a_FX2'))
maxpool_3a = max_pool_2d(FX1_out, 3, strides=1, padding=padding, name='MaxPool_3a_FX2')
conv_3b = relu(conv_2d(conv_3a, 208, 3, strides=1, padding=padding, name='Conv_3b_FX2'))
conv_3c = relu(conv_2d(maxpool_3a, 64, 1, strides=1, padding=padding, name='Conv_3c_FX2'))
FX2_out = merge([conv_3b, conv_3c], mode='concat', axis=3, name='FX2_out')
net = flatten(FX2_out)
if do:
Example #11
def construct_inceptionv3onfire(x,y, training=False, enable_batch_norm=True):

    # build network as per architecture

    network = input_data(shape=[None, y, x, 3])

    conv1_3_3 = conv_2d(network, 32, 3, strides=2, activation='relu', name = 'conv1_3_3',padding='valid')
    conv2_3_3 = conv_2d(conv1_3_3, 32, 3, strides=1, activation='relu', name = 'conv2_3_3',padding='valid')
    conv3_3_3 = conv_2d(conv2_3_3, 64, 3, strides=2, activation='relu', name = 'conv3_3_3')

    pool1_3_3 = max_pool_2d(conv3_3_3, 3,strides=2)
    if enable_batch_norm:
        pool1_3_3 = batch_normalization(pool1_3_3)
    conv1_7_7 = conv_2d(pool1_3_3, 80,3, strides=1, activation='relu', name='conv2_7_7_s2',padding='valid')
    conv2_7_7 = conv_2d(conv1_7_7, 96,3, strides=1, activation='relu', name='conv2_7_7_s2',padding='valid')
    pool2_3_3= max_pool_2d(conv2_7_7,3,strides=2)

    inception_3a_1_1 = conv_2d(pool2_3_3,64, filter_size=1, activation='relu', name='inception_3a_1_1')

    inception_3a_3_3_reduce = conv_2d(pool2_3_3, 48, filter_size=1, activation='relu', name='inception_3a_3_3_reduce')
    inception_3a_3_3 = conv_2d(inception_3a_3_3_reduce, 64, filter_size=[5,5],  activation='relu',name='inception_3a_3_3')


    inception_3a_5_5_reduce = conv_2d(pool2_3_3, 64, filter_size=1, activation='relu', name = 'inception_3a_5_5_reduce')
    inception_3a_5_5_asym_1 = conv_2d(inception_3a_5_5_reduce, 96, filter_size=[3,3],  name = 'inception_3a_5_5_asym_1')
    inception_3a_5_5 = conv_2d(inception_3a_5_5_asym_1, 96, filter_size=[3,3],  name = 'inception_3a_5_5')


    inception_3a_pool = avg_pool_2d(pool2_3_3, kernel_size=3, strides=1,  name='inception_3a_pool')
    inception_3a_pool_1_1 = conv_2d(inception_3a_pool, 32, filter_size=1, activation='relu', name='inception_3a_pool_1_1')

    # merge the inception_3a

    inception_3a_output = merge([inception_3a_1_1, inception_3a_3_3, inception_3a_5_5, inception_3a_pool_1_1], mode='concat', axis=3, name='inception_3a_output')


    inception_5a_1_1 = conv_2d(inception_3a_output, 96, 1, activation='relu', name='inception_5a_1_1')

    inception_5a_3_3_reduce = conv_2d(inception_3a_output, 64, filter_size=1, activation='relu', name='inception_5a_3_3_reduce')
    inception_5a_3_3_asym_1 = conv_2d(inception_5a_3_3_reduce, 64, filter_size=[1,7],  activation='relu',name='inception_5a_3_3_asym_1')
    inception_5a_3_3 = conv_2d(inception_5a_3_3_asym_1,96, filter_size=[7,1],  activation='relu',name='inception_5a_3_3')


    inception_5a_5_5_reduce = conv_2d(inception_3a_output, 64, filter_size=1, activation='relu', name = 'inception_5a_5_5_reduce')
    inception_5a_5_5_asym_1 = conv_2d(inception_5a_5_5_reduce, 64, filter_size=[7,1],  name = 'inception_5a_5_5_asym_1')
    inception_5a_5_5_asym_2 = conv_2d(inception_5a_5_5_asym_1, 64, filter_size=[1,7],  name = 'inception_5a_5_5_asym_2')
    inception_5a_5_5_asym_3 = conv_2d(inception_5a_5_5_asym_2, 64, filter_size=[7,1],  name = 'inception_5a_5_5_asym_3')
    inception_5a_5_5 = conv_2d(inception_5a_5_5_asym_3, 96, filter_size=[1,7],  name = 'inception_5a_5_5')


    inception_5a_pool = avg_pool_2d(inception_3a_output, kernel_size=3, strides=1 )
    inception_5a_pool_1_1 = conv_2d(inception_5a_pool, 96, filter_size=1, activation='relu', name='inception_5a_pool_1_1')

    # merge the inception_5a__
    inception_5a_output = merge([inception_5a_1_1, inception_5a_3_3, inception_5a_5_5, inception_5a_pool_1_1], mode='concat', axis=3)



    inception_7a_1_1 = conv_2d(inception_5a_output, 80, 1, activation='relu', name='inception_7a_1_1')
    inception_7a_3_3_reduce = conv_2d(inception_5a_output, 96, filter_size=1, activation='relu', name='inception_7a_3_3_reduce')
    inception_7a_3_3_asym_1 = conv_2d(inception_7a_3_3_reduce, 96, filter_size=[1,3],  activation='relu',name='inception_7a_3_3_asym_1')
    inception_7a_3_3_asym_2 = conv_2d(inception_7a_3_3_reduce, 96, filter_size=[3,1],  activation='relu',name='inception_7a_3_3_asym_2')
    inception_7a_3_3=merge([inception_7a_3_3_asym_1,inception_7a_3_3_asym_2],mode='concat',axis=3)

    inception_7a_5_5_reduce = conv_2d(inception_5a_output, 66, filter_size=1, activation='relu', name = 'inception_7a_5_5_reduce')
    inception_7a_5_5_asym_1 = conv_2d(inception_7a_5_5_reduce, 96, filter_size=[3,3],  name = 'inception_7a_5_5_asym_1')
    inception_7a_5_5_asym_2 = conv_2d(inception_7a_3_3_asym_1, 96, filter_size=[1,3],  activation='relu',name='inception_7a_5_5_asym_2')
    inception_7a_5_5_asym_3 = conv_2d(inception_7a_3_3_asym_1, 96, filter_size=[3,1],  activation='relu',name='inception_7a_5_5_asym_3')
    inception_7a_5_5=merge([inception_7a_5_5_asym_2,inception_7a_5_5_asym_3],mode='concat',axis=3)


    inception_7a_pool = avg_pool_2d(inception_5a_output, kernel_size=3, strides=1 )
    inception_7a_pool_1_1 = conv_2d(inception_7a_pool, 96, filter_size=1, activation='relu', name='inception_7a_pool_1_1')

    # merge the inception_7a__
    inception_7a_output = merge([inception_7a_1_1, inception_7a_3_3, inception_7a_5_5, inception_7a_pool_1_1], mode='concat', axis=3)



    pool5_7_7=global_avg_pool(inception_7a_output)
    if(training):
        pool5_7_7=dropout(pool5_7_7,0.4)
    loss = fully_connected(pool5_7_7, 2,activation='softmax')

    if(training):
        network = regression(loss, optimizer='rmsprop',
                             loss='categorical_crossentropy',
                             learning_rate=0.001)
    else:
        network=loss

    model = tflearn.DNN(network, checkpoint_path='inceptionv3',
                        max_checkpoints=1, tensorboard_verbose=0)

    return model
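An assumed inference-time use of the constructor above; the 224x224 input size and the weights path are placeholders rather than values confirmed by the project:

import numpy as np

model = construct_inceptionv3onfire(224, 224, training=False)
# model.load('inceptionv3onfire.tflearn')        # hypothetical checkpoint file
frame = np.zeros((1, 224, 224, 3), dtype=np.float32)
probs = model.predict(frame)                     # two-class softmax output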
Example #12
def build_model(optimizer=HYPERPARAMS.optimizer, optimizer_param=HYPERPARAMS.optimizer_param, 
    learning_rate=HYPERPARAMS.learning_rate, keep_prob=HYPERPARAMS.keep_prob,
    learning_rate_decay=HYPERPARAMS.learning_rate_decay, decay_step=HYPERPARAMS.decay_step):

    images_input = input_data(shape=[None, NETWORK.input_size, NETWORK.input_size, 1], name='input1')
    
    images_network = conv_2d(images_input, 16, 3, activation='relu')
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = conv_2d(images_network, 16, 3, activation='relu')
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = max_pool_2d(images_network, 2, strides=2)  #24*24*16
    
    
    images_network = conv_2d(images_network, 32, 3, activation='relu')
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = conv_2d(images_network, 32, 3, activation='relu')
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = max_pool_2d(images_network, 2, strides=2)    #12*12*32
    
    images_network=tf.pad(images_network,[[0,0],[18,18],[18,18],[0,0]],'CONSTANT')
    images_network = merge([images_network, images_input], 'concat', axis=3)              #48*48*33
    
    images_network = conv_2d(images_network, 64, 3, activation='relu')
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = conv_2d(images_network, 64, 3, activation='relu')
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = conv_2d(images_network, 64, 3, activation='relu')
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = max_pool_2d(images_network, 2, strides=2)       #24*24*64
    
    
    images_network = conv_2d(images_network, 128, 3, activation='relu')
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = conv_2d(images_network, 128, 3, activation='relu')
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = conv_2d(images_network, 128, 3, activation='relu')
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = max_pool_2d(images_network, 2, strides=2)      #12*12*128
#     
    images_network = conv_2d(images_network, 128, 3, activation='relu')
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = conv_2d(images_network, 128, 3, activation='relu')
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = conv_2d(images_network, 128, 3, activation='relu')
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = max_pool_2d(images_network, 2, strides=2)     #6*6*128
     
    images_network = fully_connected(images_network, 1024, activation='relu')
    images_network = dropout(images_network,keep_prob=keep_prob)
    if NETWORK.use_batchnorm_after_fully_connected_layers:
        images_network = batch_normalization(images_network)
    images_network = fully_connected(images_network, 1024, activation='relu')
    images_network = dropout(images_network, keep_prob=keep_prob)
    if NETWORK.use_batchnorm_after_fully_connected_layers:
        images_network = batch_normalization(images_network)

    if NETWORK.use_landmarks or NETWORK.use_hog_and_landmarks:
        if NETWORK.use_hog_sliding_window_and_landmarks:
            landmarks_network = input_data(shape=[None, 2728], name='input2')
        elif NETWORK.use_hog_and_landmarks:
            landmarks_network = input_data(shape=[None, 208], name='input2')
        else:
            landmarks_network = input_data(shape=[None, 68, 2], name='input2')
        landmarks_network = fully_connected(landmarks_network, 1024, activation=NETWORK.activation)
        if NETWORK.use_batchnorm_after_fully_connected_layers:
            landmarks_network = batch_normalization(landmarks_network)
        landmarks_network = fully_connected(landmarks_network, 40, activation=NETWORK.activation)
        if NETWORK.use_batchnorm_after_fully_connected_layers:
            landmarks_network = batch_normalization(landmarks_network)
        images_network = fully_connected(images_network, 40, activation=NETWORK.activation)
        network = merge([images_network, landmarks_network], 'concat', axis=1)
    else:
        network = images_network
    network = fully_connected(network, NETWORK.output_size, activation='softmax')

    if optimizer == 'momentum':
        # FIXME base_lr * (1 - iter/max_iter)^0.5, base_lr = 0.01
        optimizer = Momentum(learning_rate=learning_rate, momentum=optimizer_param,
                    lr_decay=learning_rate_decay, decay_step=decay_step)
    elif optimizer == 'adam':
        optimizer = Adam(learning_rate=learning_rate, beta1=optimizer_param, beta2=learning_rate_decay)
    else:
        print("Unknown optimizer: {}".format(optimizer))
    network = regression(network, optimizer=optimizer, loss=NETWORK.loss, learning_rate=learning_rate, name='output')

    return network
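A hedged sketch of wrapping this graph in a trainable model; HYPERPARAMS and NETWORK come from the project's configuration module, and the feed keys below simply mirror the input_data and regression names used above:

import tflearn

model = tflearn.DNN(build_model(), tensorboard_verbose=0)
# With use_landmarks enabled the graph has two inputs, so fit() would be fed
# dictionaries keyed by the placeholder names, e.g.:
# model.fit({'input1': images, 'input2': landmarks}, {'output': labels},
#           n_epoch=20, batch_size=128)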
Example #13
def build_MyModel(optimizer=HYPERPARAMS.optimizer,
                  optimizer_param=HYPERPARAMS.optimizer_param,
                  learning_rate=HYPERPARAMS.learning_rate,
                  keep_prob=HYPERPARAMS.keep_prob,
                  learning_rate_decay=HYPERPARAMS.learning_rate_decay,
                  decay_step=HYPERPARAMS.decay_step):

    images_network = input_data(
        shape=[None, NETWORK.input_size, NETWORK.input_size, 1], name='input1')
    images_network = conv_2d(images_network,
                             64,
                             3,
                             activation=NETWORK.activation)
    #images_network = local_response_normalization(images_network)
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = max_pool_2d(images_network, 3, strides=2)
    images_network = conv_2d(images_network,
                             128,
                             3,
                             activation=NETWORK.activation)
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = max_pool_2d(images_network, 3, strides=2)
    images_network = conv_2d(images_network,
                             256,
                             3,
                             activation=NETWORK.activation)
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = max_pool_2d(images_network, 3, strides=2)
    images_network = dropout(images_network, keep_prob=keep_prob)
    images_network = fully_connected(images_network,
                                     4096,
                                     activation=NETWORK.activation)
    images_network = dropout(images_network, keep_prob=keep_prob)
    images_network = fully_connected(images_network,
                                     1024,
                                     activation=NETWORK.activation)
    if NETWORK.use_batchnorm_after_fully_connected_layers:
        images_network = batch_normalization(images_network)

    if NETWORK.use_landmarks or NETWORK.use_hog_and_landmarks:
        if NETWORK.use_hog_sliding_window_and_landmarks:
            landmarks_network = input_data(shape=[None, 2728], name='input2')
        elif NETWORK.use_hog_and_landmarks:
            landmarks_network = input_data(shape=[None, 208], name='input2')
        else:
            landmarks_network = input_data(shape=[None, 68, 2], name='input2')
        landmarks_network = fully_connected(landmarks_network,
                                            1024,
                                            activation=NETWORK.activation)
        if NETWORK.use_batchnorm_after_fully_connected_layers:
            landmarks_network = batch_normalization(landmarks_network)
        landmarks_network = fully_connected(landmarks_network,
                                            128,
                                            activation=NETWORK.activation)
        if NETWORK.use_batchnorm_after_fully_connected_layers:
            landmarks_network = batch_normalization(landmarks_network)
        images_network = fully_connected(images_network,
                                         128,
                                         activation=NETWORK.activation)
        network = merge([images_network, landmarks_network], 'concat', axis=1)
    else:
        network = images_network
    network = fully_connected(network,
                              NETWORK.output_size,
                              activation='softmax')

    if optimizer == 'momentum':
        optimizer = Momentum(learning_rate=learning_rate,
                             momentum=optimizer_param,
                             lr_decay=learning_rate_decay,
                             decay_step=decay_step)
    elif optimizer == 'adam':
        optimizer = Adam(learning_rate=learning_rate,
                         beta1=optimizer_param,
                         beta2=learning_rate_decay)
    else:
        print("Unknown optimizer: {}".format(optimizer))
    network = regression(network,
                         optimizer=optimizer,
                         loss=NETWORK.loss,
                         learning_rate=learning_rate,
                         name='output')

    return network
Example #14
def model_fn(net, X_len, max_reach, block_size, out_classes, batch_size, dtype,
             **kwargs):
    """
        Args:
        net -> Input tensor shaped (batch_size, max_reach + block_size + max_reach, 3)
        Returns:
        logits -> Unscaled logits tensor in time_major form, (block_size, batch_size, out_classes)
    """

    with tf.name_scope("model"):
        print("model in", net.get_shape())
        for block in range(1, 4):
            with tf.variable_scope("block%d" % block):
                for layer in range(1, 20 + 1):
                    with tf.variable_scope('layer_%d' % layer):
                        res = net
                        for sublayer in [1, 2]:
                            res = batch_normalization(res,
                                                      scope='bn_%d' % sublayer)
                            res = tf.nn.relu(res)
                            res = conv_1d(
                                res,
                                64,
                                3,
                                scope="conv_1d_%d" % sublayer,
                                weights_init=variance_scaling_initializer(
                                    dtype=dtype))
                        k = tf.get_variable(
                            "k",
                            initializer=tf.constant_initializer(1.0),
                            shape=[])
                        net = tf.nn.relu(k) * res + net
                net = max_pool_1d(net, 2)

        cut_size = tf.shape(net)[1] - tf.div(block_size, 8)
        with tf.control_dependencies([
                tf.cond(
                    tflearn.get_training_mode(), lambda: tf.assert_equal(
                        tf.mod(cut_size, 2), 0, name="cut_size_assert"),
                    lambda: tf.no_op())
        ]):
            cut_size = tf.div(cut_size, 2)

        net = tf.slice(net, [0, cut_size, 0],
                       [-1, tf.div(block_size, 8), -1],
                       name="Cutting")
        print("after slice", net.get_shape())

        net = tf.transpose(net, [1, 0, 2], name="Shift_to_time_major")

        state_size = 64
        outputs = net
        print("outputs", outputs.get_shape())

        with tf.variable_scope("Output"):
            outputs = tf.reshape(outputs,
                                 [block_size // 8 * batch_size, state_size],
                                 name="flatten")
            W = tf.get_variable("W", shape=[state_size, out_classes])
            b = tf.get_variable("b", shape=[out_classes])
            outputs = tf.matmul(outputs, W) + b
            logits = tf.reshape(outputs,
                                [block_size // 8, batch_size, out_classes],
                                name="logits")
    print("model out", logits.get_shape())
    return {
        'logits': logits,
        'init_state': tf.constant(0),
        'final_state': tf.constant(0),
    }
Example #15
def cnn_model(x_shape, y_shape, archi="AlexNet"):
    image_aug = ImageAugmentation()
    image_aug.add_random_blur(1)
    image_aug.add_random_flip_leftright()
    image_aug.add_random_flip_updown()
    image_aug.add_random_rotation()
    image_aug.add_random_90degrees_rotation()

    # AlexNet, replacing local normalization with batch normalization.
    if archi == "AlexNet":
        net = input_data(shape=[None] + list(x_shape[1:]),
                         data_augmentation=image_aug)
        net = conv_2d(net, 96, 7, strides=2, activation='relu')

        net = batch_normalization(net)
        net = max_pool_2d(net, 2)
        net = dropout(net, 0.8)

        net = conv_2d(net, 256, 5, strides=2, activation='relu')
        net = batch_normalization(net)

        net = max_pool_2d(net, 2)
        net = dropout(net, 0.8)

        net = conv_2d(net, 384, 3, activation='relu')
        net = conv_2d(net, 384, 3, activation='relu')
        net = conv_2d(net, 256, 3, activation='relu')
        net = batch_normalization(net)
        net = max_pool_2d(net, 2)
        net = dropout(net, 0.8)

        net = fully_connected(net, 4096, activation='tanh')
        net = dropout(net, 0.5)
        net = fully_connected(net, 4096, activation='tanh')
        net = dropout(net, 0.5)
        net = fully_connected(net, y_shape[1], activation='softmax')
        net = regression(net,
                         optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.0001)

    # ResNet, with dropout.
    if archi == "ResNet":
        n = 5
        net = tflearn.input_data(shape=[None] + list(x_shape[1:]),
                                 data_augmentation=image_aug)
        net = tflearn.conv_2d(net,
                              16,
                              5,
                              strides=2,
                              regularizer='L2',
                              weight_decay=0.0001)
        net = tflearn.residual_block(net, n, 16)
        net = tflearn.residual_block(net, 1, 32, downsample=True)
        net = tflearn.dropout(net, 0.8)
        net = tflearn.residual_block(net, n - 1, 32)
        net = tflearn.residual_block(net, 1, 64, downsample=True)
        net = tflearn.dropout(net, 0.8)
        net = tflearn.residual_block(net, n - 1, 64)
        net = tflearn.batch_normalization(net)
        net = tflearn.activation(net, 'relu')
        net = tflearn.global_avg_pool(net)
        net = tflearn.fully_connected(net, y_shape[1], activation='softmax')
        net = tflearn.regression(net,
                                 optimizer='adam',
                                 loss='categorical_crossentropy',
                                 learning_rate=0.0001)

    return net
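An assumed end-to-end use of cnn_model(); the zero-filled arrays and the 64x64, 10-class shapes are placeholders for whatever dataset the caller actually provides:

import numpy as np
import tflearn

X = np.zeros((128, 64, 64, 3), dtype=np.float32)   # placeholder images
Y = np.zeros((128, 10), dtype=np.float32)          # placeholder one-hot labels
model = tflearn.DNN(cnn_model(X.shape, Y.shape, archi="AlexNet"), tensorboard_verbose=0)
model.fit(X, Y, n_epoch=1, validation_set=0.1, batch_size=32, show_metric=True)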
Example #16
def get_network_architecture(image_width, image_height, number_of_classes, learning_rate):

    number_of_channels = 1

    network = input_data(
        shape=[None, image_width, image_height, number_of_channels],
        data_preprocessing=img_prep,
        data_augmentation=img_aug,
        name='InputData'
    )

    """
        def conv_2d(incoming, nb_filters, filter_size, strides=1, padding='same',
                    activation='linear', bias='True', weights_init='uniform_scaling',
                    bias_init='zeros', regularizer=None, weight_decay=0.001,
                    trainable=True, restore=True, reuse=False, scope=None,
                    name='Conv2D')

        network = conv_2d(network, 32, (3, 3), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_1')
        network = max_pool_2d(network, (2, 2), strides=2, padding='same', name='MaxPool2D_1')
        network = avg_pool_2d(network, (2, 2), strides=2, padding='same', name='AvgPool2D_1')
        network = dropout(network, 0.5, name='Dropout_1')
        network = batch_normalization(network, name='BatchNormalization')
        network = flatten(network, name='Flatten')
        network = fully_connected(network, 512, activation='relu', name='FullyConnected_1')
        network = fully_connected(network, number_of_classes, activation='softmax', name='FullyConnected_Final')

        print('  {}: {}'.format('Conv2D................', network.shape))
        print('  {}: {}'.format('MaxPool2D.............', network.shape))
        print('  {}: {}'.format('Dropout...............', network.shape))
        print('  {}: {}'.format('BatchNormalization....', network.shape))
        print('  {}: {}'.format('Flatten...............', network.shape))
        print('  {}: {}'.format('FullyConnected........', network.shape))
        print('  {}: {}'.format('FullyConnected_Final..', network.shape))

        CONV / FC -> Dropout -> BN -> activation function -> ...

        Convolutional filters: { 32, 64, 128 }
        Convolutional filter sizes: { 1, 3, 5, 11 }
        Convolutional strides: 1
        Activation: ReLu

        Pooling kernel sizes: { 2, 3, 4, 5 }
        Pooling kernel strides: 2

        Dropout probability: 0.5
            - Higher probability of keeping in earlier stages
            - Lower probability of keeping in later stages
    """

    print('\nNetwork architecture:')
    print('  {}: {}'.format('InputData.............', network.shape))

    network = conv_2d(network, 16, (7, 7), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_1')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_1')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = conv_2d(network, 16, (7, 7), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_2')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_2')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = avg_pool_2d(network, (2, 2), strides=2, padding='same', name='AvgPool2D_1')
    print('  {}: {}'.format('AvgPool2D.............', network.shape))
    network = dropout(network, 0.5, name='Dropout_1')
    print('  {}: {}'.format('Dropout...............', network.shape))


    network = conv_2d(network, 32, (5, 5), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_3')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_3')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = conv_2d(network, 32, (5, 5), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_4')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_4')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = avg_pool_2d(network, (2, 2), strides=2, padding='same', name='AvgPool2D_2')
    print('  {}: {}'.format('AvgPool2D.............', network.shape))
    network = dropout(network, 0.5, name='Dropout_2')
    print('  {}: {}'.format('Dropout...............', network.shape))


    network = conv_2d(network, 64, (3, 3), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_5')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_5')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = conv_2d(network, 64, (3, 3), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_6')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_6')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = avg_pool_2d(network, (2, 2), strides=2, padding='same', name='AvgPool2D_3')
    print('  {}: {}'.format('AvgPool2D.............', network.shape))
    network = dropout(network, 0.5, name='Dropout_3')
    print('  {}: {}'.format('Dropout...............', network.shape))


    network = conv_2d(network, 128, (3, 3), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_7')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_7')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = conv_2d(network, 128, (3, 3), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_8')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_8')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = avg_pool_2d(network, (2, 2), strides=2, padding='same', name='AvgPool2D_4')
    print('  {}: {}'.format('AvgPool2D.............', network.shape))
    network = dropout(network, 0.5, name='Dropout_4')
    print('  {}: {}'.format('Dropout...............', network.shape))


    network = conv_2d(network, 256, (3, 3), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_9')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_9')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = conv_2d(network, 256, (3, 3), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_10')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_10')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = avg_pool_2d(network, (2, 2), strides=2, padding='same', name='AvgPool2D_5')
    print('  {}: {}'.format('AvgPool2D.............', network.shape))
    network = dropout(network, 0.5, name='Dropout_5')
    print('  {}: {}'.format('Dropout...............', network.shape))


    network = flatten(network, name='Flatten')
    print('  {}: {}'.format('Flatten...............', network.shape))


    network = fully_connected(network, 512, activation='relu', name='FullyConnected_1')
    print('  {}: {}'.format('FullyConnected........', network.shape))
    network = dropout(network, 0.5, name='Dropout_6')
    print('  {}: {}'.format('Dropout...............', network.shape))


    network = fully_connected(network, number_of_classes, activation='softmax', name="FullyConnected_Final")
    print('  {}: {}'.format('FullyConnected_Final..', network.shape))


    optimizer = Adam(learning_rate=learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08, use_locking=False, name='Adam')
    # optimizer = SGD(learning_rate=learning_rate, lr_decay=0.01, decay_step=100, staircase=False, use_locking=False, name='SGD')
    # optimizer = RMSProp(learning_rate=learning_rate, decay=0.9, momentum=0.9, epsilon=1e-10, use_locking=False, name='RMSProp')
    # optimizer = Momentum(learning_rate=learning_rate, momentum=0.9, lr_decay=0.01, decay_step=100, staircase=False, use_locking=False, name='Momentum')

    metric = Accuracy(name='Accuracy')
    # metric = R2(name='Standard Error')
    # metric = WeightedR2(name='Weighted Standard Error')
    # metric = Top_k(k=6, name='Top K')


    network = regression(
        network,
        optimizer=optimizer,
        loss='categorical_crossentropy',
        metric=metric,
        learning_rate=learning_rate,
        name='Regression'
    )

    return network
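The function above only constructs and returns the graph. As a minimal sketch (an assumption, not part of the original example), the returned network could be wrapped in tflearn.DNN and trained on hypothetical arrays:

# Minimal training sketch (assumption, not from the original example).
# `network` is the graph returned by the function above; X_train, Y_train,
# X_val and Y_val are placeholder NumPy arrays.
model = tflearn.DNN(network, tensorboard_verbose=0)
model.fit(X_train, Y_train,
          n_epoch=50,
          validation_set=(X_val, Y_val),
          show_metric=True,
          batch_size=64,
          run_id='cnn_classifier')
model.save('cnn_classifier.tflearn')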
Example #17
0
def self(x_train, y_train, x_test, y_test):
    int_put = input_data(shape=[None, 224, 5, 5, 1], )

    conv1 = conv_3d(
        int_put,
        24,
        [24, 3, 3],
        padding='VALID',
        strides=[1, 1, 1, 1, 1],
        activation='prelu',
    )
    print('conv1', conv1.get_shape().as_list())
    batch_norm = batch_normalization(conv1)

    conv2 = conv_3d(
        batch_norm,
        12,
        [24, 3, 3],
        padding='VALID',
        strides=[1, 1, 1, 1, 1],
        activation='prelu',
    )
    print('conv2', conv2.get_shape().as_list())
    batch_norm_con = batch_normalization(conv2)

    decon2 = conv_3d_transpose(batch_norm_con,
                               24, [24, 3, 3],
                               padding='VALID',
                               output_shape=[201, 3, 3, 24])
    batch_norm = batch_normalization(decon2)
    print('a')
    decon2 = conv_3d_transpose(batch_norm,
                               1, [24, 3, 3],
                               padding='VALID',
                               output_shape=[224, 5, 5, 1])
    batch_norm = batch_normalization(decon2)

    network = regression(batch_norm,
                         optimizer='Adagrad',
                         loss='mean_square',
                         learning_rate=0.01,
                         metric='R2')
    model = tflearn.DNN(network,
                        tensorboard_verbose=0,
                        tensorboard_dir="./tflearn_logs/")

    for i in range(10):
        model.fit(x_train,
                  x_train,
                  n_epoch=20,
                  shuffle=True,
                  show_metric=True,
                  validation_set=(x_test, x_test),
                  batch_size=32,
                  run_id='3d_net_self')
        x_pre = model.predict(x_train)
        x_pre = np.array(x_pre)
        x_true = np.array(x_train)
        psnr(x_true, x_pre)

    model.save('my_model_self.tflearn')
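The training loop above calls a psnr helper that is not defined in this snippet. A minimal NumPy stand-in, assuming inputs scaled to [0, 1] and the usual definition PSNR = 20*log10(MAX) - 10*log10(MSE), might look like:

import numpy as np

def psnr(x_true, x_pred, data_range=1.0):
    # Hypothetical stand-in for the undefined psnr() used above.
    mse = np.mean((np.asarray(x_true, dtype=np.float64) -
                   np.asarray(x_pred, dtype=np.float64)) ** 2)
    if mse == 0:
        return float('inf')
    value = 20.0 * np.log10(data_range) - 10.0 * np.log10(mse)
    print('PSNR: {:.2f} dB'.format(value))
    return value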
    '''
class GoogLeNet:
    network = input_data(shape=[None, 1024, 1024, 1])

    conv1_5_5 = conv_2d(network,
                        64,
                        filter_size=5,
                        strides=2,
                        activation='relu',
                        padding='SAME')
    pool1_3_3 = max_pool_2d(conv1_5_5, kernel_size=3, strides=2)
    pool1_3_3 = batch_normalization(pool1_3_3)

    conv2_3_3_reduce = conv_2d(pool1_3_3,
                               64,
                               filter_size=1,
                               activation='relu',
                               padding='SAME')
    conv2_3_3 = conv_2d(conv2_3_3_reduce,
                        192,
                        filter_size=3,
                        activation='relu',
                        padding='SAME')
    conv2_3_3 = batch_normalization(conv2_3_3)
    pool2_3_3 = max_pool_2d(conv2_3_3, kernel_size=3, strides=2)

    # 3a
    inception_3a_1_1 = conv_2d(pool2_3_3,
                               64,
                               filter_size=1,
                               activation='relu',
                               padding='SAME')
    inception_3a_3_3_reduce = conv_2d(pool2_3_3,
                                      96,
                                      filter_size=1,
                                      activation='relu',
                                      padding='SAME')
    inception_3a_3_3 = conv_2d(inception_3a_3_3_reduce,
                               128,
                               filter_size=3,
                               activation='relu',
                               padding='SAME')
    inception_3a_5_5_reduce = conv_2d(pool2_3_3,
                                      16,
                                      filter_size=1,
                                      activation='relu',
                                      padding='SAME')
    inception_3a_5_5 = conv_2d(inception_3a_5_5_reduce,
                               32,
                               filter_size=5,
                               activation='relu',
                               padding='SAME')
    inception_3a_pool = max_pool_2d(pool2_3_3, kernel_size=3, strides=1)
    inception_3a_pool_1_1 = conv_2d(inception_3a_pool,
                                    32,
                                    filter_size=1,
                                    activation='relu')
    inception_3a_output = merge([
        inception_3a_1_1, inception_3a_3_3, inception_3a_5_5,
        inception_3a_pool_1_1
    ],
                                mode='concat',
                                axis=3)

    # 3b
    inception_3b_1_1 = conv_2d(inception_3a_output,
                               128,
                               filter_size=1,
                               activation='relu',
                               padding='SAME')
    inception_3b_3_3_reduce = conv_2d(inception_3a_output,
                                      128,
                                      filter_size=1,
                                      activation='relu',
                                      padding='SAME')
    inception_3b_3_3 = conv_2d(inception_3b_3_3_reduce,
                               192,
                               filter_size=3,
                               activation='relu',
                               padding='SAME')
    inception_3b_5_5_reduce = conv_2d(inception_3a_output,
                                      32,
                                      filter_size=1,
                                      activation='relu',
                                      padding='SAME')
    inception_3b_5_5 = conv_2d(inception_3b_5_5_reduce,
                               96,
                               filter_size=5,
                               activation='relu',
                               padding='SAME')
    inception_3b_pool = max_pool_2d(inception_3a_output,
                                    kernel_size=3,
                                    strides=1)
    inception_3b_pool_1_1 = conv_2d(inception_3b_pool,
                                    64,
                                    filter_size=1,
                                    activation='relu')
    inception_3b_output = merge([
        inception_3b_1_1, inception_3b_3_3, inception_3b_5_5,
        inception_3b_pool_1_1
    ],
                                mode='concat',
                                axis=3)

    pool3_3_3 = max_pool_2d(inception_3b_output, kernel_size=3, strides=2)

    # 4a
    inception_4a_1_1 = conv_2d(pool3_3_3,
                               192,
                               filter_size=1,
                               activation='relu',
                               padding='SAME')
    inception_4a_3_3_reduce = conv_2d(pool3_3_3,
                                      96,
                                      filter_size=1,
                                      activation='relu',
                                      padding='SAME')
    inception_4a_3_3 = conv_2d(inception_4a_3_3_reduce,
                               208,
                               filter_size=3,
                               activation='relu',
                               padding='SAME')
    inception_4a_5_5_reduce = conv_2d(pool3_3_3,
                                      16,
                                      filter_size=1,
                                      activation='relu',
                                      padding='SAME')
    inception_4a_5_5 = conv_2d(inception_4a_5_5_reduce,
                               48,
                               filter_size=5,
                               activation='relu',
                               padding='SAME')
    inception_4a_pool = max_pool_2d(pool3_3_3, kernel_size=3, strides=1)
    inception_4a_pool_1_1 = conv_2d(inception_4a_pool,
                                    64,
                                    filter_size=1,
                                    activation='relu')
    inception_4a_output = merge([
        inception_4a_1_1, inception_4a_3_3, inception_4a_5_5,
        inception_4a_pool_1_1
    ],
                                mode='concat',
                                axis=3)

    # 4b
    inception_4b_1_1 = conv_2d(inception_4a_output,
                               160,
                               filter_size=1,
                               activation='relu',
                               padding='SAME')
    inception_4b_3_3_reduce = conv_2d(inception_4a_output,
                                      112,
                                      filter_size=1,
                                      activation='relu',
                                      padding='SAME')
    inception_4b_3_3 = conv_2d(inception_4b_3_3_reduce,
                               224,
                               filter_size=3,
                               activation='relu',
                               padding='SAME')
    inception_4b_5_5_reduce = conv_2d(inception_4a_output,
                                      24,
                                      filter_size=1,
                                      activation='relu',
                                      padding='SAME')
    inception_4b_5_5 = conv_2d(inception_4b_5_5_reduce,
                               64,
                               filter_size=5,
                               activation='relu',
                               padding='SAME')
    inception_4b_pool = max_pool_2d(inception_4a_output,
                                    kernel_size=3,
                                    strides=1)
    inception_4b_pool_1_1 = conv_2d(inception_4b_pool,
                                    64,
                                    filter_size=1,
                                    activation='relu')
    inception_4b_output = merge([
        inception_4b_1_1, inception_4b_3_3, inception_4b_5_5,
        inception_4b_pool_1_1
    ],
                                mode='concat',
                                axis=3)

    # 4c
    inception_4c_1_1 = conv_2d(inception_4b_output,
                               128,
                               filter_size=1,
                               activation='relu',
                               padding='SAME')
    inception_4c_3_3_reduce = conv_2d(inception_4b_output,
                                      128,
                                      filter_size=1,
                                      activation='relu',
                                      padding='SAME')
    inception_4c_3_3 = conv_2d(inception_4c_3_3_reduce,
                               256,
                               filter_size=3,
                               activation='relu',
                               padding='SAME')
    inception_4c_5_5_reduce = conv_2d(inception_4b_output,
                                      24,
                                      filter_size=1,
                                      activation='relu',
                                      padding='SAME')
    inception_4c_5_5 = conv_2d(inception_4c_5_5_reduce,
                               64,
                               filter_size=5,
                               activation='relu',
                               padding='SAME')
    inception_4c_pool = max_pool_2d(inception_4b_output,
                                    kernel_size=3,
                                    strides=1)
    inception_4c_pool_1_1 = conv_2d(inception_4c_pool,
                                    64,
                                    filter_size=1,
                                    activation='relu')
    inception_4c_output = merge([
        inception_4c_1_1, inception_4c_3_3, inception_4c_5_5,
        inception_4c_pool_1_1
    ],
                                mode='concat',
                                axis=3)

    # 4d
    inception_4d_1_1 = conv_2d(inception_4c_output,
                               112,
                               filter_size=1,
                               activation='relu',
                               padding='SAME')
    inception_4d_3_3_reduce = conv_2d(inception_4c_output,
                                      144,
                                      filter_size=1,
                                      activation='relu',
                                      padding='SAME')
    inception_4d_3_3 = conv_2d(inception_4d_3_3_reduce,
                               288,
                               filter_size=3,
                               activation='relu',
                               padding='SAME')
    inception_4d_5_5_reduce = conv_2d(inception_4c_output,
                                      32,
                                      filter_size=1,
                                      activation='relu',
                                      padding='SAME')
    inception_4d_5_5 = conv_2d(inception_4d_5_5_reduce,
                               64,
                               filter_size=5,
                               activation='relu',
                               padding='SAME')
    inception_4d_pool = max_pool_2d(inception_4c_output,
                                    kernel_size=3,
                                    strides=1)
    inception_4d_pool_1_1 = conv_2d(inception_4d_pool,
                                    64,
                                    filter_size=1,
                                    activation='relu')
    inception_4d_output = merge([
        inception_4d_1_1, inception_4d_3_3, inception_4d_5_5,
        inception_4d_pool_1_1
    ],
                                mode='concat',
                                axis=3)

    # 4e
    inception_4e_1_1 = conv_2d(inception_4d_output,
                               256,
                               filter_size=1,
                               activation='relu',
                               padding='SAME')
    inception_4e_3_3_reduce = conv_2d(inception_4d_output,
                                      160,
                                      filter_size=1,
                                      activation='relu',
                                      padding='SAME')
    inception_4e_3_3 = conv_2d(inception_4e_3_3_reduce,
                               320,
                               filter_size=3,
                               activation='relu',
                               padding='SAME')
    inception_4e_5_5_reduce = conv_2d(inception_4d_output,
                                      32,
                                      filter_size=1,
                                      activation='relu',
                                      padding='SAME')
    inception_4e_5_5 = conv_2d(inception_4e_5_5_reduce,
                               128,
                               filter_size=5,
                               activation='relu',
                               padding='SAME')
    inception_4e_pool = max_pool_2d(inception_4d_output,
                                    kernel_size=3,
                                    strides=1)
    inception_4e_pool_1_1 = conv_2d(inception_4e_pool,
                                    128,
                                    filter_size=1,
                                    activation='relu')
    inception_4e_output = merge([
        inception_4e_1_1, inception_4e_3_3, inception_4e_5_5,
        inception_4e_pool_1_1
    ],
                                mode='concat',
                                axis=3)

    pool4_3_3 = max_pool_2d(inception_4e_output, kernel_size=3, strides=2)

    # 5a
    inception_5a_1_1 = conv_2d(pool4_3_3,
                               256,
                               filter_size=1,
                               activation='relu',
                               padding='SAME')
    inception_5a_3_3_reduce = conv_2d(pool4_3_3,
                                      160,
                                      filter_size=1,
                                      activation='relu',
                                      padding='SAME')
    inception_5a_3_3 = conv_2d(inception_5a_3_3_reduce,
                               320,
                               filter_size=3,
                               activation='relu',
                               padding='SAME')
    inception_5a_5_5_reduce = conv_2d(pool4_3_3,
                                      32,
                                      filter_size=1,
                                      activation='relu',
                                      padding='SAME')
    inception_5a_5_5 = conv_2d(inception_5a_5_5_reduce,
                               128,
                               filter_size=5,
                               activation='relu',
                               padding='SAME')
    inception_5a_pool = max_pool_2d(pool4_3_3, kernel_size=3, strides=1)
    inception_5a_pool_1_1 = conv_2d(inception_5a_pool,
                                    128,
                                    filter_size=1,
                                    activation='relu')
    inception_5a_output = merge([
        inception_5a_1_1, inception_5a_3_3, inception_5a_5_5,
        inception_5a_pool_1_1
    ],
                                axis=3,
                                mode='concat')

    # 5b
    inception_5b_1_1 = conv_2d(inception_5a_output,
                               384,
                               filter_size=1,
                               activation='relu',
                               padding='SAME')
    inception_5b_3_3_reduce = conv_2d(inception_5a_output,
                                      192,
                                      filter_size=1,
                                      activation='relu',
                                      padding='SAME')
    inception_5b_3_3 = conv_2d(inception_5b_3_3_reduce,
                               384,
                               filter_size=3,
                               activation='relu',
                               padding='SAME')
    inception_5b_5_5_reduce = conv_2d(inception_5a_output,
                                      48,
                                      filter_size=1,
                                      activation='relu',
                                      padding='SAME')
    inception_5b_5_5 = conv_2d(inception_5b_5_5_reduce,
                               128,
                               filter_size=5,
                               activation='relu',
                               padding='SAME')
    inception_5b_pool = max_pool_2d(inception_5a_output,
                                    kernel_size=3,
                                    strides=1)
    inception_5b_pool_1_1 = conv_2d(inception_5b_pool,
                                    128,
                                    filter_size=1,
                                    activation='relu')
    inception_5b_output = merge([
        inception_5b_1_1, inception_5b_3_3, inception_5b_5_5,
        inception_5b_pool_1_1
    ],
                                axis=3,
                                mode='concat')

    pool5_7_7 = avg_pool_2d(inception_5b_output, kernel_size=7, strides=1)
    pool5_7_7 = dropout(pool5_7_7, 0.5)
    '''Output layer'''
    output = fully_connected(pool5_7_7, 15, activation='sigmoid')

    network = regression(output,
                         optimizer='momentum',
                         loss='binary_crossentropy',
                         learning_rate=0.01)
    '''Set model + Save parameters + Tensorboard'''
    model = tflearn.DNN(network,
                        checkpoint_path='params_googlenet_cxr',
                        max_checkpoints=1,
                        tensorboard_verbose=0)
    '''Feed the oxflowers17 dataset to the model'''
    model.fit(train_x,
              train_t,
              n_epoch=10,
              validation_set=(test_x, test_t),
              show_metric=True,
              batch_size=16,
              snapshot_epoch=False,
              snapshot_step=100,
              run_id='googlenet_cxr')
Example #19
0
import tensorflow as tf
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.normalization import local_response_normalization, batch_normalization
from tflearn.layers.estimator import regression

from deepdsp.conf import conf

network = input_data(shape=[None, conf["buff_size"], conf["num_buffs"], 2],
                     name='input')

network = batch_normalization(network)

# https://github.com/tflearn/tflearn/blob/51399601c1a4f305db894b871baf743baa15ea00/tflearn/layers/core.py#L96
# network = fully_connected(network, 512, activation='leaky_relu')
network = fully_connected(network, 256, activation='elu', name="elu")
network = fully_connected(network,
                          len(conf["classes"]),
                          activation='softmax',
                          name='softmax')

# https://github.com/tflearn/tflearn/blob/4ba8c8d78bf1bbdfc595bf547bad30580cb4c20b/tflearn/layers/estimator.py#L14
network = regression(network,
                     optimizer='adam',
                     learning_rate=0.001,
                     loss='categorical_crossentropy',
                     name='target')

# https://github.com/tflearn/tflearn/blob/66c0c9c67b0472cbdc85bae0beb7992fa008480e/tflearn/models/dnn.py#L10
model = tflearn.DNN(network, tensorboard_verbose=3)
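The conf dictionary comes from deepdsp.conf and is not shown here; a hypothetical minimal version, just to make the input shape [None, buff_size, num_buffs, 2] concrete, could be:

# Hypothetical configuration (the real deepdsp.conf values are not shown in this example).
conf = {
    "buff_size": 1024,                  # samples per buffer (assumed)
    "num_buffs": 16,                    # buffers per training example (assumed)
    "classes": ["class_a", "class_b"],  # placeholder label names
}
# With these values, input_data expects tensors of shape [batch, 1024, 16, 2].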
Example #20
0
def generator_fusionnet(images, name='generator'):
	dimx = DIMX
	dimy = DIMY
	dimz = DIMZ

	with tf.variable_scope(name):
		# return images
		e1 = conv_3d(incoming=images, 
					 nb_filter=NB_FILTERS*1, 
					 filter_size=4,
					 strides=[1, 1, 1, 1, 1], # DIMZ/1, DIMY/2, DIMX/2, 
					 regularizer='L1',
					 activation='elu')
		e1 = batch_normalization(incoming=e1)
		###
		e2 = conv_3d(incoming=e1, 
					 nb_filter=NB_FILTERS*1, 
					 filter_size=4,
					 strides=[1, 2, 2, 2, 1], # DIMZ/2, DIMY/4, DIMX/4, 
					 regularizer='L1',
					 activation='elu')
		
		e2 = batch_normalization(incoming=e2)
		###
		e3 = conv_3d(incoming=e2, 
					 nb_filter=NB_FILTERS*2, 
					 filter_size=4,
					 strides=[1, 2, 2, 2, 1], # DIMZ/4, DIMY/8, DIMX/8,
					 regularizer='L1',
					 activation='elu')
		e3 = batch_normalization(incoming=e3)
		###
		e4 = conv_3d(incoming=e3, 
					 nb_filter=NB_FILTERS*2, 
					 filter_size=4,
					 strides=[1, 2, 2, 2, 1], # DIMZ/8, DIMY/16, DIMX/16,
					 regularizer='L1',
					 activation='elu')
		e4 = batch_normalization(incoming=e4)
		###
		e5 = conv_3d(incoming=e4, 
					 nb_filter=NB_FILTERS*4, 
					 filter_size=4,
					 strides=[1, 2, 2, 2, 1], # DIMZ/16, DIMY/32, DIMX/32,
					 regularizer='L1',
					 activation='elu')
		e5 = batch_normalization(incoming=e5)		
		###
		e6 = conv_3d(incoming=e5, 
					 nb_filter=NB_FILTERS*4, 
					 filter_size=4,
					 strides=[1, 2, 2, 2, 1], # DIMZ/32, DIMY/64, DIMX/64,
					 regularizer='L1',
					 activation='elu')
		e6 = batch_normalization(incoming=e6)		
		###
		e7 = conv_3d(incoming=e6, 
					 nb_filter=NB_FILTERS*8, 
					 filter_size=4,
					 strides=[1, 2, 2, 2, 1], # DIMZ/64, DIMY/128, DIMX/128,
					 regularizer='L1',
					 activation='elu')
		e7 = batch_normalization(incoming=e7)		
		### Middle
		e8 = conv_3d(incoming=e7, 
					 nb_filter=NB_FILTERS*8, 
					 filter_size=4,
					 strides=[1, 2, 2, 2, 1], # DIMZ/128, DIMY/256, DIMX/256,
					 regularizer='L1',
					 activation='elu')
		# print "Dim8: ", dimz, dimy, dimx
		dimz, dimy, dimx = dimz/2, dimy/2, dimx/2
		e8 = batch_normalization(incoming=e8)		

		################### Decoder

		# print "Dim D7a: ", dimz, dimy, dimx
		d7 = conv_3d_transpose(incoming=e8, 
							   nb_filter=NB_FILTERS*8, 
							   filter_size=4,
							   strides=[1, 2, 2, 2, 1], # DIMZ/64, DIMY/128, DIMX/128,
							   regularizer='L1',
							   activation='elu', 
							   output_shape=[2, 4, 4])

		d7 = batch_normalization(incoming=d7)
		
		d7 = dropout(incoming=d7, keep_prob=0.5)
		
		d7 = merge(tensors_list=[d7, e7], mode='elemwise_sum')
		# d7 = d7+e7	
		###
		d6 = conv_3d_transpose(incoming=d7, 
							   nb_filter=NB_FILTERS*4, 
							   filter_size=4,
							   strides=[1, 2, 2, 2, 1], # DIMZ/32, DIMY/64, DIMX/64,
							   regularizer='L1',
							   activation='elu', 
							   output_shape=[4, 8, 8])
		d6 = batch_normalization(incoming=d6)	
		d6 = dropout(incoming=d6, keep_prob=0.5)
		
		d6 = merge(tensors_list=[d6, e6], mode='elemwise_sum')
		# d6 = d6+e6
		###
		d5 = conv_3d_transpose(incoming=d6, 
							   nb_filter=NB_FILTERS*4, 
							   filter_size=4,
							   strides=[1, 2, 2, 2, 1], # DIMZ/16, DIMY/32, DIMX/32,
							   regularizer='L1',
							   activation='elu', 
							   output_shape=[8, 16, 16])
		d5 = batch_normalization(incoming=d5)	
		d5 = dropout(incoming=d5, keep_prob=0.5)
		
		d5 = merge(tensors_list=[d5, e5], mode='elemwise_sum')
		# d5 = d5+e5
		###
		d4 = conv_3d_transpose(incoming=d5, 
							   nb_filter=NB_FILTERS*2, 
							   filter_size=4,
							   strides=[1, 2, 2, 2, 1], # DIMZ/8, DIMY/16, DIMX/16,
							   regularizer='L1',
							   activation='elu', 
							   output_shape=[16, 32, 32])
		d4 = batch_normalization(incoming=d4)	
		
		d4 = merge(tensors_list=[d4, e4], mode='elemwise_sum')
		# d4 = d4+e4
		###
		d3 = conv_3d_transpose(incoming=d4, 
							   nb_filter=NB_FILTERS*2, 
							   filter_size=4,
							   strides=[1, 2, 2, 2, 1], # DIMZ/4, DIMY/8, DIMX/8,
							   regularizer='L1',
							   activation='elu', 
							   output_shape=[32, 64, 64])
		d3 = batch_normalization(incoming=d3)	
		
		d3 = merge(tensors_list=[d3, e3], mode='elemwise_sum')
		# d3 = d3+e3
		###
		d2 = conv_3d_transpose(incoming=d3, 
							   nb_filter=NB_FILTERS*1, 
							   filter_size=4,
							   strides=[1, 2, 2, 2, 1], # DIMZ/2, DIMY/4, DIMX/4,
							   regularizer='L1',
							   activation='elu', 
							   output_shape=[64, 128, 128])
		d2 = batch_normalization(incoming=d2)	
		
		d2 = merge(tensors_list=[d2, e2], mode='elemwise_sum')
		# d2 = d2+e2
		
		###
		d1 = conv_3d_transpose(incoming=d2, 
							   nb_filter=NB_FILTERS*1, 
							   filter_size=4,
							   strides=[1, 2, 2, 2, 1], # DIMZ/1, DIMY/2, DIMX/2,
							   regularizer='L1',
							   activation='elu', 
							   output_shape=[128, 256, 256])
		d1 = batch_normalization(incoming=d1)	
		
		d1 = merge(tensors_list=[d1, e1], mode='elemwise_sum')
		# d1 = d1+e1
		###
		
		out = conv_3d_transpose(incoming=d1, 
							   nb_filter=1, 
							   filter_size=4,
							   strides=[1, 1, 1, 1, 1], # DIMZ/1, DIMY/1, DIMX/1,
							   regularizer='L1',
							   activation='tanh', 
							   output_shape=[128, 256, 256])
		return out, e8
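For orientation, a hedged sketch of how this generator might be driven; the constants are assumptions inferred from the final output_shape=[128, 256, 256] and the stride comments, and NB_FILTERS is a placeholder:

# Assumed module-level constants (inferred, not from the original file).
DIMZ, DIMY, DIMX = 128, 256, 256
NB_FILTERS = 32  # placeholder base channel count

# TF1-style usage: feed a batch of single-channel volumes and fetch the
# reconstructed volume plus the bottleneck features e8.
images = tf.placeholder(tf.float32, [None, DIMZ, DIMY, DIMX, 1], name='images')
fake_volume, bottleneck = generator_fusionnet(images, name='generator')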
Example #21
0
def network(img_shape, name, LR):

    network = input_data(shape=img_shape, name=name)
    conv1a_3_3 = relu(
        batch_normalization(
            conv_2d(network,
                    32,
                    3,
                    strides=2,
                    bias=False,
                    padding='VALID',
                    activation=None,
                    name='Conv2d_1a_3x3')))
    conv2a_3_3 = relu(
        batch_normalization(
            conv_2d(conv1a_3_3,
                    32,
                    3,
                    bias=False,
                    padding='VALID',
                    activation=None,
                    name='Conv2d_2a_3x3')))
    conv2b_3_3 = relu(
        batch_normalization(
            conv_2d(conv2a_3_3,
                    64,
                    3,
                    bias=False,
                    activation=None,
                    name='Conv2d_2b_3x3')))
    maxpool3a_3_3 = max_pool_2d(conv2b_3_3,
                                3,
                                strides=2,
                                padding='VALID',
                                name='MaxPool_3a_3x3')
    conv3b_1_1 = relu(
        batch_normalization(
            conv_2d(maxpool3a_3_3,
                    80,
                    1,
                    bias=False,
                    padding='VALID',
                    activation=None,
                    name='Conv2d_3b_1x1')))
    conv4a_3_3 = relu(
        batch_normalization(
            conv_2d(conv3b_1_1,
                    192,
                    3,
                    bias=False,
                    padding='VALID',
                    activation=None,
                    name='Conv2d_4a_3x3')))
    maxpool5a_3_3 = max_pool_2d(conv4a_3_3,
                                3,
                                strides=2,
                                padding='VALID',
                                name='MaxPool_5a_3x3')

    tower_conv = relu(
        batch_normalization(
            conv_2d(maxpool5a_3_3,
                    96,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_5b_b0_1x1')))

    tower_conv1_0 = relu(
        batch_normalization(
            conv_2d(maxpool5a_3_3,
                    48,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_5b_b1_0a_1x1')))
    tower_conv1_1 = relu(
        batch_normalization(
            conv_2d(tower_conv1_0,
                    64,
                    5,
                    bias=False,
                    activation=None,
                    name='Conv2d_5b_b1_0b_5x5')))

    tower_conv2_0 = relu(
        batch_normalization(
            conv_2d(maxpool5a_3_3,
                    64,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_5b_b2_0a_1x1')))
    tower_conv2_1 = relu(
        batch_normalization(
            conv_2d(tower_conv2_0,
                    96,
                    3,
                    bias=False,
                    activation=None,
                    name='Conv2d_5b_b2_0b_3x3')))
    tower_conv2_2 = relu(
        batch_normalization(
            conv_2d(tower_conv2_1,
                    96,
                    3,
                    bias=False,
                    activation=None,
                    name='Conv2d_5b_b2_0c_3x3')))

    tower_pool3_0 = avg_pool_2d(maxpool5a_3_3,
                                3,
                                strides=1,
                                padding='same',
                                name='AvgPool_5b_b3_0a_3x3')
    tower_conv3_1 = relu(
        batch_normalization(
            conv_2d(tower_pool3_0,
                    64,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_5b_b3_0b_1x1')))

    tower_5b_out = merge(
        [tower_conv, tower_conv1_1, tower_conv2_2, tower_conv3_1],
        mode='concat',
        axis=3)

    net = repeat(tower_5b_out, 10, block35, scale=0.17)

    tower_conv = relu(
        batch_normalization(
            conv_2d(net,
                    384,
                    3,
                    bias=False,
                    strides=2,
                    activation=None,
                    padding='VALID',
                    name='Conv2d_6a_b0_0a_3x3')))
    tower_conv1_0 = relu(
        batch_normalization(
            conv_2d(net,
                    256,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_6a_b1_0a_1x1')))
    tower_conv1_1 = relu(
        batch_normalization(
            conv_2d(tower_conv1_0,
                    256,
                    3,
                    bias=False,
                    activation=None,
                    name='Conv2d_6a_b1_0b_3x3')))
    tower_conv1_2 = relu(
        batch_normalization(
            conv_2d(tower_conv1_1,
                    384,
                    3,
                    bias=False,
                    strides=2,
                    padding='VALID',
                    activation=None,
                    name='Conv2d_6a_b1_0c_3x3')))
    tower_pool = max_pool_2d(net,
                             3,
                             strides=2,
                             padding='VALID',
                             name='MaxPool_1a_3x3')
    net = merge([tower_conv, tower_conv1_2, tower_pool], mode='concat', axis=3)
    net = repeat(net, 20, block17, scale=0.1)

    tower_conv = relu(
        batch_normalization(
            conv_2d(net,
                    256,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_0a_1x1')))
    # tower_conv0_1 = relu(batch_normalization(conv_2d(tower_conv, 384, 3, bias=False, strides=2, padding='VALID', activation=None,name='Conv2d_0a_1x1')))
    tower_conv0_1 = relu(
        batch_normalization(
            conv_2d(tower_conv,
                    384,
                    1,
                    bias=False,
                    strides=2,
                    padding='VALID',
                    activation=None,
                    name='Conv2d_0a_1x1')))

    tower_conv1 = relu(
        batch_normalization(
            conv_2d(net,
                    256,
                    1,
                    bias=False,
                    padding='VALID',
                    activation=None,
                    name='Conv2d_0a_1x1')))
    # tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1,288,3, bias=False, strides=2, padding='VALID',activation=None, name='COnv2d_1a_3x3')))
    tower_conv1_1 = relu(
        batch_normalization(
            conv_2d(tower_conv1,
                    288,
                    1,
                    bias=False,
                    strides=2,
                    padding='VALID',
                    activation=None,
                    name='COnv2d_1a_3x3')))

    tower_conv2 = relu(
        batch_normalization(
            conv_2d(net,
                    256,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_0a_1x1')))
    tower_conv2_1 = relu(
        batch_normalization(
            conv_2d(tower_conv2,
                    288,
                    3,
                    bias=False,
                    name='Conv2d_0b_3x3',
                    activation=None)))
    # tower_conv2_2 = relu(batch_normalization(conv_2d(tower_conv2_1, 320, 3, bias=False, strides=2, padding='VALID',activation=None, name='Conv2d_1a_3x3')))
    tower_conv2_2 = relu(
        batch_normalization(
            conv_2d(tower_conv2_1,
                    320,
                    1,
                    bias=False,
                    strides=2,
                    padding='VALID',
                    activation=None,
                    name='Conv2d_1a_3x3')))

    # tower_pool = max_pool_2d(net, 3, strides=2, padding='VALID', name='MaxPool_1a_3x3')
    tower_pool = max_pool_2d(net,
                             1,
                             strides=2,
                             padding='VALID',
                             name='MaxPool_1a_3x3')
    net = merge([tower_conv0_1, tower_conv1_1, tower_conv2_2, tower_pool],
                mode='concat',
                axis=3)

    net = repeat(net, 9, block8, scale=0.2)
    net = block8(net, activation=None)

    net = relu(
        batch_normalization(
            conv_2d(net,
                    1536,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_7b_1x1')))
    net = avg_pool_2d(net,
                      net.get_shape().as_list()[1:3],
                      strides=2,
                      padding='VALID',
                      name='AvgPool_1a_8x8')
    net = flatten(net)
    net = dropout(net, dropout_keep_prob)
    loss = fully_connected(net, num_classes, activation='softmax')

    network = tflearn.regression(loss,
                                 optimizer='RMSprop',
                                 loss='categorical_crossentropy',
                                 learning_rate=0.0001,
                                 name='targets')
    return network
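The repeat helper (like block35, block17 and block8) is defined elsewhere in the file; assuming it simply applies a block function n times in sequence, a minimal stand-in would be:

def repeat(incoming, n, block_fn, **kwargs):
    # Hypothetical helper: chain block_fn n times, e.g. repeat(net, 10, block35, scale=0.17).
    net = incoming
    for _ in range(n):
        net = block_fn(net, **kwargs)
    return net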
Example #22
0
def encoder_with_convs_and_symmetry(in_signal,
                                    n_filters=[64, 128, 256, 1024],
                                    filter_sizes=[1],
                                    strides=[1],
                                    b_norm=True,
                                    non_linearity=tf.nn.relu,
                                    regularizer=None,
                                    weight_decay=0.001,
                                    symmetry=tf.reduce_max,
                                    dropout_prob=None,
                                    pool=avg_pool_1d,
                                    pool_sizes=None,
                                    scope=None,
                                    reuse=False,
                                    padding='same',
                                    verbose=False,
                                    closing=None,
                                    conv_op=conv_1d):

    if verbose:
        print('encoder_with_convs_and_symmetry')

    n_layers = len(n_filters)
    filter_sizes = replicate_parameter_for_all_layers(filter_sizes, n_layers)
    strides = replicate_parameter_for_all_layers(strides, n_layers)
    dropout_prob = replicate_parameter_for_all_layers(dropout_prob, n_layers)

    if n_layers < 1:
        raise ValueError('More than 0 layers are expected.')

    for i in range(n_layers):
        if i == 0:
            layer = in_signal

        name = 'encoder_conv_layer_' + str(i)
        scope_i = expand_scope_by_name(scope, name)
        layer = conv_op(layer,
                        nb_filter=n_filters[i],
                        filter_size=filter_sizes[i],
                        strides=strides[i],
                        regularizer=regularizer,
                        weight_decay=weight_decay,
                        name=name,
                        reuse=reuse,
                        scope=scope_i,
                        padding=padding)

        if verbose:
            print(
                name, 'conv params = ',
                np.prod(layer.W.get_shape().as_list()) +
                np.prod(layer.b.get_shape().as_list()))

        if b_norm:
            name += '_bnorm'
            scope_i = expand_scope_by_name(scope, name)
            layer = batch_normalization(layer,
                                        name=name,
                                        reuse=reuse,
                                        scope=scope_i)
            if verbose:
                print(
                    'bnorm params = ',
                    np.prod(layer.beta.get_shape().as_list()) +
                    np.prod(layer.gamma.get_shape().as_list()))

        if non_linearity is not None:
            layer = non_linearity(layer)

        if pool is not None and pool_sizes is not None:
            if pool_sizes[i] is not None:
                layer = pool(layer, kernel_size=pool_sizes[i])

        if dropout_prob is not None and dropout_prob[i] > 0:
            layer = dropout(layer, 1.0 - dropout_prob[i])

        if verbose:
            print(layer)
            print('output size:', np.prod(layer.get_shape().as_list()[1:]),
                  '\n')

    if symmetry is not None:
        layer = symmetry(layer, axis=1)
        if verbose:
            print(layer)

    if closing is not None:
        layer = closing(layer)
        print(layer)

    return layer
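replicate_parameter_for_all_layers is not shown in this snippet; judging from its use above, it most likely broadcasts a single value (or passes None through) so there is one entry per layer. A hedged sketch:

def replicate_parameter_for_all_layers(parameter, n_layers):
    # Hypothetical version: None passes through, a length-1 list is repeated,
    # and anything else must already have one value per layer.
    if parameter is None:
        return None
    if len(parameter) == 1:
        return list(parameter) * n_layers
    if len(parameter) != n_layers:
        raise ValueError('Expected 1 or {} values, got {}.'.format(n_layers, len(parameter)))
    return parameter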
Example #23
0
 # The syntax with tflearn is almost identical to keras. The only differences
 # are: there is no need to flatten a tensor before passing it to a
 # fully_connected layer, since the fc layer takes care of that
 # automatically. Also, the default padding in tflearn is 'same', while in
 # keras it is 'valid'.
 net = input_data(shape=[None, MAX_SEQUENCE_LENGTH], name='input')
 net = embedding(net,
                 input_dim=MAX_NB_WORDS,
                 output_dim=EMBEDDING_DIM,
                 trainable=False,
                 name="EmbeddingLayer")
 net = conv_1d(net, 128, 5, 1, activation='relu', padding="valid")
 # one could add regularization as:
 # net = conv_1d(net, 128, 5, 1, activation='relu', regularizer="L2", padding="valid")
 net = max_pool_1d(net, 5, padding="valid")
 net = batch_normalization(net)
 net = conv_1d(net, 128, 5, activation='relu', padding="valid")
 net = max_pool_1d(net, 5, padding="valid")
 net = batch_normalization(net)
 net = conv_1d(net, 128, 5, activation='relu', padding="valid")
 net = max_pool_1d(net, 35)
 net = batch_normalization(net)
 net = fully_connected(net, 128, activation='relu')
 net = dropout(net, 0.5)
 net = fully_connected(net, y_train.shape[1], activation='softmax')
 net = regression(net,
                  optimizer='adam',
                  learning_rate=0.01,
                  loss='categorical_crossentropy',
                  name='target')
 model = tflearn.DNN(net, tensorboard_verbose=0)
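To make the two differences mentioned in the comments concrete (Keras needs an explicit Flatten, and its default padding is 'valid'), a rough Keras counterpart of the same stack might look like this; NUM_CLASSES stands in for y_train.shape[1] and nothing here is from the original project:

# Hypothetical Keras equivalent, shown only for comparison.
from tensorflow.keras import layers, models

model = models.Sequential([
    layers.Embedding(MAX_NB_WORDS, EMBEDDING_DIM,
                     input_length=MAX_SEQUENCE_LENGTH, trainable=False),
    layers.Conv1D(128, 5, activation='relu'),  # padding='valid' is the Keras default
    layers.MaxPooling1D(5),
    layers.BatchNormalization(),
    layers.Flatten(),                          # explicit flatten before Dense
    layers.Dense(128, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(NUM_CLASSES, activation='softmax'),
])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])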
Example #24
0
    def define_network(self):
        """
        Defines CNN architecture
        :return: CNN model
        """

        # My CNN 1 (type1)

        # # For data normalization
        # img_prep = ImagePreprocessing()
        # img_prep.add_featurewise_zero_center()
        # img_prep.add_featurewise_stdnorm()
        #
        # # For creating extra data (augmenting the dataset): flipped, rotated, blurred, etc. images
        # img_aug = ImageAugmentation()
        # img_aug.add_random_flip_leftright()
        # img_aug.add_random_rotation(max_angle=25.0)
        # img_aug.add_random_blur(sigma_max=3.0)
        #
        # self.network = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1],
        #                           data_augmentation=img_aug,
        #                           data_preprocessing=img_prep)
        # self.network = conv_2d(self.network, 64, 5, activation='relu')
        # self.network = max_pool_2d(self.network, 3, strides=2)
        # self.network = conv_2d(self.network, 64, 5, activation='relu')
        # self.network = max_pool_2d(self.network, 3, strides=2)
        # self.network = conv_2d(self.network, 128, 4, activation='relu')
        # self.network = dropout(self.network, 0.3)
        # self.network = fully_connected(self.network, 3072, activation='relu')
        # self.network = fully_connected(self.network, len(EMOTIONS), activation='softmax')
        # self.network = regression(self.network, optimizer='adam', loss='categorical_crossentropy')
        # self.model = tflearn.DNN(self.network, checkpoint_path=os.path.join(CHECKPOINTS_PATH + '/emotion_recognition'),
        #                          max_checkpoints=1, tensorboard_verbose=0)

        # My CNN 2 (type2)

        # For creating extra data (augmenting the dataset): flipped, rotated, blurred, etc. images
        img_aug = ImageAugmentation()
        img_aug.add_random_flip_leftright()
        img_aug.add_random_rotation(max_angle=25.0)
        img_aug.add_random_blur(sigma_max=3.0)

        self.network = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1],
                                  data_augmentation=img_aug)

        self.network = conv_2d(self.network, 64, 3, activation='relu')
        self.network = batch_normalization(self.network)
        self.network = conv_2d(self.network, 64, 3, activation='relu')
        self.network = batch_normalization(self.network)
        self.network = max_pool_2d(self.network, 2, strides=2)

        self.network = conv_2d(self.network, 128, 3, activation='relu')
        self.network = batch_normalization(self.network)
        self.network = conv_2d(self.network, 128, 3, activation='relu')
        self.network = batch_normalization(self.network)
        self.network = max_pool_2d(self.network, 2, strides=2)
        self.network = dropout(self.network, 0.2)

        self.network = conv_2d(self.network, 256, 3, activation='relu')
        self.network = batch_normalization(self.network)
        self.network = conv_2d(self.network, 256, 3, activation='relu')
        self.network = batch_normalization(self.network)
        self.network = max_pool_2d(self.network, 2, strides=2)
        self.network = dropout(self.network, 0.25)

        self.network = conv_2d(self.network, 512, 3, activation='relu')
        self.network = batch_normalization(self.network)
        self.network = conv_2d(self.network, 512, 3, activation='relu')
        self.network = batch_normalization(self.network)
        self.network = max_pool_2d(self.network, 2, strides=2)
        self.network = dropout(self.network, 0.25)

        self.network = fully_connected(self.network, 1024, activation='relu')
        self.network = batch_normalization(self.network)
        self.network = dropout(self.network, 0.45)

        self.network = fully_connected(self.network, 1024, activation='relu')
        self.network = batch_normalization(self.network)
        self.network = dropout(self.network, 0.45)

        self.network = fully_connected(self.network,
                                       len(EMOTIONS),
                                       activation='softmax')
        self.network = regression(self.network,
                                  optimizer='adam',
                                  loss='categorical_crossentropy')

        self.model = tflearn.DNN(
            self.network,
            checkpoint_path=os.path.join(CHECKPOINTS_PATH +
                                         '/emotion_recognition'),
            max_checkpoints=1,
            tensorboard_verbose=0)

        return self.model
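A hedged sketch of how the returned model might be used for single-image inference, assuming face is a grayscale array already resized to IMG_SIZE x IMG_SIZE and scaled to [0, 1]:

import numpy as np

def predict_emotion(model, face):
    # Hypothetical helper; reshapes one image into a batch of size 1.
    batch = np.asarray(face, dtype=np.float32).reshape([1, IMG_SIZE, IMG_SIZE, 1])
    probs = model.predict(batch)[0]
    best = int(np.argmax(probs))
    return EMOTIONS[best], float(probs[best])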
Example #25
0
###END DATA PROCESSING###
#Split train test validate
#xtrain, x2, ytrain, y2 = train_test_split(image_list, labellist, test_size = 0.2,random_state = 1)
#xvalid, xtest, yvalid, ytest = train_test_split(x2, y2, test_size = 0.5,random_state=1)

#start of building net
network = input_data(shape=[None, 436, 640, 3])
conv1_7_7 = conv_2d(network,
                    16,
                    7,
                    strides=2,
                    activation='relu',
                    name='conv1_7_7_s2')
pool1_3_3 = max_pool_2d(conv1_7_7, 3, strides=2)
pool1_3_3 = batch_normalization(pool1_3_3)
conv2_3_3_reduce = conv_2d(pool1_3_3,
                           16,
                           1,
                           activation='relu',
                           name='conv2_3_3_reduce')
conv2_3_3 = conv_2d(conv2_3_3_reduce,
                    48,
                    3,
                    activation='relu',
                    name='conv2_3_3')
conv2_3_3 = batch_normalization(conv2_3_3)
pool2_3_3 = max_pool_2d(conv2_3_3,
                        kernel_size=3,
                        strides=2,
                        name='pool2_3_3_s2')
Example #26
0
img_aug.add_random_rotation(max_angle=25.)

###################################
# Define network architecture
###################################

# Input is a 128 x 128 image with 3 color channels (red, green and blue)
network = input_data(shape=[None, size_image, size_image, 3],
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)

# Building 'VGG Network'
''' Batch normalization is additionally used here '''

network = relu(
    batch_normalization(conv_2d(network, 64, 3, activation=None, bias=False)))
network = relu(
    batch_normalization(conv_2d(network, 64, 3, activation=None, bias=False)))
network = max_pool_2d(network, 2, strides=2)

network = relu(
    batch_normalization(conv_2d(network, 128, 3, activation=None, bias=False)))
network = relu(
    batch_normalization(conv_2d(network, 128, 3, activation=None, bias=False)))
network = max_pool_2d(network, 2, strides=2)

network = relu(
    batch_normalization(conv_2d(network, 256, 3, activation=None, bias=False)))
network = relu(
    batch_normalization(conv_2d(network, 256, 3, activation=None, bias=False)))
network = relu(
Example #27
0
def encoder_with_convs_and_symmetry(in_signal,
                                    n_filters=[64, 128, 256, 1024],
                                    filter_sizes=[1],
                                    strides=[1],
                                    b_norm=True,
                                    spn=False,
                                    non_linearity=tf.nn.relu,
                                    regularizer=None,
                                    weight_decay=0.001,
                                    symmetry=tf.reduce_max,
                                    dropout_prob=None,
                                    scope=None,
                                    reuse=False):
    '''An Encoder (recognition network), which maps inputs onto a latent space.
    '''
    warnings.warn('Using old architecture.')
    n_layers = len(n_filters)
    filter_sizes = replicate_parameter_for_all_layers(filter_sizes, n_layers)
    strides = replicate_parameter_for_all_layers(strides, n_layers)
    dropout_prob = replicate_parameter_for_all_layers(dropout_prob, n_layers)

    if n_layers < 2:
        raise ValueError('More than 1 layer is expected.')

    if spn:
        transformer = pcloud_spn(in_signal)
        in_signal = tf.batch_matmul(in_signal, transformer)
        print 'Spatial transformer was activated.'

    name = 'encoder_conv_layer_0'
    scope_i = expand_scope_by_name(scope, name)
    layer = conv_1d(in_signal,
                    nb_filter=n_filters[0],
                    filter_size=filter_sizes[0],
                    strides=strides[0],
                    regularizer=regularizer,
                    weight_decay=weight_decay,
                    name=name,
                    reuse=reuse,
                    scope=scope_i)

    if b_norm:
        name += '_bnorm'
        scope_i = expand_scope_by_name(scope, name)
        layer = batch_normalization(layer,
                                    name=name,
                                    reuse=reuse,
                                    scope=scope_i)

    layer = non_linearity(layer)

    if dropout_prob is not None and dropout_prob[0] > 0:
        layer = dropout(layer, 1.0 - dropout_prob[0])

    for i in xrange(1, n_layers):
        name = 'encoder_conv_layer_' + str(i)
        scope_i = expand_scope_by_name(scope, name)
        layer = conv_1d(layer,
                        nb_filter=n_filters[i],
                        filter_size=filter_sizes[i],
                        strides=strides[i],
                        regularizer=regularizer,
                        weight_decay=weight_decay,
                        name=name,
                        reuse=reuse,
                        scope=scope_i)

        if b_norm:
            name += '_bnorm'
            #scope_i = expand_scope_by_name(scope, name) # FORGOT TO PUT IT BEFORE ICLR
            layer = batch_normalization(layer,
                                        name=name,
                                        reuse=reuse,
                                        scope=scope_i)

        layer = non_linearity(layer)

        if dropout_prob is not None and dropout_prob[i] > 0:
            layer = dropout(layer, 1.0 - dropout_prob[i])

    if symmetry is not None:
        layer = symmetry(layer, axis=1)

    return layer
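As a usage illustration (not from the original project): because symmetry=tf.reduce_max is applied over axis 1, the per-point dimension collapses and each cloud maps to a single latent vector.

# Hypothetical usage: encode batches of 2048-point clouds into 128-D codes.
in_signal = input_data(shape=[None, 2048, 3])
latent = encoder_with_convs_and_symmetry(in_signal,
                                         n_filters=[64, 128, 128],
                                         filter_sizes=[1],
                                         strides=[1])
# After tf.reduce_max over axis=1, `latent` has shape [batch, 128].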
Example #28
0
def _model5():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    def block35(net, scale=1.0, activation="relu"):
        tower_conv = relu(batch_normalization(conv_2d(net, 32, 1, bias=False, activation=None, name='Conv2d_1x1')))
        tower_conv1_0 = relu(batch_normalization(conv_2d(net, 32, 1, bias=False, activation=None,name='Conv2d_0a_1x1')))
        tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 32, 3, bias=False, activation=None,name='Conv2d_0b_3x3')))
        tower_conv2_0 = relu(batch_normalization(conv_2d(net, 32, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
        tower_conv2_1 = relu(batch_normalization(conv_2d(tower_conv2_0, 48,3, bias=False, activation=None, name='Conv2d_0b_3x3')))
        tower_conv2_2 = relu(batch_normalization(conv_2d(tower_conv2_1, 64,3, bias=False, activation=None, name='Conv2d_0c_3x3')))
        tower_mixed = merge([tower_conv, tower_conv1_1, tower_conv2_2], mode='concat', axis=3)
        tower_out = relu(batch_normalization(conv_2d(tower_mixed, net.get_shape()[3], 1, bias=False, activation=None, name='Conv2d_1x1')))
        net += scale * tower_out
        if activation:
            if isinstance(activation, str):
                net = activations.get(activation)(net)
            elif hasattr(activation, '__call__'):
                net = activation(net)
            else:
                raise ValueError("Invalid Activation.")
        return net

    def block17(net, scale=1.0, activation="relu"):
        tower_conv = relu(batch_normalization(conv_2d(net, 192, 1, bias=False, activation=None, name='Conv2d_1x1')))
        tower_conv_1_0 = relu(batch_normalization(conv_2d(net, 128, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
        tower_conv_1_1 = relu(batch_normalization(conv_2d(tower_conv_1_0, 160,[1,7], bias=False, activation=None,name='Conv2d_0b_1x7')))
        tower_conv_1_2 = relu(batch_normalization(conv_2d(tower_conv_1_1, 192, [7,1], bias=False, activation=None,name='Conv2d_0c_7x1')))
        tower_mixed = merge([tower_conv,tower_conv_1_2], mode='concat', axis=3)
        tower_out = relu(batch_normalization(conv_2d(tower_mixed, net.get_shape()[3], 1, bias=False, activation=None, name='Conv2d_1x1')))
        net += scale * tower_out
        if activation:
            if isinstance(activation, str):
                net = activations.get(activation)(net)
            elif hasattr(activation, '__call__'):
                net = activation(net)
            else:
                raise ValueError("Invalid Activation.")
        return net


    def block8(net, scale=1.0, activation="relu"):
        tower_conv = relu(batch_normalization(conv_2d(net, 192, 1, bias=False, activation=None, name='Conv2d_1x1')))
        tower_conv1_0 = relu(batch_normalization(conv_2d(net, 192, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
        tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 224, [1,3], bias=False, activation=None, name='Conv2d_0b_1x3')))
        tower_conv1_2 = relu(batch_normalization(conv_2d(tower_conv1_1, 256, [3,1], bias=False, name='Conv2d_0c_3x1')))
        tower_mixed = merge([tower_conv,tower_conv1_2], mode='concat', axis=3)
        tower_out = relu(batch_normalization(conv_2d(tower_mixed, net.get_shape()[3], 1, bias=False, activation=None, name='Conv2d_1x1')))
        net += scale * tower_out
        if activation:
            if isinstance(activation, str):
                net = activations.get(activation)(net)
            elif hasattr(activation, '__call__'):
                net = activation(net)
            else:
                raise ValueError("Invalid Activation.")
        return net


    num_classes = len(yTest[0])
    dropout_keep_prob = 0.8

    network = input_data(shape=[None, inputSize, inputSize, dim],
             name='input',
             data_preprocessing=img_prep,
             data_augmentation=img_aug)
    conv1a_3_3 = relu(batch_normalization(conv_2d(network, 32, 3, strides=2, bias=False, padding='VALID',activation=None,name='Conv2d_1a_3x3')))
    conv2a_3_3 = relu(batch_normalization(conv_2d(conv1a_3_3, 32, 3, bias=False, padding='VALID',activation=None, name='Conv2d_2a_3x3')))
    conv2b_3_3 = relu(batch_normalization(conv_2d(conv2a_3_3, 64, 3, bias=False, activation=None, name='Conv2d_2b_3x3')))
    maxpool3a_3_3 = max_pool_2d(conv2b_3_3, 3, strides=2, padding='VALID', name='MaxPool_3a_3x3')
    conv3b_1_1 = relu(batch_normalization(conv_2d(maxpool3a_3_3, 80, 1, bias=False, padding='VALID',activation=None, name='Conv2d_3b_1x1')))
    conv4a_3_3 = relu(batch_normalization(conv_2d(conv3b_1_1, 192, 3, bias=False, padding='VALID',activation=None, name='Conv2d_4a_3x3')))
    maxpool5a_3_3 = max_pool_2d(conv4a_3_3, 3, strides=2, padding='VALID', name='MaxPool_5a_3x3')

    tower_conv = relu(batch_normalization(conv_2d(maxpool5a_3_3, 96, 1, bias=False, activation=None, name='Conv2d_5b_b0_1x1')))

    tower_conv1_0 = relu(batch_normalization(conv_2d(maxpool5a_3_3, 48, 1, bias=False, activation=None, name='Conv2d_5b_b1_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 64, 5, bias=False, activation=None, name='Conv2d_5b_b1_0b_5x5')))

    tower_conv2_0 = relu(batch_normalization(conv_2d(maxpool5a_3_3, 64, 1, bias=False, activation=None, name='Conv2d_5b_b2_0a_1x1')))
    tower_conv2_1 = relu(batch_normalization(conv_2d(tower_conv2_0, 96, 3, bias=False, activation=None, name='Conv2d_5b_b2_0b_3x3')))
    tower_conv2_2 = relu(batch_normalization(conv_2d(tower_conv2_1, 96, 3, bias=False, activation=None,name='Conv2d_5b_b2_0c_3x3')))

    tower_pool3_0 = avg_pool_2d(maxpool5a_3_3, 3, strides=1, padding='same', name='AvgPool_5b_b3_0a_3x3')
    tower_conv3_1 = relu(batch_normalization(conv_2d(tower_pool3_0, 64, 1, bias=False, activation=None,name='Conv2d_5b_b3_0b_1x1')))

    tower_5b_out = merge([tower_conv, tower_conv1_1, tower_conv2_2, tower_conv3_1], mode='concat', axis=3)

    net = repeat(tower_5b_out, 10, block35, scale=0.17)

    '''
    tower_conv = relu(batch_normalization(conv_2d(net, 384, 3, bias=False, strides=2,activation=None, padding='VALID', name='Conv2d_6a_b0_0a_3x3')))
    tower_conv1_0 = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, activation=None, name='Conv2d_6a_b1_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 256, 3, bias=False, activation=None, name='Conv2d_6a_b1_0b_3x3')))
    tower_conv1_2 = relu(batch_normalization(conv_2d(tower_conv1_1, 384, 3, bias=False, strides=2, padding='VALID', activation=None,name='Conv2d_6a_b1_0c_3x3')))
    tower_pool = max_pool_2d(net, 3, strides=2, padding='VALID',name='MaxPool_1a_3x3')
    net = merge([tower_conv, tower_conv1_2, tower_pool], mode='concat', axis=3)
    net = repeat(net, 20, block17, scale=0.1)

    tower_conv = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
    tower_conv0_1 = relu(batch_normalization(conv_2d(tower_conv, 384, 3, bias=False, strides=2, padding='VALID', activation=None,name='Conv2d_0a_1x1')))

    tower_conv1 = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, padding='VALID', activation=None,name='Conv2d_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1,288,3, bias=False, strides=2, padding='VALID',activation=None, name='COnv2d_1a_3x3')))

    tower_conv2 = relu(batch_normalization(conv_2d(net, 256,1, bias=False, activation=None,name='Conv2d_0a_1x1')))
    tower_conv2_1 = relu(batch_normalization(conv_2d(tower_conv2, 288,3, bias=False, name='Conv2d_0b_3x3',activation=None)))
    tower_conv2_2 = relu(batch_normalization(conv_2d(tower_conv2_1, 320, 3, bias=False, strides=2, padding='VALID',activation=None, name='Conv2d_1a_3x3')))
    
    tower_pool = max_pool_2d(net, 3, strides=2, padding='VALID', name='MaxPool_1a_3x3')
    '''
    tower_conv = relu(batch_normalization(conv_2d(net, 384, 1, bias=False, strides=2,activation=None, padding='VALID', name='Conv2d_6a_b0_0a_3x3')))
    tower_conv1_0 = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, activation=None, name='Conv2d_6a_b1_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 256, 1, bias=False, activation=None, name='Conv2d_6a_b1_0b_3x3')))
    tower_conv1_2 = relu(batch_normalization(conv_2d(tower_conv1_1, 384, 1, bias=False, strides=2, padding='VALID', activation=None,name='Conv2d_6a_b1_0c_3x3')))
    tower_pool = max_pool_2d(net, 1, strides=2, padding='VALID',name='MaxPool_1a_3x3')
    net = merge([tower_conv, tower_conv1_2, tower_pool], mode='concat', axis=3)
    net = repeat(net, 20, block17, scale=0.1)

    tower_conv = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
    tower_conv0_1 = relu(batch_normalization(conv_2d(tower_conv, 384, 1, bias=False, strides=2, padding='VALID', activation=None,name='Conv2d_0a_1x1')))

    tower_conv1 = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, padding='VALID', activation=None,name='Conv2d_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1,288,1, bias=False, strides=2, padding='VALID',activation=None, name='COnv2d_1a_3x3')))

    tower_conv2 = relu(batch_normalization(conv_2d(net, 256,1, bias=False, activation=None,name='Conv2d_0a_1x1')))
    tower_conv2_1 = relu(batch_normalization(conv_2d(tower_conv2, 288,1, bias=False, name='Conv2d_0b_3x3',activation=None)))
    tower_conv2_2 = relu(batch_normalization(conv_2d(tower_conv2_1, 320, 1, bias=False, strides=2, padding='VALID',activation=None, name='Conv2d_1a_3x3')))
    
    
    tower_pool = max_pool_2d(net, 1, strides=2, padding='VALID', name='MaxPool_1a_3x3')
    
    ####
    net = merge([tower_conv0_1, tower_conv1_1,tower_conv2_2, tower_pool], mode='concat', axis=3)

    net = repeat(net, 9, block8, scale=0.2)
    net = block8(net, activation=None)

    net = relu(batch_normalization(conv_2d(net, 1536, 1, bias=False, activation=None, name='Conv2d_7b_1x1')))
    net = avg_pool_2d(net, net.get_shape().as_list()[1:3],strides=2, padding='VALID', name='AvgPool_1a_8x8')
    net = flatten(net)
    net = dropout(net, dropout_keep_prob)
    loss = fully_connected(net, num_classes,activation='softmax')


    network = tflearn.regression(loss, optimizer='RMSprop',
                         loss='categorical_crossentropy',
                         learning_rate=0.0001)
    model = tflearn.DNN(network, checkpoint_path='inception_resnet_v2',
                        max_checkpoints=1, tensorboard_verbose=2, tensorboard_dir="./tflearn_logs/")

    model.load(_path)
    pred = model.predict(xTest)

    df = pd.DataFrame(pred)
    df.to_csv(_path + ".csv")

    newList = pred.copy()
    newList = convert2(newList)
    if _CSV: makeCSV(newList)
    pred = convert2(pred)
    pred = convert3(pred)
    yTest = convert3(yTest)
    print(metrics.confusion_matrix(yTest, pred))
    print(metrics.classification_report(yTest, pred))
    print('Accuracy', accuracy_score(yTest, pred))
    print()
    if _wrFile: writeTest(pred)
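block35, block17 and block8 above all follow the same Inception-ResNet pattern: concatenate the branch outputs, project back to the input depth with a 1x1 convolution, then add the scaled result to the input and optionally apply an activation given either as a TFLearn activation name or as a callable. A framework-free sketch of that residual-scaling and activation-dispatch logic (illustrative only, with a stand-in for tflearn.activations):

import numpy as np

def residual_scale(net, tower_out, scale=1.0, activation='relu'):
    """Mimics the tail of block35/17/8: scaled residual add plus optional activation."""
    activations = {'relu': lambda x: np.maximum(x, 0.0)}  # stand-in for tflearn.activations
    net = net + scale * tower_out
    if activation:
        if isinstance(activation, str):
            net = activations[activation](net)
        elif callable(activation):
            net = activation(net)
        else:
            raise ValueError('Invalid Activation.')
    return net

x = np.array([-1.0, 2.0])
print(residual_scale(x, np.array([4.0, -8.0]), scale=0.5))  # [1. 0.]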
X, Y = image_preloader(train_file,
                       image_shape=(height, width),
                       mode='file',
                       categorical_labels=True,
                       normalize=True)
testX, testY = image_preloader(test_file,
                               image_shape=(height, width),
                               mode='file',
                               categorical_labels=True,
                               normalize=True)

network = input_data(shape=[None, width, height], name='input')
network = tflearn.layers.core.reshape(network, [-1, width, height, 1],
                                      name='Reshape')

network1 = batch_normalization(
    conv_2d(network, 64, 1, activation='relu', regularizer="L2"))
network2 = batch_normalization(
    conv_2d(network, 64, 3, activation='relu', regularizer="L2"))
network = merge([network1, network2], 'concat')
#network = batch_normalization(conv_2d(network, 64, 1, activation='relu', regularizer="L2"))
network = fully_connected(network, 64, activation='tanh')
network = dropout(network, 0.8)
network = fully_connected(network, 2, activation='softmax')
# Build neural network and train
network = regression(network,
                     optimizer='adam',
                     learning_rate=0.001,
                     loss='categorical_crossentropy',
                     name='target')
model = tflearn.DNN(network, tensorboard_verbose=0)
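The snippet stops after constructing the DNN; with TFLearn the next step would usually be a fit call against the preloaded data, roughly like the sketch below (the epoch count, batch size and run_id are placeholders, not from the original script):

model.fit(X, Y,
          n_epoch=10,
          validation_set=(testX, testY),
          show_metric=True,
          batch_size=64,
          run_id='merge_cnn')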
def build_net(network,
              X,
              Y,
              num_classes,
              num_epochs,
              checkpoint_path,
              size_batch,
              Xval=None,
              Yval=None,
              dec_step=100,
              train=True):
    tn = tflearn.initializations.truncated_normal(seed=100)
    xav = tflearn.initializations.xavier(seed=100)
    nor = tflearn.initializations.normal(seed=100)

    network = conv_2d(network, 32, 3, weights_init=nor, regularizer="L2")
    network = batch_normalization(network)
    network = tflearn.activations.softplus(network)
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)

    network = conv_2d(network, 64, 3, weights_init=nor, regularizer="L2")
    network = batch_normalization(network)
    network = tflearn.activations.softplus(network)
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)

    network = fully_connected(network, 128, weights_init=nor, regularizer="L2")
    network = batch_normalization(network)
    network = tflearn.activations.softplus(network)
    network = dropout(network, 0.8)

    network = fully_connected(network, 256, weights_init=nor, regularizer="L2")
    network = batch_normalization(network)
    network = tflearn.activations.softplus(network)
    network = dropout(network, 0.8)

    network = fully_connected(network,
                              num_classes,
                              weights_init=nor,
                              activation='softmax')

    adadelta = tflearn.optimizers.AdaDelta(learning_rate=0.01,
                                           rho=0.95,
                                           epsilon=1e-08)

    network = regression(network,
                         optimizer=adadelta,
                         loss='categorical_crossentropy',
                         name='target')

    # Train
    model = tflearn.DNN(network,
                        tensorboard_verbose=0,
                        checkpoint_path=checkpoint_path)
    if train:
        start_time = time.time()
        if Xval is None or Yval is None:
            model.fit(X,
                      Y,
                      n_epoch=num_epochs,
                      validation_set=0.0,
                      show_metric=True,
                      run_id='hsi_cnn_model',
                      shuffle=True,
                      batch_size=size_batch)
        else:
            model.fit(X,
                      Y,
                      n_epoch=num_epochs,
                      validation_set=(Xval, Yval),
                      show_metric=True,
                      run_id='hsi_cnn_model',
                      shuffle=True,
                      batch_size=size_batch)

        print("\n\n-------------train time: %s seconds\n\n" %
              (time.time() - start_time))

    return model
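A hypothetical invocation of build_net could look like the following; the input shape, class count, checkpoint path, batch size and the X/Y arrays are placeholders rather than values from the original code:

net_in = input_data(shape=[None, 28, 28, 1], name='input')
model = build_net(net_in, X, Y,
                  num_classes=10,
                  num_epochs=50,
                  checkpoint_path='./checkpoints/hsi_cnn',
                  size_batch=128,
                  Xval=Xval,
                  Yval=Yval)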
Example #31
0
def decoder_with_fc_only(latent_signal, layer_sizes=[], b_norm=True, non_linearity=tf.nn.relu,
                         regularizer=None, weight_decay=0.001, reuse=False, scope=None, dropout_prob=None,
                         b_norm_finish=False, verbose=False):
    '''A decoding network which maps points from the latent space back onto the data space.
    '''
    if verbose:
        print('Building Decoder')

    n_layers = len(layer_sizes)
    dropout_prob = replicate_parameter_for_all_layers(dropout_prob, n_layers)

    if n_layers < 2:
        raise ValueError('For an FC decoder with a single layer use simpler code.')

    for i in range(0, n_layers - 1):
        name = 'decoder_fc_' + str(i)
        scope_i = expand_scope_by_name(scope, name)

        if i == 0:
            layer = latent_signal

        layer = fully_connected(layer, layer_sizes[i], activation='linear', weights_init='xavier', name=name, regularizer=regularizer, weight_decay=weight_decay, reuse=reuse, scope=scope_i)

        if verbose:
            print(name, 'FC params =', np.prod(layer.W.get_shape().as_list()) + np.prod(layer.b.get_shape().as_list()))

        if b_norm:
            name += '_bnorm'
            scope_i = expand_scope_by_name(scope, name)
            layer = batch_normalization(layer, name=name, reuse=reuse, scope=scope_i)
            if verbose:
                print('bnorm params =', np.prod(layer.beta.get_shape().as_list()) + np.prod(layer.gamma.get_shape().as_list()))

        if non_linearity is not None:
            layer = non_linearity(layer)

        if dropout_prob is not None and dropout_prob[i] > 0:
            layer = dropout(layer, 1.0 - dropout_prob[i])

        if verbose:
            print(layer)
            print('output size:', np.prod(layer.get_shape().as_list()[1:]), '\n')

    # Last decoding layer never has a non-linearity.
    name = 'decoder_fc_' + str(n_layers - 1)
    scope_i = expand_scope_by_name(scope, name)
    layer = fully_connected(layer, layer_sizes[n_layers - 1], activation='linear', weights_init='xavier', name=name, regularizer=regularizer, weight_decay=weight_decay, reuse=reuse, scope=scope_i)
    if verbose:
        print(name, 'FC params =', np.prod(layer.W.get_shape().as_list()) + np.prod(layer.b.get_shape().as_list()))

    if b_norm_finish:
        name += '_bnorm'
        scope_i = expand_scope_by_name(scope, name)
        layer = batch_normalization(layer, name=name, reuse=reuse, scope=scope_i)
        if verbose:
            print('bnorm params =', np.prod(layer.beta.get_shape().as_list()) + np.prod(layer.gamma.get_shape().as_list()))

    if verbose:
        print(layer)
        print('output size:', np.prod(layer.get_shape().as_list()[1:]), '\n')

    return layer
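When this FC decoder serves as the generator half of a point-cloud autoencoder, the final layer size is usually the flattened cloud and the output is reshaped back into points. A sketch of that wiring, assuming latent_code is the encoder output and with an illustrative point count:

n_points = 2048
net_out = decoder_with_fc_only(latent_code,
                               layer_sizes=[256, 256, n_points * 3],
                               b_norm=True)
reconstruction = tf.reshape(net_out, [-1, n_points, 3])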
                                       nb_filter=256,
                                       filter_size='n',
                                       strides=4,
                                       padding='same',
                                       activation='relu',
                                       bias=True,
                                       weights_init='xavier',
                                       bias_init='zeros',
                                       regularizer='L2',
                                       weight_decay=0.0001,
                                       trainable=True,
                                       restore=True,
                                       reuse=False,
                                       scope=None,
                                       name='Conv1D_1')
network3 = batch_normalization(network2)
network4 = tflearn.layers.conv.max_pool_1d(network3,
                                           kernel_size=4,
                                           strides=None)
network5 = tflearn.layers.conv.conv_1d(network4,
                                       nb_filter=256,
                                       filter_size=3,
                                       strides=1,
                                       padding='same',
                                       activation='relu',
                                       bias=True,
                                       weights_init='xavier',
                                       bias_init='zeros',
                                       regularizer='L2',
                                       weight_decay=0.0001,
                                       trainable=True,
from tflearn.data_preprocessing import ImagePreprocessing
from tflearn.data_augmentation import ImageAugmentation
import numpy as np
from load_input import load_train_data


X_train, Y_train = load_train_data()
X_train, Y_train = shuffle(X_train, Y_train)
print('shuffle done')

X_val = X_train[2000:4000]
Y_val = Y_train[2000:4000]
network = input_data(shape=[None, 32, 32, 3])

network = conv_2d(network, 16, 3, activation='relu', weights_init='xavier')
network = batch_normalization(network)

network = conv_2d(network, 16, 3, activation='relu', weights_init='xavier')      
network = max_pool_2d(network, 2)
network = batch_normalization(network)

network = conv_2d(network, 32, 3, activation='relu', weights_init='xavier')    
network = max_pool_2d(network, 2)
network = batch_normalization(network)

network = conv_2d(network, 32, 3, activation='relu', weights_init='xavier')
network = max_pool_2d(network, 2)
network = batch_normalization(network)


network = conv_2d(network, 64, 3, activation='relu', weights_init='xavier')
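The example is truncated here; a typical classification head for such a convolutional stack might be finished roughly as sketched below (the layer widths, class count and training hyper-parameters are assumptions, not the original code):

network = fully_connected(network, 512, activation='relu')
network = dropout(network, 0.5)
network = fully_connected(network, 10, activation='softmax')
network = regression(network, optimizer='adam', learning_rate=0.001,
                     loss='categorical_crossentropy')
model = tflearn.DNN(network, tensorboard_verbose=0)
model.fit(X_train, Y_train, n_epoch=20,
          validation_set=(X_val, Y_val),
          show_metric=True, batch_size=96)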
def encoder_with_convs_and_masked_symmetry(
    in_signal,
    n_filters=[64, 128, 256, 1024],
    filter_sizes=[1],
    strides=[1],
    b_norm=True,
    non_linearity=tf.nn.relu,
    regularizer=None,
    weight_decay=0.001,
    symmetry=tf.reduce_max,
    dropout_prob=None,
    pool=avg_pool_1d,
    pool_sizes=None,
    scope=None,  #dropout none originally
    reuse=False,
    padding='same',
    verbose=False,
    closing=None,
    conv_op=conv_1d,
    mask=tf.Variable(1.0)):
    '''An Encoder (recognition network), which maps inputs onto a latent space.
    '''

    if verbose:
        print('Building Encoder')

    n_layers = len(n_filters)
    filter_sizes = replicate_parameter_for_all_layers(filter_sizes, n_layers)
    strides = replicate_parameter_for_all_layers(strides, n_layers)
    # dropout_prob = dropout_prob * n_layers #shubham
    dropout_prob = replicate_parameter_for_all_layers(dropout_prob, n_layers)

    if n_layers < 2:
        raise ValueError('More than 1 layer is expected.')

    for i in range(n_layers):
        if i == 0:
            layer = in_signal

        name = 'encoder_conv_layer_' + str(i)
        scope_i = expand_scope_by_name(scope, name)
        layer = conv_op(layer,
                        nb_filter=n_filters[i],
                        filter_size=filter_sizes[i],
                        strides=strides[i],
                        regularizer=regularizer,
                        weight_decay=weight_decay,
                        name=name,
                        reuse=reuse,
                        scope=scope_i,
                        padding=padding)

        if verbose:
            print(name, 'conv params =', np.prod(
                layer.W.get_shape().as_list()) + np.prod(
                    layer.b.get_shape().as_list()))

        if b_norm:
            name += '_bnorm'
            scope_i = expand_scope_by_name(scope, name)
            layer = batch_normalization(layer,
                                        name=name,
                                        reuse=reuse,
                                        scope=scope_i)
            if verbose:
                print('bnorm params =', np.prod(
                    layer.beta.get_shape().as_list()) + np.prod(
                        layer.gamma.get_shape().as_list()))

        if non_linearity is not None:
            layer = non_linearity(layer)

        if pool is not None and pool_sizes is not None:
            if pool_sizes[i] is not None:
                layer = pool(layer, kernel_size=pool_sizes[i])

        if dropout_prob is not None and dropout_prob[i] > 0:
            layer = dropout(layer, 1.0 - dropout_prob[i])

        if verbose:
            print(layer)
            print('output size:', np.prod(
                layer.get_shape().as_list()[1:]), '\n')

    # print("mask min max:",tf.reduce_min(mask),tf.reduce_max(mask))
    print(mask)
    print(mask.get_shape())
    print(layer.get_shape())
    layer = tf.multiply(tf.cast(mask, tf.float32), tf.cast(layer, tf.float32))
    if symmetry is not None:
        layer = symmetry(layer, axis=1)
        if verbose:
            print(layer)

    if closing is not None:
        layer = closing(layer)
        print(layer)

    return layer
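The masked variant multiplies the per-point feature map by mask before the symmetric pooling, so masked-out points cannot contribute to the global feature. A small NumPy illustration of that effect:

import numpy as np

# Per-point features for one cloud: 3 points, 2 features each.
features = np.array([[[5.0, 1.0],
                      [2.0, 9.0],
                      [3.0, 4.0]]])

# Zero out the second point before pooling (the mask broadcasts over the feature axis).
mask = np.array([[[1.0], [0.0], [1.0]]])
masked_max = (features * mask).max(axis=1)
print(masked_max)  # [[5. 4.]] -- the masked point's 9.0 no longer dominates the second feature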
Example #35
0
    if activation:
        if isinstance(activation, str):
            net = activations.get(activation)(net)
        elif hasattr(activation, '__call__'):
            net = activation(net)
        else:
            raise ValueError("Invalid Activation.")
    return net

X, Y = oxflower17.load_data(one_hot=True, resize_pics=(299, 299))

num_classes = 17
dropout_keep_prob = 0.8

network = input_data(shape=[None, 299, 299, 3])
conv1a_3_3 = relu(batch_normalization(conv_2d(network, 32, 3, strides=2, bias=False, padding='VALID',activation=None,name='Conv2d_1a_3x3')))
conv2a_3_3 = relu(batch_normalization(conv_2d(conv1a_3_3, 32, 3, bias=False, padding='VALID',activation=None, name='Conv2d_2a_3x3')))
conv2b_3_3 = relu(batch_normalization(conv_2d(conv2a_3_3, 64, 3, bias=False, activation=None, name='Conv2d_2b_3x3')))
maxpool3a_3_3 = max_pool_2d(conv2b_3_3, 3, strides=2, padding='VALID', name='MaxPool_3a_3x3')
conv3b_1_1 = relu(batch_normalization(conv_2d(maxpool3a_3_3, 80, 1, bias=False, padding='VALID',activation=None, name='Conv2d_3b_1x1')))
conv4a_3_3 = relu(batch_normalization(conv_2d(conv3b_1_1, 192, 3, bias=False, padding='VALID',activation=None, name='Conv2d_4a_3x3')))
maxpool5a_3_3 = max_pool_2d(conv4a_3_3, 3, strides=2, padding='VALID', name='MaxPool_5a_3x3')

tower_conv = relu(batch_normalization(conv_2d(maxpool5a_3_3, 96, 1, bias=False, activation=None, name='Conv2d_5b_b0_1x1')))

tower_conv1_0 = relu(batch_normalization(conv_2d(maxpool5a_3_3, 48, 1, bias=False, activation=None, name='Conv2d_5b_b1_0a_1x1')))
tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 64, 5, bias=False, activation=None, name='Conv2d_5b_b1_0b_5x5')))

tower_conv2_0 = relu(batch_normalization(conv_2d(maxpool5a_3_3, 64, 1, bias=False, activation=None, name='Conv2d_5b_b2_0a_1x1')))
tower_conv2_1 = relu(batch_normalization(conv_2d(tower_conv2_0, 96, 3, bias=False, activation=None, name='Conv2d_5b_b2_0b_3x3')))
tower_conv2_2 = relu(batch_normalization(conv_2d(tower_conv2_1, 96, 3, bias=False, activation=None,name='Conv2d_5b_b2_0c_3x3')))
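In the full Mixed_5b block (see the _model5 example earlier), these branches plus a pooled 64-channel branch are concatenated along the channel axis, giving 96 + 64 + 96 + 64 = 320 output channels. A quick NumPy shape check (the 35x35 spatial size is illustrative):

import numpy as np

h = w = 35                      # spatial size is illustrative
branches = [np.zeros((1, h, w, c)) for c in (96, 64, 96, 64)]
mixed_5b = np.concatenate(branches, axis=3)
print(mixed_5b.shape)           # (1, 35, 35, 320)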
Example #36
0
def test1(x_train, y_train, x_test, y_test):
    # Train using classifier

    #define network
    int_put = input_data(shape=[None, 224, 5, 5, 1], )

    conv1 = conv_3d(int_put,
                    24, [24, 3, 3],
                    padding='VALID',
                    strides=[1, 1, 1, 1, 1],
                    activation='prelu',
                    weight_decay=0.05)
    print('conv1', conv1.get_shape().as_list())
    batch_norm = batch_normalization(conv1)
    #act1=tflearn.activations.relu(batch_norm)
    #pool1=max_pool_3d(act1,[1,1,2,2,1],strides=[1,1,1,1,1])

    conv2 = conv_3d(batch_norm,
                    12, [24, 3, 3],
                    padding='VALID',
                    strides=[1, 1, 1, 1, 1],
                    activation='prelu',
                    weight_decay=0.05)
    print('conv2', conv2.get_shape().as_list())
    batch_norm = batch_normalization(conv2)
    #act = tflearn.activations.relu(batch_norm)
    #pool2=max_pool_3d(act,[1,1,2,2,1],strides=[1,1,1,1,1])

    net = residual_block_concat(batch_norm,
                                2,
                                16,
                                batch_norm=None,
                                downsample_strides=1,
                                weight_decay=0.05)
    #net = residual_block(net, 5, 16)
    #net = residual_block(net, 1, 32, )
    #net = residual_block(net, 4, 32)
    #net = residual_block(net, 1, 64, downsample=True)
    #net = residual_block(net, 2, 64)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    '''
    conv3=conv_3d(batch_norm,24,[24,1,1],padding='VALID',strides=[1,5,1,1,1],activation='prelu')
    print('conv3', conv3.get_shape().as_list())
    batch_norm = batch_normalization(conv3)
    #act=tflearn.activations.relu(batch_norm)
    #pool3=max_pool_3d(act,[1,1,2,2,1],strides=[1,1,1,1,1])
    '''

    flat = flatten(net)
    print('flat', flat.get_shape().as_list())
    ip1 = fully_connected(
        flat,
        100,
        activation='prelu',
    )
    dro = dropout(ip1, 0.9)
    ip2 = fully_connected(
        dro,
        20,
        activation='softmax',
    )
    network = regression(ip2,
                         optimizer='Adagrad',
                         loss='categorical_crossentropy',
                         learning_rate=0.01)

    model = tflearn.DNN(network,
                        tensorboard_verbose=0,
                        tensorboard_dir="./tflearn_logs/")
    model.fit(x_train,
              y_train,
              n_epoch=200,
              shuffle=True,
              validation_set=(x_test, y_test),
              show_metric=True,
              batch_size=32,
              run_id='3d_net')
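After fitting, the model could be scored on the held-out set and persisted with TFLearn's standard DNN methods; a short sketch (the save path is a placeholder):

print('test accuracy:', model.evaluate(x_test, y_test, batch_size=32))
model.save('3d_net.tflearn')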