def residual_block(conf, X, filters, block_num, dropout, scope_name):
    f0 = X.get_shape().as_list()[-1]
    f1, f2 = filters
    X_shortcut = X
    
    with tf.variable_scope(scope_name):
        X = ops.conv_layer(conf, X, k_shape=[3, 3, f0, f1], stride=1, padding='SAME', w_init='tn', scope_name='conv_1',
                           add_smry=False)
        X = ops.batch_norm(conf, X, scope_name='bn_1')
        X = ops.activation(X, "relu")
        logging.info('%s : conv_1 shape: %s', str(scope_name), str(X.shape))
        
        if dropout is not None:
            logging.info('%s : dropout = %s shape: %s', str(scope_name), str(dropout), str(X.shape))
            X = tf.nn.dropout(X, dropout)
        
        X = ops.conv_layer(conf, X, k_shape=[3, 3, f1, f2], stride=1, padding='SAME', w_init='tn', scope_name='conv_2',
                           add_smry=False)
        X = ops.batch_norm(conf, X, scope_name='bn_2')
        logging.info('%s : conv_2 shape: %s', str(scope_name), str(X.shape))
        
        # Add skip connection
        X = X + X_shortcut
        X = ops.activation(X, 'relu')
        logging.info('%s : Skip add shape: %s', str(scope_name), str(X.shape))
        
        return X
def resnet(conf, img_shape, device_type, use_dropout):
    inpX = tf.placeholder(dtype=tf.float32,
                          shape=[None, img_shape[0], img_shape[1], img_shape[2]],
                          name='X')
    inpY = tf.placeholder(dtype=tf.float32,
                          shape=[None, conf['myNet']['num_labels']],
                          name='Y')
    
    with tf.device(device_type):
        X_embeddings = embeddings(conf, inpX, use_dropout)
        X_embeddings = ops.activation(X_embeddings, 'relu', scope_name='relu_fc')
        logging.info('X - FC Layer (RELU): %s', str(X_embeddings.get_shape().as_list()))
        
        # SOFTMAX Layer
        X_logits = ops.fc_layers(conf, X_embeddings, [512, 2], w_init='tn', scope_name='fc_layer2', add_smry=False)
        logging.info('LOGITS - Softmax Layer: %s', str(X_logits.get_shape().as_list()))

        Y_probs = tf.nn.softmax(X_logits)
        logging.info('Softmax Y-Prob shape: %s', str(Y_probs.shape))

        loss = ops.get_loss(y_true=inpY, y_logits=X_logits, which_loss='sigmoid_cross_entropy', lamda=None)

        optimizer, l_rate = ops.optimize(conf, loss=loss, learning_rate_decay=True, add_smry=False)

        acc = ops.accuracy(labels=inpY, logits=X_logits, type='training', add_smry=False)

    return dict(inpX=inpX, inpY=inpY, outProbs=Y_probs, accuracy=acc, loss=loss, optimizer=optimizer, l_rate=l_rate)
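
# --- Usage sketch (illustrative, not from the original source) ---------------
# A minimal training step showing how the tensor handles returned by resnet()
# might be consumed. The image shape, device string and the batch arrays
# `batch_x` / `batch_y` are assumptions made only for this example.
def _train_resnet_sketch(conf, batch_x, batch_y):
    graph = resnet(conf, img_shape=[224, 224, 3], device_type='/gpu:0', use_dropout=True)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        feed = {graph['inpX']: batch_x, graph['inpY']: batch_y}
        _, batch_loss, batch_acc = sess.run(
            [graph['optimizer'], graph['loss'], graph['accuracy']], feed_dict=feed)
    return batch_loss, batch_acc
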
def conv_1(conf, X, filters, scope_name):
    f = filters
    with tf.variable_scope(scope_name):
        X = ops.conv_layer(conf, X, k_shape=[7, 7, 3, f], stride=2, padding='SAME', w_init='tn', scope_name='conv_1',
                           add_smry=False)
        X = ops.batch_norm(conf, X, scope_name='bn_1')
        X = ops.activation(X, "relu")
        X = tf.layers.max_pooling2d(X, pool_size=3, padding='SAME', strides=2)
        logging.info('%s : conv_1 shape: %s', str(scope_name), str(X.shape))
    
    return X
def residual_block_first(conf, X, filters, block_num, dropout, scope_name):
    '''
    Why is this needed? Normally the skip connection joins the two conv layers inside a single residual block.
    When moving from one residual block to the next, the feature-map size is halved, so to keep the skip
    connection valid the shortcut has to be brought to the same dimensions as the block's output.
    '''
    f0 = X.get_shape().as_list()[-1]
    f1, f2 = filters
    
    with tf.variable_scope(scope_name):
        # Project the shortcut with a 1x1 convolution so its channel count matches the block's output, and
        # downsample it with stride 2 to match the strided conv_1 below.
        X_shortcut = ops.conv_layer(conf, X, [1, 1, f0, f1], stride=2, padding='SAME', w_init='tn', scope_name='X_Shortcut',
                                    add_smry=False)
        logging.info('%s : conv_shortcut shape: %s', str(scope_name), str(X_shortcut.shape))
        
        X = ops.conv_layer(conf, X, [3, 3, f0, f1], stride=2, padding='SAME', w_init='tn', scope_name='conv_1',
                           add_smry=False)
        X = ops.batch_norm(conf, X, scope_name='bn_1')
        X = ops.activation(X, 'relu', scope_name='relu_1')
        logging.info('%s : conv_1 shape: %s', str(scope_name), str(X.shape))
        
        # if dropout is not None:
        #     logging.info('%s : dropout = %s shape: %s', str(scope_name), str(dropout), str(X.shape))
        #     X = tf.nn.dropout(X, dropout)
        
        X = ops.conv_layer(conf, X, [3, 3, f1, f2], stride=1, padding='SAME', w_init='tn', scope_name='conv_2',
                           add_smry=False)
        X = ops.batch_norm(conf, X, scope_name='bn_2')
        logging.info('%s : conv_2 shape: %s', str(scope_name), str(X.shape))
        
        X = X + X_shortcut
        X = ops.activation(X, 'relu', scope_name='relu_2')
        logging.info('%s : Skip add shape: %s', str(scope_name), str(X.shape))
    
    return X
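
# --- Stage-composition sketch (illustrative, not from the original source) ---
# One way the two block types are typically chained: the first block of a stage
# downsamples and projects the shortcut, the remaining blocks keep the shape
# fixed so the identity shortcut can be added directly. The helper name, block
# count and filter choices are assumptions for illustration only.
def _residual_stage_sketch(conf, X, filters, num_blocks, dropout, stage_name):
    X = residual_block_first(conf, X, filters, block_num=0, dropout=dropout,
                             scope_name='%s_block_0' % stage_name)
    for i in range(1, num_blocks):
        X = residual_block(conf, X, filters, block_num=i, dropout=dropout,
                           scope_name='%s_block_%d' % (stage_name, i))
    return X
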
def mixture_of_experts(conf, img_shape, device_type, use_dropout):
    inpX1 = tf.placeholder(dtype=tf.float32,
                          shape=[None, img_shape[0], img_shape[1], img_shape[2]],
                          name='expert1')
    inpX2 = tf.placeholder(dtype=tf.float32,
                          shape=[None, img_shape[0], img_shape[1], img_shape[2]],
                          name='expert2')
    inpY = tf.placeholder(dtype=tf.float32,
                          shape=[None, conf['myNet']['num_labels']],
                          name='Y')

    with tf.device(device_type):
        logging.info('Expert 1: Creating Computation graph for Expert 1 ............... ')
        with tf.variable_scope('Expert1'):
            embeddings_m1 = embeddings(conf, inpX1, use_dropout)
        
        logging.info('Expert 2: Creating Computation graph for Expert 2 ............... ')
        with tf.variable_scope('Expert2'):
            embeddings_m2 = embeddings(conf, inpX2, use_dropout)

        expert_embeddings = tf.concat(values=[embeddings_m1, embeddings_m2], axis=-1)
        expert_embeddings = ops.activation(expert_embeddings, type='sigmoid', scope_name='sigmoid')
        logging.info('EMBEDDINGS: Stacked (sigmoid Gate) %s', str(expert_embeddings.get_shape().as_list()))
    
        # SOFTMAX Layer
        X_logits = ops.fc_layers(conf, expert_embeddings, [1024, 2], w_init='tn', scope_name='softmax', add_smry=False)
        logging.info('LOGITS - Softmax Layer: %s', str(X_logits.get_shape().as_list()))
    
        Y_probs = tf.nn.softmax(X_logits)
        logging.info('Softmax Y-Prob shape: %s', str(Y_probs.shape))
    
        loss = ops.get_loss(y_true=inpY, y_logits=X_logits, which_loss='sigmoid_cross_entropy', lamda=None)
    
        optimizer, l_rate = ops.optimize(conf, loss=loss, learning_rate_decay=True, add_smry=False)
    
        acc = ops.accuracy(labels=inpY, logits=X_logits, type='training', add_smry=False)

    return dict(inpX1=inpX1, inpX2=inpX2, inpY=inpY, outProbs=Y_probs, accuracy=acc, loss=loss,
                optimizer=optimizer, l_rate=l_rate)
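
# --- Feed sketch (illustrative, not from the original source) ----------------
# mixture_of_experts() takes one image batch per expert (e.g. two views or
# crops of the same sample). Graph construction and the training step mirror
# the resnet sketch above; only the feed differs. `batch_view1`, `batch_view2`
# and `batch_y` are hypothetical arrays.
def _moe_feed_sketch(conf, batch_view1, batch_view2, batch_y):
    graph = mixture_of_experts(conf, img_shape=[224, 224, 3], device_type='/gpu:0', use_dropout=True)
    return graph, {graph['inpX1']: batch_view1, graph['inpX2']: batch_view2, graph['inpY']: batch_y}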
    
def encoder(X, encoding_filters, latent_dim):
    logging.info('INPUT shape: %s', str(X.shape))
    # Downsampling 1
    X = ops.conv_layer(
        X,
        k_shape=[3, 3, encoding_filters[0], encoding_filters[1]],
        stride=1,
        padding='SAME',
        w_init='gu',
        w_decay=None,
        scope_name='conv_1',
        add_smry=False)
    # X = ops.batch_norm(X, axis=[0, 1, 2], scope_name='bn_1')
    logging.info('ENCODER: conv_1 shape: %s' % (str(X.shape)))
    X = ops.activation(X, 'relu', 'relu_1')
    # print(X.shape)
    X = tf.nn.max_pool(X,
                       ksize=[1, 2, 2, 1],
                       strides=[1, 2, 2, 1],
                       padding='SAME',
                       name='pool_1')
    logging.info('ENCODER : pool_1 shape: %s' % (str(X.shape)))
    # print(X.shape)

    # Downsampling 2
    X = ops.conv_layer(
        X,
        k_shape=[3, 3, encoding_filters[1], encoding_filters[2]],
        stride=1,
        padding='SAME',
        w_init='gu',
        w_decay=None,
        scope_name='conv_2',
        add_smry=False)
    # X = ops.batch_norm(X, axis=[0, 1, 2], scope_name='bn_2')
    logging.info('ENCODER : conv_2 shape: %s' % (str(X.shape)))
    X = ops.activation(X, 'relu', 'relu_2')
    # print(X.shape)
    X = tf.nn.max_pool(X,
                       ksize=[1, 2, 2, 1],
                       strides=[1, 2, 2, 1],
                       padding='SAME',
                       name='pool_2')
    logging.info('ENCODER : pool_2 shape: %s' % (str(X.shape)))
    # print(X.shape)

    # Downsampling 3
    X = ops.conv_layer(
        X,
        k_shape=[3, 3, encoding_filters[2], encoding_filters[3]],
        stride=1,
        padding='SAME',
        w_init='gu',
        w_decay=None,
        scope_name='conv_3',
        add_smry=False)
    # X = ops.batch_norm(X, axis=[0, 1, 2], scope_name='bn_3')
    logging.info('ENCODER : conv_3 shape: %s' % (str(X.shape)))
    X = ops.activation(X, 'relu', 'relu_3')
    # print(X.shape)
    X = tf.nn.max_pool(X,
                       ksize=[1, 2, 2, 1],
                       strides=[1, 2, 2, 1],
                       padding='SAME',
                       name='pool_3')
    logging.info('ENCODER : pool_3 shape: %s' % (str(X.shape)))
    # print(X.shape)

    # Downsampling 4
    X = ops.conv_layer(
        X,
        k_shape=[3, 3, encoding_filters[3], encoding_filters[4]],
        stride=1,
        padding='SAME',
        w_init='gu',
        w_decay=None,
        scope_name='conv_4',
        add_smry=False)
    # X = ops.batch_norm(X, axis=[0, 1, 2], scope_name='bn_4')
    logging.info('ENCODER : conv_4 shape: %s' % (str(X.shape)))
    X = ops.activation(X, 'relu', 'relu_4')
    # print(X.shape)
    X = tf.nn.max_pool(X,
                       ksize=[1, 2, 2, 1],
                       strides=[1, 2, 2, 1],
                       padding='SAME',
                       name='pool_4')
    logging.info('ENCODER : pool_4 shape: %s' % (str(X.shape)))

    # Downsampling 5
    X = ops.conv_layer(
        X,
        k_shape=[3, 3, encoding_filters[4], encoding_filters[5]],
        stride=1,
        padding='SAME',
        w_init='gu',
        w_decay=None,
        scope_name='conv_5',
        add_smry=False)
    # X = ops.batch_norm(X, axis=[0, 1, 2], scope_name='bn_5')
    logging.info('ENCODER : conv_5 shape: %s' % (str(X.shape)))
    X = ops.activation(X, 'relu', 'relu_5')
    # print(X.shape)
    X = tf.nn.max_pool(X,
                       ksize=[1, 2, 2, 1],
                       strides=[1, 2, 2, 1],
                       padding='SAME',
                       name='pool_5')
    logging.info('ENCODER : pool_5 shape: %s' % (str(X.shape)))

    # Downsampling 6
    X = ops.conv_layer(
        X,
        k_shape=[3, 3, encoding_filters[5], encoding_filters[6]],
        stride=1,
        padding='SAME',
        w_init='gu',
        w_decay=None,
        scope_name='conv_6',
        add_smry=False)
    # X = ops.batch_norm(X, axis=[0, 1, 2], scope_name='bn_6')
    logging.info('ENCODER : conv_6 shape: %s' % (str(X.shape)))
    X = ops.activation(X, 'relu', 'relu_6')
    # print(X.shape)
    X = tf.nn.max_pool(X,
                       ksize=[1, 2, 2, 1],
                       strides=[1, 2, 2, 1],
                       padding='SAME',
                       name='pool_6')
    logging.info('ENCODER : pool_6 shape: %s' % (str(X.shape)))

    X_flat = tf.layers.flatten(X, 'X_flattened')
    X = ops.fc_layers(X_flat,
                      k_shape=[X_flat.get_shape().as_list()[-1], latent_dim],
                      w_init='gu',
                      scope_name='fc_layer',
                      add_smry=False)
    X = ops.activation(X, 'relu', 'relu_7')
    logging.info('ENCODER : fc_layer (latent) shape: %s' % (str(X.shape)))
    # # # Downloading 2: Downsample to 2 features, to plot data points
    # X = ops.conv_layer(X, k_shape=[3, 3, encoding_filters[6], encoding_filters[7]], stride=1, padding='SAME',
    #                    w_init='gu', w_decay=None, scope_name='conv_6', add_smry=False)
    # logging.info('%s : conv_7 shape: %s', 'ENCODER: ', str(X.shape))
    # X = ops.activation(X, 'relu', 'relu_7')
    # # print(X.shape)
    # X = tf.nn.max_pool(X, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool_7')
    # logging.info('%s : pool_7 shape: %s', 'ENCODER: ', str(X.shape))
    #
    return X
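
# --- Shape walkthrough (illustrative; assumes a 128x128x3 input) -------------
# Each of the six conv + max-pool stages above halves the spatial resolution:
#   128 -> 64 -> 32 -> 16 -> 8 -> 4 -> 2
# With a hypothetical encoding_filters = [3, 16, 32, 64, 64, 64, 64], the
# bottleneck before flattening is 2 x 2 x 64 = 256 features, which the final
# fc_layer then maps down to `latent_dim`. This matches the 2x2x64 shape
# hard-coded in decoder() below.
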
def decoder(X, decoding_filters, latent_dim):
    flattened_shape = [2, 2, 64]
    X = ops.fc_layers(X,
                      k_shape=[
                          latent_dim, flattened_shape[0] * flattened_shape[1] *
                          flattened_shape[2]
                      ],
                      w_init='gu',
                      scope_name='dec_fc_layer',
                      add_smry=False)
    X = ops.activation(X, 'relu', 'relu_5')
    logging.info('%s : dec_fc_layer shape: %s', 'DECODER: ', str(X.shape))
    X = tf.reshape(X, [
        tf.shape(X)[0], flattened_shape[0], flattened_shape[1],
        flattened_shape[2]
    ])
    logging.info('%s : reshaped shape: %s', 'DECODER: ', str(X.shape))
    # Upsample 1
    # X = ops.conv_layer(X, k_shape=[3,3,enc_fn,decoding_filters[0]],  stride=1, padding='SAME',  w_init='gu',
    # w_decay=None, scope_name='conv_4', add_smry=False)
    logging.info('%s : dconv_1 input channels: %s', 'DECODER: ', str(X.get_shape().as_list()[-1]))
    X = ops.conv2D_transposed_strided(X,
                                      k_shape=[3, 3, 64, decoding_filters[0]],
                                      stride=2,
                                      padding='SAME',
                                      w_init='gu',
                                      out_shape=None,
                                      scope_name='dconv_1',
                                      add_smry=False)
    # X = ops.batch_norm(X, axis=[0, 1, 2], scope_name='bn_7')
    logging.info('%s : dconv_1 shape: %s', 'DECODER: ', str(X.shape))
    X = ops.activation(X, 'relu', 'relu_6')
    # print(X.shape)

    # Upsample 2
    # X = ops.conv_layer(X, k_shape=[3, 3, decoding_filters[0], decoding_filters[1]], stride=1, padding='SAME',
    # w_init='gu', w_decay=None, scope_name='conv_5', add_smry=False)
    # print(X.shape)
    X = ops.conv2D_transposed_strided(
        X,
        k_shape=[3, 3, decoding_filters[0], decoding_filters[1]],
        stride=2,
        padding='SAME',
        w_init='gu',
        out_shape=None,
        scope_name='dconv_2',
        add_smry=False)
    # X = ops.batch_norm(X, axis=[0, 1, 2], scope_name='bn_8')
    logging.info('%s : dconv_2 shape: %s', 'DECODER: ', str(X.shape))
    X = ops.activation(X, 'relu', 'relu_7')
    # print(X.shape)

    # Upsampling 3
    # X = ops.conv_layer(X, k_shape=[3, 3, decoding_filters[1], decoding_filters[2]], stride=1, padding='SAME',
    # w_init='gu', w_decay=None, scope_name='conv_6', add_smry=False)
    # print(X.shape)
    X = ops.conv2D_transposed_strided(
        X,
        k_shape=[3, 3, decoding_filters[1], decoding_filters[2]],
        stride=2,
        padding='SAME',
        w_init='gu',
        out_shape=None,
        scope_name='dconv_3',
        add_smry=False)
    # X = ops.batch_norm(X, axis=[0, 1, 2], scope_name='bn_9')
    logging.info('%s : dconv_3 shape: %s', 'DECODER: ', str(X.shape))
    X = ops.activation(X, 'relu', 'relu_8')
    # print(X.shape)

    # Upsampling 4
    X = ops.conv2D_transposed_strided(
        X,
        k_shape=[3, 3, decoding_filters[2], decoding_filters[3]],
        stride=2,
        padding='SAME',
        w_init='gu',
        out_shape=None,
        scope_name='dconv_4',
        add_smry=False)
    # X = ops.batch_norm(X, axis=[0, 1, 2], scope_name='bn_10')
    logging.info('%s : dconv_4 shape: %s', 'DECODER: ', str(X.shape))
    X = ops.activation(X, 'relu', 'relu_9')

    # Upsampling 5
    X = ops.conv2D_transposed_strided(
        X,
        k_shape=[3, 3, decoding_filters[3], decoding_filters[4]],
        stride=2,
        padding='SAME',
        w_init='gu',
        out_shape=None,
        scope_name='dconv_5',
        add_smry=False)
    # X = ops.batch_norm(X, axis=[0, 1, 2], scope_name='bn_11')
    logging.info('%s : dconv_5 shape: %s', 'DECODER: ', str(X.shape))
    X = ops.activation(X, 'relu', 'relu_10')

    # Upsampling 6
    X = ops.conv2D_transposed_strided(
        X,
        k_shape=[3, 3, decoding_filters[4], decoding_filters[5]],
        stride=2,
        padding='SAME',
        w_init='gu',
        out_shape=None,
        scope_name='dconv_6',
        add_smry=False)
    # X = ops.batch_norm(X, axis=[0, 1, 2], scope_name='bn_12')
    logging.info('%s : dconv_6 shape: %s', 'DECODER: ', str(X.shape))
    X = ops.activation(X, 'relu', 'relu_11')

    # Decoding
    X = ops.conv_layer(
        X,
        k_shape=[3, 3, decoding_filters[5], decoding_filters[6]],
        stride=1,
        padding='SAME',
        w_init='gu',
        w_decay=None,
        scope_name='conv_7',
        add_smry=False)
    # X = ops.batch_norm(X, axis=[0, 1, 2], scope_name='bn_13')
    logging.info('%s : conv_7 shape: %s', 'DECODER: ', str(X.shape))

    # Note: despite the name, `sigmoid_logits` is the sigmoid-activated output (pixel
    # probabilities); `X` is returned alongside it as the raw reconstruction logits.
    sigmoid_logits = ops.activation(X, 'sigmoid', 'sigmoid_1')
    # print(X.shape)

    return X, sigmoid_logits
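
# --- Autoencoder wiring sketch (illustrative, not from the original source) --
# A minimal way to connect encoder() and decoder() with a per-pixel
# reconstruction loss. The input size, filter lists and latent_dim are
# assumptions, chosen so that six stride-2 poolings reduce 128x128 down to the
# 2x2x64 bottleneck hard-coded in decoder().
def _autoencoder_sketch():
    inp = tf.placeholder(tf.float32, shape=[None, 128, 128, 3], name='ae_X')
    latent = encoder(inp, encoding_filters=[3, 16, 32, 64, 64, 64, 64], latent_dim=32)
    recon_logits, recon_probs = decoder(latent, decoding_filters=[64, 64, 64, 32, 16, 16, 3], latent_dim=32)
    # Pixel-wise sigmoid cross-entropy against the (assumed [0, 1]-scaled) input.
    loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=inp, logits=recon_logits))
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)
    return inp, recon_probs, loss, train_op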