# Example 1
def tf_prediction_func( model ):
    """FC -> two conv2d layers -> FC -> dropout-FC classifier, softmax output.

    Reads ``model.data`` / ``model.target`` for the layer widths; ``model.arg``
    feeds the dropout layer (presumably the keep probability — confirm against
    the tf_layer API).  Returns a tensor of shape [-1, target_size].
    """
    builder = tf_layer()
    # derive layer widths from the model's placeholders
    n_in = int(model.data.get_shape()[1])
    image_shape = (28, n_in // 28)
    n_out = int(model.target.get_shape()[1])
    pool_len = 2
    n_feat = 2
    # two conv layers each pool by pool_len in both dims -> pool_len**4 overall
    flat_len = n_feat * n_in // (pool_len ** 4)
    # full connection: [-1, n_in] -> [-1, n_in]
    fc1 = builder.full_connection(model.data, in_fc_wide=n_in,
                                  out_fc_wide=n_in, activate_type='sigmoid')
    # view the FC output as a one-channel 2-D image for the conv stack
    fc1_img = tf.reshape(fc1, [-1, image_shape[0], image_shape[1], 1])
    # two stacked 5x5 conv layers, 2x2 pooling each
    conv = builder.multi_convolution2d(fc1_img, cov_ker_size=(5, 5),
                                       n_cnn_layers=2,
                                       in_n_features_arr=(1, n_feat),
                                       out_n_features_arr=(n_feat, n_feat),
                                       pool_size=[1, pool_len, pool_len, 1],
                                       activate_type='sigmoid')
    conv_flat = tf.reshape(conv, [-1, flat_len])
    fc2 = builder.full_connection(conv_flat, in_fc_wide=flat_len,
                                  out_fc_wide=flat_len,
                                  activate_type='sigmoid')
    # dropout-regularized output layer
    logits = builder.full_connection_dropout(fc2, model.arg,
                                             in_fc_wide=flat_len,
                                             out_fc_wide=n_out,
                                             activate_type='sigmoid')
    return tf.nn.softmax(logits)
def tf_prediction_func(model):
    """Residual fully-connected net: input projection, three x + F(x) stages
    (each F an 8-layer FC stack), then a dropout output layer."""
    builder = tf_layer(w_std=0.2)
    n_in = int(model.data.get_shape()[1])
    n_out = int(model.target.get_shape()[1])
    hidden = 256
    # project input into the hidden width
    h = builder.full_connection(model.data,
                                in_fc_wide=n_in,
                                out_fc_wide=hidden,
                                activate_type='ReLU',
                                layer_norm=1)
    # three identical residual stages
    for _ in range(3):
        h = h + builder.multi_full_connection(h, n_fc_layers=8,
                                              activate_type='ReLU',
                                              layer_norm=1)
    # dropout output layer driven by model.arg
    return builder.full_connection_dropout(h,
                                           arg=model.arg,
                                           in_fc_wide=hidden,
                                           out_fc_wide=n_out,
                                           activate_type='sigmoid')
# Example 3
def generator(z):
    """GAN generator: FC-project the latent input to 4x4 feature maps, run
    three strided deconvolution layers, finish with a tanh conv layer."""
    builder = tf_layer()
    stride = 2
    base_feat = 8
    kernel = (5, 5)
    # project the latent scalar onto a 4*4*base_feat vector
    proj = builder.full_connection(z,
                                   in_fc_wide=1,
                                   out_fc_wide=4 * 4 * base_feat,
                                   activate_type='None')
    # reshape into a [batch, 4, 4, base_feat] stack of feature maps
    proj_4d = tf.reshape(proj, [-1, 4, 4, base_feat])
    # three transposed-conv layers, doubling the feature count each step
    up = builder.multi_deconvolution2d(
        proj_4d, cov_ker_size=kernel, n_cnn_layers=3,
        in_n_features_arr=(base_feat, 2 * base_feat, 4 * base_feat),
        out_n_features_arr=(2 * base_feat, 4 * base_feat, 8 * base_feat),
        pool_size=[1, stride, stride, 1], activate_type='ReLU')
    # final conv with tanh activation
    return builder.convolution2d(up, cov_ker_size=kernel,
                                 in_n_features=8 * base_feat,
                                 out_n_features=16 * base_feat,
                                 pool_size=[1, stride, stride, 1],
                                 activate_type='tanh')
# Example 4
def tf_prediction_func( model ):
    """Deep fully-connected net: input FC, eight hidden FC layers, dropout FC
    output.  Returns the raw sigmoid output (no softmax applied)."""
    builder = tf_layer(w_std=0.2)
    n_in = int(model.data.get_shape()[1])
    n_out = int(model.target.get_shape()[1])
    hidden = 256
    # project to the hidden width, then a stack of equal-width FC layers
    h0 = builder.full_connection(model.data,
                                 in_fc_wide=n_in,
                                 out_fc_wide=hidden,
                                 activate_type='sigmoid',
                                 layer_norm=1)
    h1 = builder.multi_full_connection(h0, n_fc_layers=8,
                                       activate_type='sigmoid',
                                       layer_norm=1)
    # dropout applied on the output layer only; model.arg controls it
    out = builder.full_connection_dropout(h1,
                                          arg=model.arg,
                                          in_fc_wide=hidden,
                                          out_fc_wide=n_out,
                                          activate_type='sigmoid')
    return out
# Example 5
def tf_prediction_func(model):
    """Symmetric conv-down / deconv-up network that reconstructs a vector of
    the input width; returns raw sigmoid activations (no softmax)."""
    builder = tf_layer()
    n_in = int(model.data.get_shape()[1])
    image_shape = (28, n_in // 28)
    n_out = int(model.target.get_shape()[1])  # computed for parity; output width is n_in
    stride = 2
    n_feat = 2
    flat_len = n_in  # the deconv stack restores the original spatial size
    # view the flat input as a one-channel 2-D image
    img = tf.reshape(model.data, [-1, image_shape[0], image_shape[1], 1])
    # encoder: two 5x5 conv layers, each pooled by stride in both dims
    down = builder.multi_convolution2d(img, cov_ker_size=(5, 5),
                                       n_cnn_layers=2,
                                       in_n_features_arr=(1, n_feat),
                                       out_n_features_arr=(n_feat, n_feat),
                                       pool_size=[1, stride, stride, 1],
                                       activate_type='sigmoid')
    # decoder: two strided deconv layers back down to one channel
    up = builder.multi_deconvolution2d(down, cov_ker_size=(5, 5),
                                       n_cnn_layers=2,
                                       in_n_features_arr=(n_feat, n_feat),
                                       out_n_features_arr=(n_feat, 1),
                                       conv_strides=[1, stride, stride, 1],
                                       activate_type='sigmoid')
    # flatten back to the input width
    return tf.reshape(up, [-1, flat_len])
# Example 6
def tf_prediction_func(model):
    """FC front-end, six 1-D conv layers (long kernels along one axis, pooled
    along that axis only), then an FC back-end to the target width."""
    builder = tf_layer(w_std=0.4)
    n_in = int(model.data.get_shape()[1])
    n_out = int(model.target.get_shape()[1])
    hidden = 256
    pool1d = 2
    n_feat = 8
    n_conv = 6
    ker_len = 32
    # each conv layer pools by pool1d along the first spatial axis
    conv_flat_len = (hidden * n_feat) // (pool1d ** n_conv)
    # FC layers: [-1, n_in] -> [-1, hidden] -> [-1, hidden]
    h = builder.full_connection(model.data,
                                in_fc_wide=n_in,
                                out_fc_wide=hidden,
                                activate_type='sigmoid')
    h = builder.multi_full_connection(h, n_fc_layers=1,
                                      activate_type='sigmoid')
    # treat the hidden vector as a [hidden, 1] one-channel image
    h4d = tf.reshape(h, [-1, hidden, 1, 1])
    feats = (n_feat,) * n_conv
    # six (ker_len x 1) conv layers; first maps 1 channel -> n_feat
    conv = builder.multi_convolution2d(h4d, cov_ker_size=(ker_len, 1),
                                       n_cnn_layers=n_conv,
                                       in_n_features_arr=(1,) + feats[:-1],
                                       out_n_features_arr=feats,
                                       pool_size=[1, pool1d, 1, 1],
                                       activate_type='sigmoid')
    conv_flat = tf.reshape(conv, [-1, conv_flat_len])
    h = builder.multi_full_connection(conv_flat, n_fc_layers=1,
                                      activate_type='sigmoid')
    # final FC to the target width; raw sigmoid output, no softmax
    return builder.full_connection(h,
                                   in_fc_wide=conv_flat_len,
                                   out_fc_wide=n_out,
                                   activate_type='sigmoid')
# Example 7
def tf_prediction_func( model ):
    """Three-layer fully-connected classifier with softmax output."""
    builder = tf_layer()
    n_in = int(model.data.get_shape()[1])
    n_out = int(model.target.get_shape()[1])
    hidden = 256
    # n_in -> hidden -> hidden -> n_out, all sigmoid-activated
    logits = builder.multi_full_connection(
        model.data, n_fc_layers=3,
        in_fc_wide_arr=(n_in, hidden, hidden),
        out_fc_wide_arr=(hidden, hidden, n_out),
        activate_type='sigmoid')
    return tf.nn.softmax(logits)
def tf_prediction_func( model ):
    """Funnel-shaped deep fully-connected prediction net.

    Six ReLU layers halve the width at each step
    (data_size -> data_size/2 -> ... -> data_size/32) before mapping to the
    target width.  Returns the raw layer output (no softmax/sigmoid here).

    Cleanup: the unused local ``mid_size = 960*5`` was removed, and the
    repeated halved-width literals are now derived from ``data_size``.
    """
    NNlayer     = tf_layer()
    data_size   = int(model.data.get_shape()[1])
    target_size = int(model.target.get_shape()[1])
    # successive halvings: (data_size, data_size//2, ..., data_size//32)
    widths_in  = tuple(data_size // (2 ** k) for k in range(6))
    # each layer outputs the next layer's input width; last maps to the target
    widths_out = widths_in[1:] + (target_size,)
    y = NNlayer.multi_full_connection(model.data, n_fc_layers=6,
                                      in_fc_wide_arr=widths_in,
                                      out_fc_wide_arr=widths_out,
                                      activate_type='ReLU')
    return y
# Example 9
def discriminator(image, reuse=False):
    """GAN discriminator: four strided conv layers that shrink the feature
    count, a flatten, and a single-unit FC.  Returns (sigmoid(logit), logit).

    When ``reuse`` is true, variables in the current scope are reused so the
    same weights serve both real and generated batches.
    """
    builder = tf_layer()
    stride = 2
    base_feat = 8
    kernel = (5, 5)
    # four pool-by-stride layers shrink the spatial area by stride**4
    flat_len = int(base_feat * image.get_shape()[1]) * int(
        image.get_shape()[2]) // (stride ** 4)
    if reuse:
        tf.get_variable_scope().reuse_variables()
    # conv stack: 16f -> 8f -> 4f -> 2f -> f feature channels
    feats = builder.multi_convolution2d(
        image, cov_ker_size=kernel, n_cnn_layers=4,
        in_n_features_arr=(16 * base_feat, 8 * base_feat,
                           4 * base_feat, 2 * base_feat),
        out_n_features_arr=(8 * base_feat, 4 * base_feat,
                            2 * base_feat, base_feat),
        pool_size=[1, stride, stride, 1], activate_type='ReLU')
    flat = tf.reshape(feats, [-1, flat_len])
    logit = builder.full_connection(flat,
                                    in_fc_wide=flat_len,
                                    out_fc_wide=1,
                                    activate_type='None')
    return tf.nn.sigmoid(logit), logit
# Example 10
def tf_prediction_func_cnn(model):
    """Residual CNN prediction net.

    Wide-kernel input conv -> 6-layer residual conv stack -> 6 plain conv
    layers -> dropout -> 1-channel sigmoid output conv.

    Fixes over the previous revision:
      * ``2*cnn_ksize`` on a tuple is tuple repetition, producing the 4-tuple
        (10, 10, 10, 10) instead of the intended doubled 2-tuple kernel; the
        (20, 20) kernel is now built element-wise.
      * ``y1_dropout`` was computed but discarded (the final conv consumed
        ``y1``), so dropout had no effect; the dropout output now feeds the
        final conv layer.
      * removed the unused local ``pool_len``.
    """
    NNlayer = tf_layer()
    n_features = 32
    cnn_ksize = (10, 10)

    # input conv with a doubled (element-wise) kernel size
    y0 = NNlayer.convolution2d(model.data,
                               cov_ker_size=(2 * cnn_ksize[0], 2 * cnn_ksize[1]),
                               in_n_features=1, out_n_features=n_features,
                               pool_type='None', activate_type='ReLU',
                               layer_norm=1)
    # six residual conv layers at constant feature width
    y_res = NNlayer.multi_convolution2d_residual(y0,
                                                 n_cnn_layers=6,
                                                 cov_ker_size=cnn_ksize,
                                                 in_n_features=n_features,
                                                 activate_type='ReLU')
    n_feats6 = (n_features,) * 6
    y1 = NNlayer.multi_convolution2d(y_res, cov_ker_size=cnn_ksize,
                                     n_cnn_layers=6,
                                     in_n_features_arr=n_feats6,
                                     out_n_features_arr=n_feats6,
                                     pool_type='None',
                                     activate_type='sigmoid', layer_norm=1)
    # dropout; model.arg is the keep probability (tf.nn.dropout keep_prob)
    y1_dropout = tf.nn.dropout(y1, keep_prob=model.arg)
    # BUG FIX: previously y1 (not y1_dropout) was passed here, making dropout a no-op
    y2 = NNlayer.convolution2d(y1_dropout, cov_ker_size=cnn_ksize,
                               in_n_features=n_features,
                               out_n_features=1,
                               pool_type='None', activate_type='sigmoid')
    return y2
# Example 11
def tf_prediction_func(model):
    """U-Net-style prediction net on 28-wide image data.

    Encoder: two conv stages, each followed by 2x2 max-pooling.
    Decoder: two strided deconv stages, each fused with the matching encoder
    output by elementwise add (skip connections), ending in a 1-channel
    sigmoid conv.  Output is flattened to [-1, data_size].

    NOTE(review): merge_type='add' needs matching feature counts on both
    merge inputs — holds here since every stage uses n_features channels.
    """
    #if model.arg is None:
    #    model.arg = [1.0, 1.0]
    # get data size
    NNlayer = tf_layer(debug=1)
    data_size = int(model.data.get_shape()[1])
    im_shape = (28, data_size // 28)
    target_size = int(model.target.get_shape()[1])
    pool_len = 2
    n_features = 2
    out_size = data_size  #n_features*data_size//(pool_len**4)
    y1_4d = tf.reshape(
        model.data, [-1, im_shape[0], im_shape[1], 1])  #reshape into 4d tensor
    # input size   [-1, im_shape[0],          im_shape[1],         1 ]
    # output size  [-1, im_shape[0],          im_shape[1], n_features ]
    conv1_12 = NNlayer.multi_convolution2d(y1_4d, cov_ker_size = (5,5), n_cnn_layers = 2, \
                                           in_n_features_arr  = (1,          n_features), \
                                           out_n_features_arr = (n_features, n_features), \
                                           pool_type = 'None', activate_type = 'ReLU')
    # input size   [-1, im_shape[0],          im_shape[1],          n_features ]
    # output size  [-1, im_shape[0]/pool_len, im_shape[1]/pool_len, n_features ]
    pool1    = NNlayer.pool(conv1_12, pool_size = [1, pool_len, pool_len, 1], \
                            pool_type = 'max_pool')
    # input size   [-1, im_shape[0]/pool_len, im_shape[1]/pool_len, n_features ]
    # output size  [-1, im_shape[0]/pool_len, im_shape[1]/pool_len, n_features ]
    conv2_12 = NNlayer.multi_convolution2d(pool1, cov_ker_size = (5,5), n_cnn_layers = 2, \
                                           in_n_features_arr  = (n_features, n_features), \
                                           out_n_features_arr = (n_features, n_features), \
                                           pool_type = 'None', activate_type = 'ReLU')
    # input size   [-1, im_shape[0]/pool_len,      im_shape[1]/pool_len,      n_features ]
    # output size  [-1, im_shape[0]/(pool_len**2), im_shape[1]/(pool_len**2), n_features ]
    pool2    = NNlayer.pool(conv2_12, pool_size = [1, pool_len, pool_len, 1], \
                            pool_type = 'max_pool')

    # input size   [-1, im_shape[0]/(pool_len**2), im_shape[1]/(pool_len**2), n_features ]
    # output size  [-1, im_shape[0]/(pool_len**2), im_shape[1]/(pool_len**2), n_features ]
    #fc_len  = n_features*data_size//(pool_len**4)
    #fc_in   = tf.reshape(pool2, [-1, fc_len]) #reshape into 4d tensor
    #fc_done = NNlayer.multi_full_connection_dropout(fc_in, model.arg, n_fc_layers = 3, \
    #                                    in_fc_wide_arr  = (fc_len, fc_len, fc_len), \
    #                                    out_fc_wide_arr = (fc_len, fc_len, fc_len), \
    #                                    activate_type = 'sigmoid')
    #fc_out   = tf.reshape(fc_done, [-1,im_shape[0]//(pool_len**2),im_shape[1]//(pool_len**2),n_features]) #reshape into 4d tensor

    # input size   [-1, im_shape[0]/(pool_len**2), im_shape[1]/(pool_len**2), n_features ]
    # output size  [-1, im_shape[0]/pool_len,      im_shape[1]/pool_len,      n_features ]
    up3      = NNlayer.deconvolution2d(pool2, cov_ker_size = (5,5), \
                                            in_n_features = n_features, out_n_features = n_features, \
                                            conv_strides = [1, pool_len, pool_len, 1], activate_type = 'ReLU')
    # skip connection: add encoder stage-2 output to the upsampled tensor
    # input size   [-1, im_shape[0]/pool_len, im_shape[1]/pool_len, n_features ]
    # output size  [-1, im_shape[0]/pool_len, im_shape[1]/pool_len, n_features ]
    merge3 = NNlayer.merge(conv2_12, up3, axis=3, merge_type='add')
    # input size   [-1, im_shape[0]/pool_len, im_shape[1]/pool_len, n_features ]
    # output size  [-1, im_shape[0]/pool_len, im_shape[1]/pool_len, n_features ]
    conv3_12 = NNlayer.multi_convolution2d(merge3, cov_ker_size = (5,5), n_cnn_layers = 2, \
                                           in_n_features_arr  = (n_features, n_features), \
                                           out_n_features_arr = (n_features, n_features), \
                                           pool_type = 'None', activate_type = 'ReLU')
    # input size   [-1, im_shape[0]/pool_len, im_shape[1]/pool_len, n_features ]
    # output size  [-1, im_shape[0],          im_shape[1],          n_features ]
    up4      = NNlayer.deconvolution2d(conv3_12, cov_ker_size = (5,5), \
                                            in_n_features = n_features, out_n_features = n_features, \
                                            conv_strides = [1, pool_len, pool_len, 1], activate_type = 'ReLU')
    # skip connection: add encoder stage-1 output to the upsampled tensor
    # input size   [-1, im_shape[0], im_shape[1], n_features ]
    # output size  [-1, im_shape[0], im_shape[1], n_features ]
    merge4 = NNlayer.merge(conv1_12, up4, axis=3, merge_type='add')
    # input size   [-1, im_shape[0], im_shape[1], n_features ]
    # output size  [-1, im_shape[0], im_shape[1], 1 ]
    conv4_12 = NNlayer.multi_convolution2d(merge4, cov_ker_size = (5,5), n_cnn_layers = 2, \
                                           in_n_features_arr  = (n_features, n_features), \
                                           out_n_features_arr = (n_features,   1), \
                                           pool_type = 'None', activate_type = 'sigmoid')
    # input data shape [-1,  data_size/4, 1, cnn_n_feature], output data shape [-1, out_size=n_features*data_size//4]
    y = tf.reshape(conv4_12, [-1, out_size])  #flatten
    # softmax output
    return y  #tf.nn.softmax(y)
# Example 12
def tf_prediction_func_fcn2(model):
    """Fully-convolutional U-Net-style net with a residual front-end.

    Input conv + 10-layer residual stack, then a two-stage conv/max-pool
    encoder, a two-stage deconv decoder with additive skip connections, and a
    final 3-layer conv block reducing to a single sigmoid output channel.
    Returns the 4-D conv output directly (no flatten).

    NOTE(review): merge_type='add' requires equal feature counts on both
    merge inputs; the stage widths below (n_features / 2*n_features) are
    chosen to satisfy that — confirm against tf_layer.merge semantics.
    """
    #if model.arg is None:
    #    model.arg = [1.0, 1.0]
    # get data size
    NNlayer = tf_layer()
    #data_size   = int(model.data.get_shape()[1])
    im_shape = (100, 100)  #(model.data.shape[1], model.data.shape[2])#
    #target_size = int(model.target.get_shape()[1])
    pool_len = 2
    n_features = 8
    cnn_ksize = (10, 10)

    # input conv: 1 channel -> n_features, no pooling
    y0 = NNlayer.convolution2d(model.data, cov_ker_size = cnn_ksize, \
                                           in_n_features  = 1, out_n_features = n_features, \
                                           pool_type = 'None', activate_type = 'ReLU', layer_norm = 1)

    # 10-layer residual conv stack at constant feature width
    y_res = NNlayer.multi_convolution2d_residual(y0,
                                                 n_cnn_layers=10,
                                                 cov_ker_size=cnn_ksize,
                                                 in_n_features=n_features,
                                                 activate_type='ReLU')

    #out_size = data_size#n_features*data_size//(pool_len**4)
    #y1_4d    = tf.reshape(model.data, [-1,im_shape[0],im_shape[1],1]) #reshape into 4d tensor
    # input size   [-1, im_shape[0],          im_shape[1],         1 ]
    # output size  [-1, im_shape[0],          im_shape[1], n_features ]
    conv1_12 = NNlayer.multi_convolution2d(y_res, cov_ker_size = cnn_ksize, n_cnn_layers = 3, \
                                           in_n_features_arr  = (n_features, n_features,  n_features), \
                                           out_n_features_arr = (n_features, n_features,  n_features), \
                                           pool_type = 'None', activate_type = 'ReLU', layer_norm = 1)
    # input size   [-1, im_shape[0],          im_shape[1],          n_features ]
    # output size  [-1, im_shape[0]/pool_len, im_shape[1]/pool_len, n_features ]
    pool1    = NNlayer.pool(conv1_12, pool_size = [1, pool_len, pool_len, 1], \
                            pool_type = 'max_pool')
    # input size   [-1, im_shape[0]/pool_len, im_shape[1]/pool_len, 2*n_features ]
    # output size  [-1, im_shape[0]/pool_len, im_shape[1]/pool_len, 2*n_features ]
    conv2_12 = NNlayer.multi_convolution2d(pool1, cov_ker_size = cnn_ksize, n_cnn_layers = 3, \
                                           in_n_features_arr  = (n_features,   2*n_features,  2*n_features), \
                                           out_n_features_arr = (2*n_features, 2*n_features,  2*n_features), \
                                           pool_type = 'None', activate_type = 'ReLU', layer_norm = 1)
    # input size   [-1, im_shape[0]/pool_len,      im_shape[1]/pool_len,      2*n_features ]
    # output size  [-1, im_shape[0]/(pool_len**2), im_shape[1]/(pool_len**2), 2*n_features ]
    pool2    = NNlayer.pool(conv2_12, pool_size = [1, pool_len, pool_len, 1], \
                            pool_type = 'max_pool')

    # bottleneck conv block: 2*n_features -> 4*n_features
    # input size   [-1, im_shape[0]/pool_len, im_shape[1]/pool_len, 4*n_features ]
    # output size  [-1, im_shape[0]/pool_len, im_shape[1]/pool_len, 4*n_features ]
    conv3_12 = NNlayer.multi_convolution2d(pool2, cov_ker_size = cnn_ksize, n_cnn_layers = 3, \
                                           in_n_features_arr  = (2*n_features,   4*n_features,  4*n_features), \
                                           out_n_features_arr = (4*n_features,   4*n_features,  4*n_features), \
                                           pool_type = 'None', activate_type = 'ReLU', layer_norm = 1)
    #conv3_12_dropout = tf.nn.dropout(conv3_12, keep_prob = model.arg)
    # input size   [-1, im_shape[0]/(pool_len**2), im_shape[1]/(pool_len**2), 4*n_features ]
    # output size  [-1, im_shape[0]/pool_len,      im_shape[1]/pool_len,      4*n_features ]
    up3      = NNlayer.deconvolution2d(conv3_12, cov_ker_size = ( pool_len, pool_len), \
                                            in_n_features = 4*n_features, out_n_features = 2*n_features, \
                                            conv_strides = [1, pool_len, pool_len, 1], activate_type = 'ReLU', layer_norm = 1)
    # skip connection: add encoder stage-2 output to the upsampled tensor
    # input size   [-1, im_shape[0]/pool_len, im_shape[1]/pool_len, 4*n_features ]
    # output size  [-1, im_shape[0]/pool_len, im_shape[1]/pool_len, 4*n_features ]
    merge3 = NNlayer.merge(conv2_12, up3, axis=3, merge_type='add')
    # input size   [-1, im_shape[0]/pool_len, im_shape[1]/pool_len, 2*n_features ]
    # output size  [-1, im_shape[0]/pool_len, im_shape[1]/pool_len, n_features ]
    conv4_12 = NNlayer.multi_convolution2d(merge3, cov_ker_size = (pool_len, pool_len), n_cnn_layers = 3, \
                                           in_n_features_arr  = (2*n_features, 2*n_features,  2*n_features), \
                                           out_n_features_arr = (2*n_features, 2*n_features,  2*n_features), \
                                           pool_type = 'None', activate_type = 'ReLU', layer_norm = 1)
    # input size   [-1, im_shape[0]/pool_len, im_shape[1]/pool_len, n_features ]
    # output size  [-1, im_shape[0],          im_shape[1],          n_features ]
    up4      = NNlayer.deconvolution2d(conv4_12, cov_ker_size = cnn_ksize, \
                                            in_n_features = 2*n_features, out_n_features = 1*n_features, \
                                            conv_strides = [1, pool_len, pool_len, 1], activate_type = 'ReLU', layer_norm = 1)
    # skip connection: add encoder stage-1 output to the upsampled tensor
    # input size   [-1, im_shape[0], im_shape[1], n_features ]
    # output size  [-1, im_shape[0], im_shape[1], 2*n_features ]
    merge4 = NNlayer.merge(conv1_12, up4, axis=3, merge_type='add')
    # final conv block reduces to a single sigmoid output channel
    # input size   [-1, im_shape[0], im_shape[1], 2*n_features ]
    # output size  [-1, im_shape[0], im_shape[1], 1 ]
    conv5_12 = NNlayer.multi_convolution2d(merge4, cov_ker_size = cnn_ksize, n_cnn_layers = 3, \
                                           in_n_features_arr  = (1*n_features, n_features,  n_features), \
                                           out_n_features_arr = (n_features,  n_features,   1), \
                                           pool_type = 'None', activate_type = 'sigmoid')
    # input data shape [-1,  data_size/4, 1, cnn_n_feature], output data shape [-1, out_size=n_features*data_size//4]
    #y = tf.reshape(conv5_12, [-1, im_shape[0], im_shape[1]]) #flatten
    # softmax output
    return conv5_12  #tf.argmax(conv5_12)
# Example 13
def tf_prediction_func_unet(model):
    """Build a 2-level U-Net-style prediction graph over ``model.data``.

    Encoder: two 3-layer convolution stacks with a max-pool between the
    levels.  Decoder: two transposed convolutions, each followed by a
    channel-axis concat with the matching encoder output (skip connection);
    the final stack reduces to a single channel through a sigmoid.

    NOTE(review): ``model.data`` is assumed to be a 4-D NHWC tensor with a
    single input channel -- confirm against the caller.

    Returns the single-channel output tensor of the last conv stack.
    """
    layers = tf_layer()
    stride = 2        # pooling / upsampling factor per resolution level
    width  = 32       # feature channels used throughout the conv stacks
    ksize  = (10, 10) # spatial kernel for the main conv stacks

    # ---- encoder, level 1: 1 -> width channels at full resolution ----
    enc1 = layers.multi_convolution2d(model.data, cov_ker_size = ksize, n_cnn_layers = 3, \
                                      in_n_features_arr  = (1,     width, width), \
                                      out_n_features_arr = (width, width, width), \
                                      pool_type = 'None', activate_type = 'ReLU')
    down1 = layers.pool(enc1, pool_size = [1, stride, stride, 1], \
                        pool_type = 'max_pool')

    # ---- encoder, level 2: width -> width channels at half resolution ----
    enc2 = layers.multi_convolution2d(down1, cov_ker_size = ksize, n_cnn_layers = 3, \
                                      in_n_features_arr  = (width, width, width), \
                                      out_n_features_arr = (width, width, width), \
                                      pool_type = 'None', activate_type = 'ReLU')
    down2 = layers.pool(enc2, pool_size = [1, stride, stride, 1], \
                        pool_type = 'max_pool')

    # ---- decoder, level 2: upsample, then concat with the enc2 skip ----
    up_a = layers.deconvolution2d(down2, cov_ker_size = (stride, stride), \
                                  in_n_features = width, out_n_features = width, \
                                  conv_strides = [1, stride, stride, 1], activate_type = 'ReLU')
    skip_a = layers.merge(enc2, up_a, axis=3, merge_type='concat')
    # NOTE(review): this stack uses a (stride, stride) kernel, unlike the
    # ksize used by every other stack -- matches the sibling example above,
    # so kept as-is; verify it is intentional.
    dec_a = layers.multi_convolution2d(skip_a, cov_ker_size = (stride, stride), n_cnn_layers = 3, \
                                       in_n_features_arr  = (2*width, width, width), \
                                       out_n_features_arr = (width,   width, width), \
                                       pool_type = 'None', activate_type = 'ReLU')

    # ---- decoder, level 1: upsample, then concat with the enc1 skip ----
    up_b = layers.deconvolution2d(dec_a, cov_ker_size = ksize, \
                                  in_n_features = width, out_n_features = width, \
                                  conv_strides = [1, stride, stride, 1], activate_type = 'ReLU')
    skip_b = layers.merge(enc1, up_b, axis=3, merge_type='concat')

    # ---- output head: 2*width -> 1 channel, sigmoid activation ----
    out_map = layers.multi_convolution2d(skip_b, cov_ker_size = ksize, n_cnn_layers = 3, \
                                         in_n_features_arr  = (2*width, width, width), \
                                         out_n_features_arr = (width,   width, 1), \
                                         pool_type = 'None', activate_type = 'sigmoid')
    return out_map