def shallow2(inpt, inpt_size, is_training):
    '''
        Shallow 1-D conv net (18-ticnn variant) with a 3-class output head
        and a scalar speed-regression head.
    '''
    end_points = {}
    # Dropout keep probability: 0.5 while training, 1.0 at inference.
    keep_prob = tf.cond(is_training, lambda: tf.constant(0.5),
                        lambda: tf.constant(1.0))
    with tf.name_scope('reshape'):
        x_image = tf.reshape(inpt, [-1, inpt_size, 1, 1])

    ## first conv-----------------------------
    num_feature = 16
    out = tu.add_conv1d_layer(x_image,
                              num_feature,
                              9,
                              2,
                              is_training=is_training,
                              layer_name='conv1')
    end_points['conv1'] = out
    out = tu.batch_normalization(out, is_training=is_training)
    out = tu.max_pool(out, ksize=4, stride=2, layer_name='pool1')
    end_points['pool1'] = out
    tu.print_activations(out)

    out = tu.add_conv1d_layer(out,
                              num_feature,
                              9,
                              2,
                              is_training=is_training,
                              layer_name='conv2')
    end_points['conv2'] = out
    out = tu.max_pool(out, ksize=4, stride=2, layer_name='pool2')
    end_points['pool2'] = out
    tu.print_activations(out)

    ## fully connected layers-----------------------------
    with tf.name_scope('fc1'):
        out = tu.add_fc_layer(out,
                              100,
                              relu=True,
                              BN=True,
                              is_training=is_training)  # previously 256 nodes
        out = tf.nn.dropout(out, keep_prob)
    with tf.name_scope('fc2-1'):
        out1 = tu.add_fc_layer(out, 3)

    with tf.name_scope('fc2-2'):
        print(end_points.keys())
        out2 = tu.add_fc_layer(out, 1)

    end_points['class_end'] = out1
    end_points['speed_end'] = out2
    return end_points
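# Usage sketch (not part of the original file): one way to consume the dict
# returned by shallow2. The placeholder names and the 0.5 loss weighting are
# illustrative assumptions, not the authors' training setup.
def _example_two_head_loss(inpt_size=4096):
    signals = tf.placeholder(tf.float32, [None, inpt_size], name='signals')
    labels = tf.placeholder(tf.int64, [None], name='labels')
    speeds = tf.placeholder(tf.float32, [None, 1], name='speeds')
    training = tf.placeholder(tf.bool, [], name='is_training')

    nets = shallow2(signals, inpt_size, training)
    # Classification head: 3-way softmax cross-entropy.
    class_loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=labels, logits=nets['class_end']))
    # Regression head: mean squared error on the predicted speed.
    speed_loss = tf.reduce_mean(tf.square(nets['speed_end'] - speeds))
    return class_loss + 0.5 * speed_loss  # weighting is a guess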
def speednet1(inpt, inpt_size, is_training):
    keep_prob = tf.cond(is_training, lambda: tf.constant(0.5),
                        lambda: tf.constant(1.0))
    end_point = {}
    with tf.name_scope('reshape'):
        x_image = tf.reshape(inpt, [-1, inpt_size, 1, 1])

    with tf.name_scope('block1'):
        out = tu.add_conv1d_layer(x_image, 32, 9, 3, is_training=is_training)
        out = tu.max_pool(out, ksize=4)
    end_point['block1'] = out
    
    # Speed-regression head: branches off the block1 feature map before the
    # deeper classification blocks below.
    with tf.name_scope('block8'):
        out2 = tu.add_fc_layer(out, 128, relu=True, BN=True,
                               is_training=is_training)
        out2 = tf.nn.dropout(out2, keep_prob)
    with tf.name_scope('block9'):
        out2 = tu.add_fc_layer(out2, 1)

    with tf.name_scope('block2'):
        out = tu.add_conv1d_layer(out, 64, 9, is_training=is_training)
        out = tu.max_pool(out, ksize=4)
    end_point['block2'] = out

    with tf.name_scope('block3'):
        out = tu.add_conv1d_layer(out, 64, 9, is_training=is_training)
        out = tu.max_pool(out, ksize=4)

    with tf.name_scope('block4'):
        out = tu.add_conv1d_layer(out, 64, 9, is_training=is_training)
        out = tu.max_pool(out, ksize=4)

#    with tf.name_scope('block5'):
#        out = tu.add_conv1d_layer(out, 64, 9, is_training=is_training)
#        out = tu.max_pool(out, ksize=4)

    with tf.name_scope('block6'):
        out1 = tu.add_fc_layer(out, 256, relu=True, BN=True,
                               is_training=is_training)
        out1 = tf.nn.dropout(out1, keep_prob)

    with tf.name_scope('block7'):
        out1 = tu.add_fc_layer(out1, 3)
    
    end_point['class_end'] = out1
    end_point['speed_end'] = out2
    return end_point
def simpnet2(inpt, inpt_size, is_training):
    keep_prob = tf.cond(is_training, lambda: tf.constant(0.5),
                        lambda: tf.constant(1.0))
    with tf.name_scope('reshape'):
        x_image = tf.reshape(inpt, [-1, inpt_size, 1, 1])

    with tf.name_scope('block1'):
        out = tu.add_conv1d_layer(x_image, 32, 64, is_training=is_training)
        out = tu.max_pool(out)

    with tf.name_scope('block2'):
        out = tu.add_conv1d_layer(out, 64, 9, is_training=is_training)
        out = tu.max_pool(out)

    with tf.name_scope('block3'):
        out = tu.add_conv1d_layer(out, 64, 9, is_training=is_training)
        out = tu.max_pool(out)

    with tf.name_scope('block4'):
        out = tu.add_conv1d_layer(out, 64, 9, is_training=is_training)
        out = tu.max_pool(out)

    with tf.name_scope('block5'):
        out = tu.add_conv1d_layer(out, 64, 9, is_training=is_training)
        out = tu.max_pool(out)

    with tf.name_scope('block6'):
        out = tu.add_fc_layer(out,
                              128,
                              relu=True,
                              BN=True,
                              is_training=is_training)
        out = tf.nn.dropout(out, keep_prob)

    with tf.name_scope('block7'):
        y_conv = tu.add_fc_layer(out, 3)
    return y_conv
def cAlex(inpt, inpt_size, is_training):
    keep_prob = tf.cond(is_training, lambda: tf.constant(0.5),
                        lambda: tf.constant(1.0))
    with tf.name_scope('reshape'):
        x_image = tf.reshape(inpt, [-1, inpt_size, 1, 1])

    ## first conv-----------------------------
    with tf.name_scope('conv1'):
        conv1_out = tu.add_conv1d_layer(x_image,
                                        16,
                                        kernel_size=64,
                                        stride=16,
                                        is_training=is_training)
        # With padding='SAME', output length = ceil(INPUT_SIZE / stride),
        # e.g. 4096 / 16 = 256 ---> [-1, 256, 1, num_feature1]
    with tf.name_scope('pool1'):
        h_pool1 = tu.max_pool(conv1_out, ksize=3, stride=2, padding='VALID')
        tu.print_activations(h_pool1)
        # output size: (256-3+1)/2=127 (ceiling) ---> [-1, 127, 1, num_feature1]

    ## second conv-----------------------------
    with tf.name_scope('conv2'):
        conv2_out = tu.add_conv1d_layer(h_pool1,
                                        32,
                                        kernel_size=25,
                                        is_training=is_training)
    with tf.name_scope('pool2'):
        h_pool2 = tu.max_pool(conv2_out, ksize=3, stride=2, padding='VALID')
        tu.print_activations(h_pool2)
        # output size: (127-3+1)/2=63 ---> [-1,63,1,num_feature2]

    ## third conv-----------------------------
    with tf.name_scope('conv3'):
        conv3_out = tu.add_conv1d_layer(h_pool2,
                                        32,
                                        9,
                                        print_activation=False,
                                        is_training=is_training)
    with tf.name_scope('conv4'):
        conv4_out = tu.add_conv1d_layer(conv3_out,
                                        32,
                                        9,
                                        print_activation=False,
                                        is_training=is_training)
    with tf.name_scope('conv5'):
        conv5_out = tu.add_conv1d_layer(conv4_out,
                                        25,
                                        9,
                                        print_activation=False,
                                        is_training=is_training)
    with tf.name_scope('pool3'):
        h_pool3 = tu.max_pool(conv5_out, ksize=3, stride=2, padding='VALID')
        tu.print_activations(h_pool3)
        # output size: (63-3+1)/2=31 ---> [-1,31,1,num_feature5]

    ## fully connected layers-----------------------------
    with tf.name_scope('fc1'):
        fc1_out = tu.add_fc_layer(h_pool3,
                                  128,
                                  relu=True,
                                  BN=True,
                                  is_training=is_training)

    with tf.name_scope('dropout1'):
        h_fc1_drop = tf.nn.dropout(fc1_out, keep_prob)

    with tf.name_scope('fc2'):
        fc2_out = tu.add_fc_layer(h_fc1_drop,
                                  128,
                                  relu=True,
                                  BN=True,
                                  is_training=is_training)

    with tf.name_scope('dropout2'):
        h_fc2_drop = tf.nn.dropout(fc2_out, keep_prob)

    with tf.name_scope('fc3'):
        y_conv = tu.add_fc_layer(h_fc2_drop, 3)
    return y_conv
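# Sketch (added for illustration, not in the original): the length arithmetic
# behind the "output size" comments in cAlex, assuming a 4096-point input,
# which reproduces the numbers quoted above.
def _conv_out_len(n, k, s, padding='SAME'):
    import math
    if padding == 'SAME':                  # length set by the stride only
        return math.ceil(n / s)
    return math.ceil((n - k + 1) / s)      # 'VALID': no implicit padding

# conv1: 4096/16 -> 256; pool1: (256-3+1)/2 -> 127;
# pool2: (127-3+1)/2 -> 63;  pool3: (63-3+1)/2 -> 31
assert _conv_out_len(4096, 64, 16, 'SAME') == 256
assert _conv_out_len(256, 3, 2, 'VALID') == 127
assert _conv_out_len(127, 3, 2, 'VALID') == 63
assert _conv_out_len(63, 3, 2, 'VALID') == 31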
def cvgg19_with_pindex(inpt, inpt_size, is_training):

    end_points = {}
    keep_prob = tf.cond(is_training, lambda: tf.constant(0.5),
                        lambda: tf.constant(1.0))
    with tf.name_scope('reshape'):
        x_image = tf.reshape(inpt, [-1, inpt_size, 1, 1])

    ## first conv-----------------------------
    with tf.name_scope('block1'):
        num_feature = 32
        out = tu.add_conv1d_layer(x_image,
                                  num_feature,
                                  9,
                                  2,
                                  BN=False,
                                  layer_name='conv1')
        end_points['conv1'] = out
        out = tu.add_conv1d_layer(out,
                                  num_feature,
                                  9,
                                  2,
                                  BN=False,
                                  layer_name='conv2')
        end_points['conv2'] = out
        out = tu.batch_normalization(out, is_training=is_training)
        out, pool_args = tu.max_pool_with_argmax(out, 4, layer_name='pool1')
        end_points['pool1'] = out
        end_points['pool1_arg'] = pool_args
        tu.print_activations(out)

    ## second conv-----------------------------
    with tf.name_scope('block2'):
        num_feature = num_feature * 2
        out = tu.add_conv1d_layer(out,
                                  num_feature,
                                  9,
                                  BN=False,
                                  layer_name='conv3')
        end_points['conv3'] = out
        out = tu.add_conv1d_layer(out,
                                  num_feature,
                                  9,
                                  is_training=is_training,
                                  layer_name='conv4',
                                  print_activation=True)
        end_points['conv4'] = out
        out, pool_args = tu.max_pool_with_argmax(out, 4, layer_name='pool2')
        end_points['pool2_arg'] = pool_args
        end_points['pool2'] = out
        tu.print_activations(out)

    ## third conv-----------------------------
    with tf.name_scope('block3'):
        num_feature = num_feature * 2
        out = tu.add_conv1d_layer(out,
                                  num_feature,
                                  9,
                                  BN=False,
                                  layer_name='conv6')
        end_points['conv6'] = out
        out = tu.add_conv1d_layer(out,
                                  num_feature,
                                  9,
                                  BN=False,
                                  layer_name='conv7')
        end_points['conv7'] = out
        out = tu.add_conv1d_layer(out,
                                  num_feature,
                                  9,
                                  is_training=is_training,
                                  layer_name='conv8',
                                  print_activation=True)
        end_points['conv8'] = out
        out, pool_args = tu.max_pool_with_argmax(out, 4, layer_name='pool3')
        end_points['pool3_arg'] = pool_args
        end_points['pool3'] = out
        tu.print_activations(out)

    ## fourth conv-----------------------------
    with tf.name_scope('block4'):
        out = tu.add_conv1d_layer(out,
                                  num_feature,
                                  9,
                                  BN=False,
                                  layer_name='conv10')
        end_points['conv10'] = out
        out = tu.add_conv1d_layer(out,
                                  num_feature,
                                  9,
                                  BN=False,
                                  layer_name='conv11')
        end_points['conv11'] = out
        out = tu.add_conv1d_layer(out,
                                  num_feature,
                                  9,
                                  is_training=is_training,
                                  layer_name='conv12',
                                  print_activation=True)
        end_points['conv12'] = out
        out, pool_args = tu.max_pool_with_argmax(out, 4, layer_name='pool4')
        end_points['pool4_arg'] = pool_args
        # out = tu.global_average_pool(out)
        end_points['pool4'] = out
        tu.print_activations(out)

    ## fully connected layers-----------------------------
    with tf.name_scope('fc1'):
        out = tu.add_fc_layer(out,
                              256,
                              relu=True,
                              BN=True,
                              is_training=is_training)
        out = tf.nn.dropout(out, keep_prob)
    with tf.name_scope('fc4'):
        out = tu.add_fc_layer(out, 3)

    with tf.name_scope('block8'):
        print(end_points.keys())
        out2 = tu.add_fc_layer(end_points['pool4'],
                               256,
                               relu=True,
                               BN=True,
                               is_training=is_training)
        out2 = tf.nn.dropout(out2, keep_prob)
    with tf.name_scope('block9'):
        out2 = tu.add_fc_layer(out2, 1)

    end_points['class_end'] = out
    end_points['speed_end'] = out2
    return end_points
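# Sketch (assumption; the real helper lives in the project's `tu` module):
# tu.max_pool_with_argmax presumably wraps tf.nn.max_pool_with_argmax so the
# flattened argmax indices can be kept in end_points alongside the pooled
# map, e.g. for later unpooling or inspection.
def _max_pool_with_argmax_sketch(x, ksize, stride=None, layer_name='pool'):
    stride = stride or ksize  # default stride is a guess, not from `tu`
    with tf.name_scope(layer_name):
        pooled, argmax = tf.nn.max_pool_with_argmax(
            x,
            ksize=[1, ksize, 1, 1],
            strides=[1, stride, 1, 1],
            padding='SAME')
    return pooled, argmax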
def sphere_net20(inpt, inpt_size, is_training):
    end_points = {}
    keep_prob = tf.cond(is_training, lambda: tf.constant(0.5),
                        lambda: tf.constant(1.0))
    with tf.name_scope('reshape'):
        inpt = tf.reshape(inpt, [-1, inpt_size, 1, 1])

    ## first res-----------------------------
    num_feature = 16
    out = res_block(inpt, num_feature, name_scope='block1', down_sample=True)
    end_points['block1'] = out
    tu.print_activations(out)

    ## second res-----------------------------
    num_feature *= 2
    out = res_block(out, num_feature, name_scope='block2', down_sample=True)
    end_points['block2'] = out
    out = res_block(out, num_feature, name_scope='block3', down_sample=False)
    end_points['block3'] = out
    tu.print_activations(out)

    # ## third res-----------------------------
    # num_feature *= 2
    # out = res_block(out, num_feature, name_scope='block4', down_sample=True)
    # end_points['block4']=out
    # out = res_block(out, num_feature, name_scope='block5', down_sample=False)
    # end_points['block5']=out
    # out = res_block(out, num_feature, name_scope='block6', down_sample=False)
    # end_points['block6']=out
    # out = res_block(out, num_feature, name_scope='block7', down_sample=False)
    # end_points['block7']=out
    # tu.print_activations(out)

    ## fourth res-----------------------------
    num_feature *= 2
    out = res_block(out, num_feature, name_scope='block8', down_sample=True)
    end_points['block8'] = out
    tu.print_activations(out)

    ## fully connected layers-----------------------------
    with tf.name_scope('fc1_1'):
        out = tu.add_fc_layer(out,
                              4096,
                              relu=True,
                              BN=False,
                              is_training=is_training)
        out = tf.nn.dropout(out, keep_prob)
    with tf.name_scope('fc1_2'):
        out = tu.add_fc_layer(out, 3)
    end_points['class_end'] = out

    with tf.name_scope('fc2_1'):
        print(end_points.keys())
        out = tu.add_fc_layer(end_points['block8'],
                              256,
                              relu=True,
                              BN=True,
                              is_training=is_training)
        out = tf.nn.dropout(out, keep_prob)
    with tf.name_scope('fc2_2'):
        out = tu.add_fc_layer(out, 1)
    end_points['speed_end'] = out

    return end_points
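# Sketch (illustrative assumption): `res_block` is not defined in this
# snippet. A 1-D residual block consistent with how sphere_net20 calls it
# could be built from the same `tu` conv helper as below; the project's real
# implementation may differ.
def _res_block_sketch(x, num_feature, name_scope='res', down_sample=False):
    with tf.name_scope(name_scope):
        stride = 2 if down_sample else 1
        # Projection path: sets the channel count (and length if downsampling).
        shortcut = tu.add_conv1d_layer(x, num_feature, 1, stride,
                                       layer_name=name_scope + '_proj')
        out = tu.add_conv1d_layer(shortcut, num_feature, 3, 1,
                                  layer_name=name_scope + '_conv1')
        out = tu.add_conv1d_layer(out, num_feature, 3, 1,
                                  layer_name=name_scope + '_conv2')
        return out + shortcut  # residual connection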
def cvgg19_2(inpt, inpt_size, is_training):

    end_points = {}
    keep_prob = tf.cond(is_training, lambda: tf.constant(0.5),
                        lambda: tf.constant(1.0))
    with tf.name_scope('reshape'):
        x_image = tf.reshape(inpt, [-1, inpt_size, 1, 1])

    ## first conv-----------------------------
    num_feature = 16
    out = tu.add_conv1d_layer(x_image,
                              num_feature,
                              9,
                              2,
                              BN=False,
                              layer_name='conv1')
    end_points['conv1'] = out
    out = tu.add_conv1d_layer(out,
                              num_feature,
                              9,
                              2,
                              BN=False,
                              layer_name='conv2')
    end_points['conv2'] = out
    out = tu.batch_normalization(out, is_training=is_training)
    out = tu.max_pool(out, ksize=4, layer_name='pool1')
    end_points['pool1'] = out

    tu.print_activations(out)

    ## second conv-----------------------------
    num_feature = num_feature * 2
    out = tu.add_conv1d_layer(out,
                              num_feature,
                              9,
                              BN=False,
                              layer_name='conv3')
    end_points['conv3'] = out

    out = tu.add_conv1d_layer(out,
                              num_feature,
                              9,
                              is_training=is_training,
                              layer_name='conv4',
                              print_activation=True)
    end_points['conv4'] = out
    out = tu.max_pool(out, ksize=4, layer_name='pool2')
    end_points['pool2'] = out
    tu.print_activations(out)

    ## third conv-----------------------------
    num_feature = num_feature * 2
    out = tu.add_conv1d_layer(out,
                              num_feature,
                              9,
                              BN=False,
                              layer_name='conv6')
    end_points['conv6'] = out
    out = tu.add_conv1d_layer(out,
                              num_feature,
                              9,
                              BN=False,
                              layer_name='conv7')
    end_points['conv7'] = out
    out = tu.add_conv1d_layer(out,
                              num_feature,
                              9,
                              is_training=is_training,
                              layer_name='conv8',
                              print_activation=True)
    end_points['conv8'] = out
    out = tu.max_pool(out, ksize=4, layer_name='pool3')
    end_points['pool3'] = out
    tu.print_activations(out)

    ## fourth conv-----------------------------
    out = tu.add_conv1d_layer(out,
                              num_feature,
                              9,
                              BN=False,
                              layer_name='conv10')
    end_points['conv10'] = out
    out = tu.add_conv1d_layer(out,
                              num_feature,
                              9,
                              BN=False,
                              layer_name='conv11')
    end_points['conv11'] = out
    out = tu.add_conv1d_layer(out,
                              num_feature,
                              9,
                              is_training=is_training,
                              layer_name='conv12',
                              print_activation=True)
    end_points['conv12'] = out
    # out = tu.max_pool(out, ksize=4, layer_name='pool4')
    out = tu.global_average_pool(out)
    end_points['GAP'] = out
    tu.print_activations(out)

    ## fully connected layers-----------------------------
    with tf.name_scope('fc1'):
        out = tu.add_fc_layer(out,
                              256,
                              relu=True,
                              BN=True,
                              is_training=is_training)
        out = tf.nn.dropout(out, keep_prob)
    with tf.name_scope('fc2-1'):
        out1 = tu.add_fc_layer(out, 3)

    with tf.name_scope('fc2-2'):
        out2 = tu.add_fc_layer(out, 1)

    end_points['class_end'] = out1
    end_points['speed_end'] = out2
    return end_points
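# Sketch (assumption): tu.global_average_pool is not shown here. For the
# [-1, length, 1, channels] tensors used above, it presumably averages the
# spatial axes down to one value per channel, e.g.:
def _global_average_pool_sketch(x):
    # Reduce over length and the singleton width axis; keep batch and channels.
    return tf.reduce_mean(x, axis=[1, 2])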