import math
from time import time

import tensorflow as tf
from tensorflow.python.framework import ops

# Layer helpers (spatialConvolution, batchNormalization, maxPool,
# avgPool, flatten, fullyConnected) live in the project's `common`
# module; `block`, `inception`, `var`, `conv2D`, `mxPool`, `avgPool`,
# and `inception_` are likewise assumed to be project helpers.
import common

# Note: this listing concatenates several independent examples, so some
# names (getModel, inference) are defined more than once; each usage
# sketch below assumes its snippet stands alone in its own module.


def getModel(x, num_output, K, stages, wd, is_training, transfer_mode=False):
    with tf.variable_scope('conv1'):
        x = common.spatialConvolution(x, 3, 1, 2 * K, wd=wd)
        # x = common.batchNormalization(x, is_training=is_training)
        # x = tf.nn.relu(x)
        # x = common.maxPool(x, 3, 2)
    print(x)
    with tf.variable_scope('block0'):
        x = block(x, stages[0], K, is_training=is_training, wd=wd)
    print(x)
    with tf.variable_scope('trans1'):
        x = transition(x, K, wd=wd, is_training=is_training)
    print(x)
    with tf.variable_scope('block2'):
        x = block(x, stages[1], K, is_training=is_training, wd=wd)
    print(x)
    with tf.variable_scope('trans2'):
        x = transition(x, K, wd=wd, is_training=is_training)
    print(x)
    with tf.variable_scope('block3'):
        x = block(x, stages[2], K, is_training=is_training, wd=wd)
    print(x)
    x = common.avgPool(x, 8, 1, padding='VALID')

    x = common.flatten(x)

    if not transfer_mode:
        with tf.variable_scope('output'):
            x = common.fullyConnected(x, num_output, wd=wd)
    else:
        with tf.variable_scope('transfer_output'):
            x = common.fullyConnected(x, num_output, wd=wd)

    return x
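
# A hedged usage sketch for getModel above (illustrative, not from the
# original code; K=12, stages=[12, 12, 12], and the CIFAR-style 32x32
# input are assumptions). The 32x32 size matches the final 8x8 VALID
# average pool: the two 2x2 transitions reduce 32 -> 16 -> 8.
def _demo_getModel():
    images = tf.placeholder(tf.float32, [None, 32, 32, 3])
    return getModel(images, num_output=10, K=12, stages=[12, 12, 12],
                    wd=1e-4, is_training=True)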


def transition(x, K, wd, is_training):
    with tf.variable_scope('conv'):
        current = common.batchNormalization(x, is_training=is_training)
        current = tf.nn.relu(current)
        shape = current.get_shape().as_list()
        # The int() cast keeps the channel count an integer
        # (math.floor returns a float on Python 2).
        dim = int(math.floor(shape[3] * 0.5))
        current = common.spatialConvolution(current, 1, 1, dim, wd=wd)
        current = common.avgPool(current, 2, 2)
    return current
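
# For reference: transition matches the DenseNet-BC transition layer
# with compression factor 0.5 -- BN-ReLU plus a 1x1 convolution that
# halves the channel count, followed by 2x2 average pooling that halves
# the spatial resolution, e.g. (N, 16, 16, 304) -> (N, 8, 8, 152).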


# AlexNet-style feature extractor with one fully connected head per
# entry in num_output (multi-head variant).
def inference(x, num_output, wd, is_training, transfer_mode=False):
    with tf.variable_scope('block1'):
        network = block(x, [11, 4, 96], wd, is_training)
    network = common.maxPool(network, 3, 2)
    with tf.variable_scope('block2'):
        network = block(network, [5, 1, 256], wd, is_training)
    network = common.maxPool(network, 3, 2)
    with tf.variable_scope('block3'):
        network = block(network, [3, 1, 384], wd, is_training)
    network = common.maxPool(network, 3, 2)
    with tf.variable_scope('block4'):
        network = block(network, [3, 1, 1024], wd, is_training)
    network = common.avgPool(network, 7, 1)
    network = common.flatten(network)
    output = [None] * len(num_output)
    for o in range(len(num_output)):
        with tf.variable_scope('output' + str(o)):
            output[o] = common.fullyConnected(network, num_output[o], wd=wd)

    return output


# Alternative single-head variant of inference; transfer_mode only
# selects a different variable scope for the final layer.
def inference(x, num_output, wd, is_training, transfer_mode=False):
    with tf.variable_scope('block1'):
        network = block(x, [11, 4, 96], wd, is_training)
    network = common.maxPool(network, 3, 2)
    with tf.variable_scope('block2'):
        network = block(network, [5, 1, 256], wd, is_training)
    network = common.maxPool(network, 3, 2)
    with tf.variable_scope('block3'):
        network = block(network, [3, 1, 384], wd, is_training)
    network = common.maxPool(network, 3, 2)
    with tf.variable_scope('block4'):
        network = block(network, [3, 1, 1024], wd, is_training)
    network = common.avgPool(network, 7, 1)
    network = common.flatten(network)
    if not transfer_mode:
        with tf.variable_scope('output'):
            network = common.fullyConnected(network, num_output, wd=wd)
    else:
        with tf.variable_scope('transfer_output'):
            network = common.fullyConnected(network, num_output, wd=wd)

    return network
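
# A hedged usage sketch for the single-head variant above (illustrative,
# not from the original code; it assumes this snippet stands alone, since
# `inference` is redefined later in this listing). A 224x224 input is
# assumed: the stride-4 conv gives 224 -> 56, and three 2x2 max pools
# give 56 -> 28 -> 14 -> 7, matching the final 7x7 average pool.
def _demo_inference():
    images = tf.placeholder(tf.float32, [None, 224, 224, 3])
    return inference(images, num_output=1000, wd=5e-4, is_training=True)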


# GoogLeNet-style variant with an auxiliary classifier branch.
def inference(x, num_output, wd, dropout_rate, is_training, transfer_mode=False):
    with tf.variable_scope('features'):
        with tf.variable_scope('conv1'):
            network = common.spatialConvolution(x, 7, 2, 64, wd=wd)
            network = common.batchNormalization(network, is_training=is_training)
            network = tf.nn.relu(network)
        network = common.maxPool(network, 3, 2)
        with tf.variable_scope('conv2'):
            network = common.spatialConvolution(network, 1, 1, 64, wd=wd)
            network = common.batchNormalization(network, is_training=is_training)
            network = tf.nn.relu(network)
        with tf.variable_scope('conv3'):
            network = common.spatialConvolution(network, 3, 1, 192, wd=wd)
            network = common.batchNormalization(network, is_training=is_training)
            network = tf.nn.relu(network)
        network = common.maxPool(network, 3, 2)
        with tf.variable_scope('inception3a'):
            network = inception(network, 64, [96, 128], [16, 32], 32, wd=wd, is_training=is_training)
        with tf.variable_scope('inception3b'):
            network = inception(network, 128, [128, 192], [32, 96], 64, wd=wd, is_training=is_training)
        network = common.maxPool(network, 3, 2)
        with tf.variable_scope('inception4a'):
            network = inception(network, 192, [96, 208], [16, 48], 64, wd=wd, is_training=is_training)
        with tf.variable_scope('inception4b'):
            network = inception(network, 160, [112, 224], [24, 64], 64, wd=wd, is_training=is_training)
        with tf.variable_scope('inception4c'):
            network = inception(network, 128, [128, 256], [24, 64], 64, wd=wd, is_training=is_training)
        with tf.variable_scope('inception4d'):
            network = inception(network, 112, [144, 288], [32, 64], 64, wd=wd, is_training=is_training)

    with tf.variable_scope('mainb'):
        with tf.variable_scope('inception4e'):
            main_branch = inception(network, 256, [160, 320], [32, 128], 128, wd=wd, is_training=is_training)
        main_branch = common.maxPool(main_branch, 3, 2)
        with tf.variable_scope('inception5a'):
            main_branch = inception(main_branch, 256, [160, 320], [32, 128], 128, wd=wd, is_training=is_training)
        with tf.variable_scope('inception5b'):
            main_branch = inception(main_branch, 384, [192, 384], [48, 128], 128, wd=wd, is_training=is_training)
        main_branch = common.avgPool(main_branch, 7, 1)
        main_branch = common.flatten(main_branch)
        # In TF1, tf.nn.dropout's second argument is the *keep*
        # probability, so dropout_rate here is the fraction of units
        # retained, not dropped.
        main_branch = tf.nn.dropout(main_branch, dropout_rate)
        if not transfer_mode:
            with tf.variable_scope('output'):
                main_branch = common.fullyConnected(main_branch, num_output, wd=wd)
        else:
            with tf.variable_scope('transfer_output'):
                main_branch = common.fullyConnected(main_branch, num_output, wd=wd)

    with tf.variable_scope('auxb'):
        # The auxiliary classifier branches off after inception4d.
        aux_classifier = common.avgPool(network, 5, 3)
        with tf.variable_scope('conv1'):
            aux_classifier = common.spatialConvolution(aux_classifier, 1, 1, 128, wd=wd)
            aux_classifier = common.batchNormalization(aux_classifier, is_training=is_training)
            aux_classifier = tf.nn.relu(aux_classifier)
        aux_classifier = common.flatten(aux_classifier)
        with tf.variable_scope('fc1'):
            aux_classifier = common.fullyConnected(aux_classifier, 1024, wd=wd)
            aux_classifier = tf.nn.dropout(aux_classifier, dropout_rate)
        if not transfer_mode:
            with tf.variable_scope('output'):
                aux_classifier = common.fullyConnected(aux_classifier, num_output, wd=wd)
        else:
            with tf.variable_scope('transfer_output'):
                aux_classifier = common.fullyConnected(aux_classifier, num_output, wd=wd)

    # Main and auxiliary logits are returned side by side; callers split
    # them again for the loss (see the sketch below).
    return tf.concat([main_branch, aux_classifier], 1)
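
# A hedged training sketch for the GoogLeNet-style inference above
# (illustrative, not from the original code): `labels` is assumed to be
# a [batch] int tensor, and the 0.3 auxiliary-loss weight follows the
# GoogLeNet paper. tf.split simply undoes the tf.concat of the main and
# auxiliary logits.
def _demo_googlenet_loss(labels):
    images = tf.placeholder(tf.float32, [None, 224, 224, 3])
    logits = inference(images, num_output=1000, wd=1e-4,
                       dropout_rate=0.6, is_training=True)
    main_logits, aux_logits = tf.split(logits, 2, axis=1)
    return (tf.losses.sparse_softmax_cross_entropy(labels, main_logits)
            + 0.3 * tf.losses.sparse_softmax_cross_entropy(labels, aux_logits))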


def getModel(x,
             num_output,
             K,
             stages,
             dropout_rate,
             wd,
             is_training,
             transfer_mode=False):
    print("input", x)
    with tf.variable_scope('conv1'):
        x = common.spatialConvolution(x, 7, 2, 2 * K, wd=wd)
        print("First Conv", x)
        x = common.batchNormalization(x, is_training=is_training)
        x = tf.nn.relu(x)
        x = common.maxPool(x, 3, 2)
        print("First Maxpool", x)

    with tf.variable_scope('block1'):
        x = block(x,
                  stages[0],
                  K,
                  is_training=is_training,
                  dropout_rate=dropout_rate,
                  wd=wd)
        print("block1", x)

    with tf.variable_scope('trans1'):
        x = transition(x,
                       K,
                       dropout_rate=dropout_rate,
                       wd=wd,
                       is_training=is_training)
        print("transition1", x)

    with tf.variable_scope('block2'):
        x = block(x,
                  stages[1],
                  K,
                  is_training=is_training,
                  dropout_rate=dropout_rate,
                  wd=wd)
        print("block2", x)

    with tf.variable_scope('trans2'):
        x = transition(x,
                       K,
                       dropout_rate=dropout_rate,
                       wd=wd,
                       is_training=is_training)
        print("transition2", x)

    with tf.variable_scope('block3'):
        x = block(x,
                  stages[2],
                  K,
                  is_training=is_training,
                  dropout_rate=dropout_rate,
                  wd=wd)
        print("block3", x)

    with tf.variable_scope('trans3'):
        x = transition(x,
                       K,
                       dropout_rate=dropout_rate,
                       wd=wd,
                       is_training=is_training)
        print("transition3", x)

    with tf.variable_scope('block4'):
        x = block(x,
                  stages[3],
                  K,
                  is_training=is_training,
                  dropout_rate=dropout_rate,
                  wd=wd)
        print("block4", x)

    x = common.avgPool(x, 7, 1, padding='VALID')
    print("Last Avg Pool", x)

    x = common.flatten(x)
    print("flatten", x)

    # The eight heads are hardcoded (one 48-way, one 12-way, and six
    # binary), so num_output and transfer_mode are unused in this
    # variant.
    output = [None] * 8
    with tf.variable_scope('output0'):
        output[0] = common.fullyConnected(x, 48, wd=wd)
    with tf.variable_scope('output1'):
        output[1] = common.fullyConnected(x, 12, wd=wd)
    for o in range(2, 8):
        with tf.variable_scope('output' + str(o)):
            output[o] = common.fullyConnected(x, 2, wd=wd)

    return output
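
# A hedged usage sketch for the multi-head getModel above (illustrative,
# not from the original code; the 224x224 input, K=32 with
# stages=[6, 12, 24, 16] as in DenseNet-121, and per-head integer label
# columns are assumptions). The input halves down to the final 7x7 pool:
# conv1 and the first max pool give 224 -> 112 -> 56, and the three
# transitions give 56 -> 28 -> 14 -> 7.
def _demo_multihead_loss(labels):
    images = tf.placeholder(tf.float32, [None, 224, 224, 3])
    # num_output is unused by this variant (the heads are hardcoded).
    heads = getModel(images, num_output=None, K=32, stages=[6, 12, 24, 16],
                     dropout_rate=0.8, wd=1e-4, is_training=True)
    # One cross-entropy term per head, summed into a single scalar loss.
    return tf.add_n([
        tf.losses.sparse_softmax_cross_entropy(labels[:, o], heads[o])
        for o in range(len(heads))
    ])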


# Benchmark build of GoogLeNet from low-level helpers (var, conv2D,
# mxPool, avgPool, inception_, assumed to be defined elsewhere in the
# project); tensors are laid out NCHW on GPU and NHWC on CPU.
def googlenet(dtype=tf.float32,
              batch_size=16,
              dev='gpu',
              width=227,
              height=227,
              g=None):
    x_images = var([batch_size, 3, width, height], "T_NORMAL",
                   dtype=dtype) if dev == 'gpu' else var(
                       [batch_size, width, height, 3], "T_NORMAL", dtype=dtype)

    W_1 = var([7, 7, 3, 64], "T_NORMAL", dtype=dtype)
    b_1 = var([64], "CONSTANT", 0.1, dtype=dtype)
    h_conv1 = conv2D(x_images, W_1, b_1, [1, 1, 2, 2], 'SAME',
                     'NCHW') if dev == 'gpu' else conv2D(
                         x_images, W_1, b_1, [1, 2, 2, 1], 'SAME', 'NHWC')
    print(h_conv1)
    h_pool1 = mxPool(h_conv1, [1, 1, 3, 3], [1, 1, 2, 2], 'SAME',
                     'NCHW') if dev == 'gpu' else mxPool(
                         h_conv1, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME', 'NHWC')
    print(h_pool1)

    W_2 = var([1, 1, 64, 64], "T_NORMAL", dtype=dtype)
    b_2 = var([64], "CONSTANT", 0.1, dtype=dtype)
    h_conv2 = conv2D(h_pool1, W_2, b_2, [1, 1, 1, 1], 'SAME',
                     'NCHW') if dev == 'gpu' else conv2D(
                         h_pool1, W_2, b_2, [1, 1, 1, 1], 'SAME', 'NHWC')
    print(h_conv2)

    W_3 = var([3, 3, 64, 192], "T_NORMAL", dtype=dtype)
    b_3 = var([192], "CONSTANT", 0.1, dtype=dtype)
    h_conv3 = conv2D(h_conv2, W_3, b_3, [1, 1, 1, 1], 'SAME',
                     'NCHW') if dev == 'gpu' else conv2D(
                         h_conv2, W_3, b_3, [1, 1, 1, 1], 'SAME', 'NHWC')
    print(h_conv3)
    h_pool3 = mxPool(h_conv3, [1, 1, 3, 3], [1, 1, 2, 2], 'SAME',
                     'NCHW') if dev == 'gpu' else mxPool(
                         h_conv3, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME', 'NHWC')
    print(h_pool3)

    # Each op_list entry is [type, in_channels, out_channels, kernel,
    # stride, padding]. The commented-out entries below are the 1x1
    # reduce layers and the pooling branch of the full GoogLeNet
    # inception module, which this simplified inception_ variant omits.
    op_list = []
    op_list.append(['conv', 192, 64, [1, 1], [1, 1], 'SAME'])
    #op_list.append(['conv',192,96,[1,1],[1,1],'SAME'])
    op_list.append(['conv', 192, 128, [3, 3], [1, 1], 'SAME'])
    #op_list.append(['conv',192,16,[1,1],[1,1],'SAME'])
    op_list.append(['conv', 192, 32, [5, 5], [1, 1], 'SAME'])
    #op_list.append(['mxpool',192,None,[3,3],[1,1],'SAME'])
    op_list.append(['conv', 192, 32, [1, 1], [1, 1], 'SAME'])
    incept_1 = inception_("incept_v1", op_list, h_pool3, dtype, dev)
    print(incept_1)

    op_list = []
    op_list.append(['conv', 256, 128, [1, 1], [1, 1], 'SAME'])
    #op_list.append(['conv',256,128,[1,1],[1,1],'SAME'])
    op_list.append(['conv', 256, 192, [3, 3], [1, 1], 'SAME'])
    #op_list.append(['conv',256,32,[1,1],[1,1],'SAME'])
    op_list.append(['conv', 256, 96, [5, 5], [1, 1], 'SAME'])
    #op_list.append(['mxpool',256,None,[3,3],[1,1],'SAME'])
    op_list.append(['conv', 256, 64, [1, 1], [1, 1], 'SAME'])
    incept_2 = inception_("incept_v1", op_list, incept_1, dtype, dev)
    print(incept_2)

    h_pool5 = mxPool(incept_2, [1, 1, 3, 3], [1, 1, 2, 2], 'SAME',
                     'NCHW') if dev == 'gpu' else mxPool(
                         incept_2, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME', 'NHWC')
    print(h_pool5)

    op_list = []
    op_list.append(['conv', 480, 192, [1, 1], [1, 1], 'SAME'])
    #op_list.append(['conv',480,96,[1,1],[1,1],'SAME'])
    op_list.append(['conv', 480, 208, [3, 3], [1, 1], 'SAME'])
    #op_list.append(['conv',480,16,[1,1],[1,1],'SAME'])
    op_list.append(['conv', 480, 48, [5, 5], [1, 1], 'SAME'])
    #op_list.append(['mxpool',480,None,[3,3],[1,1],'SAME'])
    op_list.append(['conv', 480, 64, [1, 1], [1, 1], 'SAME'])
    incept_3 = inception_("incept_v1", op_list, h_pool5, dtype, dev)
    print(incept_3)

    op_list = []
    op_list.append(['conv', 512, 160, [1, 1], [1, 1], 'SAME'])
    #op_list.append(['conv',512,112,[1,1],[1,1],'SAME'])
    op_list.append(['conv', 512, 224, [3, 3], [1, 1], 'SAME'])
    #op_list.append(['conv',512,24,[1,1],[1,1],'SAME'])
    op_list.append(['conv', 512, 64, [5, 5], [1, 1], 'SAME'])
    #op_list.append(['mxpool',512,None,[3,3],[1,1],'SAME'])
    op_list.append(['conv', 512, 64, [1, 1], [1, 1], 'SAME'])
    incept_4 = inception_("incept_v1", op_list, incept_3, dtype, dev)
    print(incept_4)

    op_list = []
    op_list.append(['conv', 512, 128, [1, 1], [1, 1], 'SAME'])
    #op_list.append(['conv',512,128,[1,1],[1,1],'SAME'])
    op_list.append(['conv', 512, 256, [3, 3], [1, 1], 'SAME'])
    #op_list.append(['conv',512,24,[1,1],[1,1],'SAME'])
    op_list.append(['conv', 512, 64, [5, 5], [1, 1], 'SAME'])
    #op_list.append(['mxpool',512,None,[3,3],[1,1],'SAME'])
    op_list.append(['conv', 512, 64, [1, 1], [1, 1], 'SAME'])
    incept_5 = inception_("incept_v1", op_list, incept_4, dtype, dev)
    print(incept_5)

    op_list = []
    op_list.append(['conv', 512, 112, [1, 1], [1, 1], 'SAME'])
    #op_list.append(['conv',512,144,[1,1],[1,1],'SAME'])
    op_list.append(['conv', 512, 288, [3, 3], [1, 1], 'SAME'])
    #op_list.append(['conv',512,32,[1,1],[1,1],'SAME'])
    op_list.append(['conv', 512, 64, [5, 5], [1, 1], 'SAME'])
    #op_list.append(['mxpool',512,None,[3,3],[1,1],'SAME'])
    op_list.append(['conv', 512, 64, [1, 1], [1, 1], 'SAME'])
    incept_6 = inception_("incept_v1", op_list, incept_5, dtype, dev)
    print(incept_6)

    op_list = []
    op_list.append(['conv', 528, 256, [1, 1], [1, 1], 'SAME'])
    #op_list.append(['conv',528,160,[1,1],[1,1],'SAME'])
    op_list.append(['conv', 528, 320, [3, 3], [1, 1], 'SAME'])
    #op_list.append(['conv',528,32,[1,1],[1,1],'SAME'])
    op_list.append(['conv', 528, 128, [5, 5], [1, 1], 'SAME'])
    #op_list.append(['mxpool',528,None,[3,3],[1,1],'SAME'])
    op_list.append(['conv', 528, 128, [1, 1], [1, 1], 'SAME'])
    incept_7 = inception_("incept_v1", op_list, incept_6, dtype, dev)
    print(incept_7)

    h_pool6 = mxPool(incept_7, [1, 1, 3, 3], [1, 1, 2, 2], 'SAME',
                     'NCHW') if dev == 'gpu' else mxPool(
                         incept_7, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME', 'NHWC')
    print(h_pool6)

    op_list = []
    op_list.append(['conv', 832, 256, [1, 1], [1, 1], 'SAME'])
    #op_list.append(['conv',832,160,[1,1],[1,1],'SAME'])
    op_list.append(['conv', 832, 320, [3, 3], [1, 1], 'SAME'])
    #op_list.append(['conv',832,32,[1,1],[1,1],'SAME'])
    op_list.append(['conv', 832, 128, [5, 5], [1, 1], 'SAME'])
    #op_list.append(['mxpool',528,None,[3,3],[1,1],'SAME'])
    op_list.append(['conv', 832, 128, [1, 1], [1, 1], 'SAME'])
    incept_8 = inception_("incept_v1", op_list, h_pool6, dtype, dev)
    print(incept_8)

    op_list = []
    op_list.append(['conv', 832, 384, [1, 1], [1, 1], 'SAME'])
    #op_list.append(['conv',832,192,[1,1],[1,1],'SAME'])
    op_list.append(['conv', 832, 384, [3, 3], [1, 1], 'SAME'])
    #op_list.append(['conv',832,48,[1,1],[1,1],'SAME'])
    op_list.append(['conv', 832, 128, [5, 5], [1, 1], 'SAME'])
    #op_list.append(['mxpool',528,None,[3,3],[1,1],'SAME'])
    op_list.append(['conv', 832, 128, [1, 1], [1, 1], 'SAME'])
    incept_9 = inception_("incept_v1", op_list, incept_8, dtype, dev)
    print(incept_9)

    h_pool7 = avgPool(incept_9, [1, 1, 7, 7], [1, 1, 1, 1], 'VALID',
                      'NCHW') if dev == 'gpu' else avgPool(
                          incept_9, [1, 7, 7, 1], [1, 1, 1, 1], 'VALID',
                          'NHWC')
    print(h_pool7)

    y = tf.reshape(h_pool7, [-1, 1024])
    print(y)

    op_list = [op.name for op in g.get_operations()]
    # Collect FLOPs registered for each op; ops without a FLOPs
    # statistic count as zero.
    flop_stats = [
        ops.get_stats_for_node_def(g, op.node_def, 'flops')
        for op in g.get_operations()
    ]
    flop_list = [s.value if s.value is not None else 0 for s in flop_stats]

    if dev == 'cpu':
        config = tf.ConfigProto(graph_options=tf.GraphOptions(
            optimizer_options=tf.OptimizerOptions(
                opt_level=tf.OptimizerOptions.L0)),
                                device_count={'GPU': 0})
    else:
        config = tf.ConfigProto(graph_options=tf.GraphOptions(
            optimizer_options=tf.OptimizerOptions(
                opt_level=tf.OptimizerOptions.L0)))

    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(y.op)
        num_iters = 100

        start = time()
        for i in range(num_iters):
            if (i + 1) % 10 == 0:
                print("<", i // 10, ">")
            sess.run(y.op)
        elapsed = time() - start
        print("frames/sec:", num_iters * batch_size / elapsed)
        print("flops:", sum(flop_list))