Code example #1
def extract_convmat_resnet():
    img_ph = tf.placeholder(tf.float32, [None, 224, 224, 3])

    with arg_scope(resnet_v1.resnet_arg_scope()):
        net, end_points = resnet_v1.resnet_v1_50(img_ph,
                                                 1000,
                                                 is_training=False)
        saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, args.checkpoint_path)
        names = [v.name for v in tf.global_variables() if 'conv' in v.name]
        names = set(['/'.join(name.split('/')[:-1]) for name in names])
        plotting(names, shape=m_size)
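
The snippet above only collects the convolution variable scopes and hands them to a plotting helper that, together with m_size and args, is defined elsewhere in the project. As a minimal sketch, the kernel values for those scopes could be fetched inside the same session, assuming Slim's convention that each conv layer stores its kernel under '<scope>/weights'; the helper name fetch_conv_kernels is hypothetical:

def fetch_conv_kernels(sess, scope_names):
    # Assumes Slim's naming convention: a conv scope owns a 'weights' variable.
    # Scopes without one (e.g. BatchNorm sub-scopes whose names also contain
    # 'conv') are simply skipped.
    graph = tf.get_default_graph()
    kernels = {}
    for scope in scope_names:
        try:
            tensor = graph.get_tensor_by_name(scope + '/weights:0')
        except KeyError:
            continue
        kernels[scope] = sess.run(tensor)  # kernel values as a NumPy array
    return kernels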
Code example #2
def basenetwork(input_tensor, name='resnet50', dilation=False):
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
        if name == 'resnet50':
            net, end_points = resnet_v1.resnet_v1_50(input_tensor,
                                                     global_pool=False)
            for key in end_points.keys():
                print(key, end_points[key])
            new_end_points = {}
            new_end_points['stage0'] = end_points[
                'resnet_v1_50/block1/unit_2/bottleneck_v1']  # 128 128 4
            new_end_points['stage1'] = end_points[
                'resnet_v1_50/block1']  # 64 64 8
            new_end_points['stage2'] = end_points[
                'resnet_v1_50/block2']  # 32 32 16
            new_end_points['stage3'] = end_points['final_feature']  # 16 16 32
            return net, new_end_points
Code example #3
File: quantizer.py  Project: ustcdane/NNQuantization
def perform_quantization(resnet_model, checkpoint_file_path, classes,
                         quantization_algorithm, quantization_bits):
    """
    Loads the TensorFlow Slim model and variables contained in the checkpoint file, and then performs a quantization and
    dequantization of trainable variables (i.e. weights and batch norms). Finally, new values of the trainable variables
    are saved in a checkpoint file, which may be used to perform inference with it.
    """

    with tf.Graph().as_default():
        global_step = tf.train.get_or_create_global_step()  # needed by the Saver mechanism

        # RESNET V1 50 MODEL
        if resnet_model == 50:
            image_size = resnet_v1.resnet_v1_50.default_image_size
            with slim.arg_scope(resnet_v1.resnet_arg_scope()):
                resnet_v1.resnet_v1_50(np.zeros(
                    (64, image_size, image_size, 3), np.float32),
                                       num_classes=classes,
                                       is_training=False)
        # RESNET V1 101 MODEL
        elif resnet_model == 101:
            image_size = resnet_v1.resnet_v1_101.default_image_size
            with slim.arg_scope(resnet_v1.resnet_arg_scope()):
                resnet_v1.resnet_v1_101(np.zeros(
                    (64, image_size, image_size, 3), np.float32),
                                        num_classes=classes,
                                        is_training=False)
        # NOT RECOGNIZED MODEL
        else:
            raise Exception('Model type not recognized.')

        init_function = slim.assign_from_checkpoint_fn(
            checkpoint_file_path, slim.get_model_variables())

        with tf.Session() as session:
            # INITIALIZATION
            init_function(session)
            session.run(global_step.initializer)

            # GETTING ALL TRAINABLE VARIABLES - WEIGHTS AND BATCH NORMS
            variables = tf.trainable_variables()

            # RECORDING QUANTIZATION ERRORS
            quantization_errors = []

            # SAVER OPTIONS
            checkpoint_saver = tf.train.Saver()
            filename_position = checkpoint_file_path.find(
                checkpoint_file_path.rsplit('/')[-1])

            # BIN QUANTIZATION ALGORITHM
            if quantization_algorithm == 1:
                for variable_number, variable in enumerate(variables):
                    values = session.run(variable)

                    # BIN QUANTIZATION
                    quantized_data, min_value, max_value = bin_quantization \
                        .quantize(values.reshape(-1), quantization_bits)

                    # BIN DEQUANTIZATION
                    dequantized_data = bin_quantization \
                        .dequantize(quantized_data, quantization_bits, min_value, max_value) \
                        .reshape(values.shape)

                    # VARIABLE UPDATE and ERRORS SAVING
                    session.run(variable.assign(dequantized_data))
                    quantization_errors.append(
                        calculate_relative_error(dequantized_data, values))

                    # LOGGING
                    if (variable_number + 1) % 20 == 0 or (
                            variable_number + 1) == len(variables):
                        print('### Processed variables: {}/{}'.format(
                            variable_number + 1, len(variables)))

                # SAVING UPDATED MODEL
                output_file_path = checkpoint_file_path[:filename_position] + 'bin_{}bits/'.format(quantization_bits) + \
                                   checkpoint_file_path[filename_position:]

                saving_path = checkpoint_saver.save(session,
                                                    output_file_path,
                                                    global_step=global_step)
                print('Quantized variables saved under: {}\n'.format(
                    saving_path))

            # FIXED-POINT QUANTIZATION ALGORITHM
            elif quantization_algorithm == 2:
                for variable_number, variable in enumerate(variables):
                    values = session.run(variable)

                    # FIXED-POINT QUANTIZATION
                    quantized_data, shift_positions, fractional_part_width, has_sign = fixed_point_quantization \
                        .quantize(values.reshape(-1), quantization_bits)

                    # FIXED-POINT DEQUANTIZATION
                    dequantized_data = fixed_point_quantization \
                        .dequantize(quantized_data, shift_positions, fractional_part_width, has_sign) \
                        .reshape(values.shape)

                    # VARIABLE UPDATE and ERRORS SAVING
                    session.run(variable.assign(dequantized_data))
                    quantization_errors.append(
                        calculate_relative_error(dequantized_data, values))

                    # LOGGING
                    if (variable_number + 1) % 20 == 0 or (
                            variable_number + 1) == len(variables):
                        print('### Processed variables: {}/{}'.format(
                            variable_number + 1, len(variables)))

                # SAVING UPDATED MODEL
                output_file_path = checkpoint_file_path[:filename_position] + \
                                   'fixed-point_{}bits/'.format(quantization_bits) + \
                                   checkpoint_file_path[filename_position:]

                saving_path = checkpoint_saver.save(session,
                                                    output_file_path,
                                                    global_step=global_step)
                print('Quantized variables saved under: {}\n'.format(
                    saving_path))

            # NOT RECOGNIZED ALGORITHM
            else:
                raise Exception('Quantization algorithm type not recognized.')

            # ERRORS SUMMARY
            quantization_errors = np.array(quantization_errors)
            print('Min error: {:.2f}\nMax error: {:.2f}\nAvg error: {:.2f}'.
                  format(quantization_errors.min(), quantization_errors.max(),
                         quantization_errors.mean()))
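
A possible invocation of perform_quantization, shown only as a sketch; the checkpoint path and parameter values below are placeholders rather than values taken from the project:

# Quantize a ResNet-50 checkpoint (1000 classes) with the bin algorithm
# (quantization_algorithm=1) at 8 bits; the path is a placeholder.
perform_quantization(resnet_model=50,
                     checkpoint_file_path='/path/to/resnet_v1_50.ckpt',
                     classes=1000,
                     quantization_algorithm=1,
                     quantization_bits=8)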
Code example #4
def build_temporal(x, reuse, temporal_length, mode, batch_size):
    with tf.device('/gpu:1'):
        shape = x.get_shape().as_list()
        x = tf.reshape(x,
                       [batch_size * shape[1], shape[2], shape[3], shape[4]])

        # cropped_image = list()
        # for i in range(shape[0]):
        #     for b in boxes[i]:
        #         # original frame
        #         original = tf.reshape(x[i], [shape[1]*shape[2], shape[3], shape[4], shape[5]])
        #         cropped_image.append(tf.image.crop_and_resize(original, boxes=[b for _ in range(temporal_length*shape[2])],
        #                                                       box_ind=[j for j in range(temporal_length*shape[2])],
        #                                                       crop_size=[224, 224]))
        #         # flipped frame
        #         flipped = tf.image.flip_left_right(original)
        #         cropped_image.append(
        #             tf.image.crop_and_resize(flipped, boxes=[b for _ in range(temporal_length*shape[2])],
        #                                      box_ind=[j for j in range(temporal_length*shape[2])],
        #                                      crop_size=[224, 224]))
        # x = tf.stack(cropped_image)
        # shape = x.get_shape().as_list()
        # x = tf.reshape(x, [shape[0] * shape[1], shape[2], shape[3], shape[4]])

        with tf.variable_scope('ConvNet_t', reuse=reuse):
            with slim.arg_scope(resnet_v1.resnet_arg_scope()):
                resNet152, end_points = resnet_v1.resnet_v1_50(
                    x,
                    num_classes=None,
                    is_training=(mode == tf.estimator.ModeKeys.TRAIN),
                    global_pool=True,
                    output_stride=None,
                    spatial_squeeze=True,
                    reuse=None,
                    scope='resnet_v1_152')

            shape = resNet152.get_shape().as_list()
            input_flow = tf.reshape(resNet152, [
                int(shape[0] / 20 / temporal_length), 20 * temporal_length,
                2048, 1
            ])

            # input_flow = tf.transpose(input_flow, [0, 1, 3, 2])
            # input_flow = tf.reshape(input_flow, [-1, params['flow_feature_shape'][2], 2048 * 2, 1])
            # input_flow = tf.reshape(input_flow, [-1, params['flow_feature_shape'][2]*2, 2048, 1])
            conv4 = tf.layers.conv2d(
                inputs=input_flow,
                filters=16,
                kernel_initializer=tf.contrib.layers.xavier_initializer(),
                use_bias=False,
                kernel_size=[20, 1],
                padding="same",
                activation=None,
                strides=[20, 1],
                name='conv4')
            conv_bn4 = tf.layers.batch_normalization(
                conv4,
                training=(mode == tf.estimator.ModeKeys.TRAIN),
                name='conv_bn4')
            conv_act4 = tf.nn.relu(conv_bn4, name="conv_act4")

            conv4_a = tf.layers.conv2d(
                inputs=conv_act4,
                filters=16,
                kernel_initializer=tf.contrib.layers.xavier_initializer(),
                use_bias=False,
                kernel_size=[temporal_length, 1],
                padding="same",
                activation=None,
                strides=[temporal_length, 1],
                name='conv4_a_t')
            conv_bn4_a = tf.layers.batch_normalization(
                conv4_a,
                training=(mode == tf.estimator.ModeKeys.TRAIN),
                name='conv_bn4_a')
            conv_act4_a = tf.nn.relu(conv_bn4_a, name="conv_act4_a_t")
            conv_act4_a = tf.transpose(conv_act4_a, [0, 3, 2, 1])

            conv5 = tf.layers.conv2d(
                inputs=conv_act4_a,
                filters=8,
                kernel_initializer=tf.contrib.layers.xavier_initializer(),
                use_bias=False,
                kernel_size=[1, 1],
                padding="same",
                activation=None,
                strides=[1, 1],
                name='conv5_t')
            conv_bn5 = tf.layers.batch_normalization(
                conv5,
                training=(mode == tf.estimator.ModeKeys.TRAIN),
                name='conv_bn5_t')
            conv_act5 = tf.nn.relu(conv_bn5, name="conv_act5_t")

            conv6 = tf.layers.conv2d(
                inputs=conv_act5,
                filters=4,
                kernel_initializer=tf.contrib.layers.xavier_initializer(),
                use_bias=False,
                kernel_size=[16, 1],
                padding="same",
                activation=None,
                strides=[16, 1],
                name='conv6_t')
            conv_bn6 = tf.layers.batch_normalization(
                conv6,
                training=(mode == tf.estimator.ModeKeys.TRAIN),
                name='conv_bn6_t')
            conv_act6 = tf.nn.relu(conv_bn6, name="conv_act6_t")
            return conv_act6
Code example #5
def build_spatial(x, reuse, temporal_length, mode, batch_size):
    with tf.device('/gpu:1'):
        # data augmentation * 10
        shape = x.get_shape().as_list()
        x = tf.reshape(x,
                       [batch_size * shape[1], shape[2], shape[3], shape[4]])

        # cropped_image = list()
        # for i in range(shape[0]):
        #     for b in boxes[i]:
        #         # original frame
        #         cropped_image.append(tf.image.crop_and_resize(x[i], boxes=[b for _ in range(temporal_length)],
        #                              box_ind=[j for j in range(temporal_length)], crop_size=[224, 224]))
        #         # flipped frame
        #         cropped_image.append(tf.image.crop_and_resize(tf.image.flip_left_right(x[i]), boxes=[b for _ in range(temporal_length)],
        #                                                       box_ind=[j for j in range(temporal_length)],
        #                                                       crop_size=[224, 224]))
        # x = tf.stack(cropped_image)
        # shape = x.get_shape().as_list()
        # x = tf.reshape(x, [shape[0]*shape[1], shape[2], shape[3], shape[4]])

        with tf.variable_scope('ConvNet_s', reuse=reuse):
            with slim.arg_scope(resnet_v1.resnet_arg_scope()):
                resNet152, end_points = resnet_v1.resnet_v1_50(
                    x,
                    num_classes=None,
                    is_training=(mode == tf.estimator.ModeKeys.TRAIN),
                    global_pool=True,
                    output_stride=None,
                    spatial_squeeze=True,
                    reuse=None,
                    scope='resnet_v1_152')

            shape = resNet152.get_shape().as_list()
            input_rgb = tf.reshape(
                resNet152,
                [int(shape[0] / temporal_length), temporal_length, 2048, 1])

            conv1 = tf.layers.conv2d(
                inputs=input_rgb,
                filters=16,
                kernel_initializer=tf.contrib.layers.xavier_initializer(),
                use_bias=True,
                kernel_size=[temporal_length, 1],
                padding="same",
                activation=None,
                strides=[temporal_length, 1],
                name='conv1_s')
            conv_bn1 = tf.layers.batch_normalization(
                conv1,
                training=(mode == tf.estimator.ModeKeys.TRAIN),
                name='conv_bn1_s')
            conv_act1 = tf.nn.relu(conv_bn1, name="conv_act1_s")

            conv_act1 = tf.transpose(conv_act1, [0, 3, 2, 1])

            conv2 = tf.layers.conv2d(
                inputs=conv_act1,
                filters=8,
                kernel_initializer=tf.contrib.layers.xavier_initializer(),
                use_bias=True,
                kernel_size=[1, 1],
                padding="same",
                activation=None,
                strides=[1, 1],
                name='conv2_s')
            conv_bn2 = tf.layers.batch_normalization(
                conv2,
                training=(mode == tf.estimator.ModeKeys.TRAIN),
                name='conv_bn2_s')
            conv_act2 = tf.nn.relu(conv_bn2, name="conv_act2_s")

            conv3 = tf.layers.conv2d(
                inputs=conv_act2,
                filters=4,
                kernel_initializer=tf.contrib.layers.xavier_initializer(),
                use_bias=False,
                kernel_size=[16, 1],
                padding="same",
                activation=None,
                strides=[16, 1],
                name='conv3_s')
            conv_bn3 = tf.layers.batch_normalization(
                conv3,
                training=(mode == tf.estimator.ModeKeys.TRAIN),
                name='conv_bn3_s')
            conv_act3 = tf.nn.relu(conv_bn3, name="conv_act3_s")

            return conv_act3
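
A hedged usage sketch for the spatial stream above, assuming a 5-D input of shape [batch, temporal_length, height, width, channels] as implied by the reshape at the top of the function; all concrete values below are placeholders:

# Placeholder batch of 4 clips, 16 frames each, 224x224 RGB.
frames = tf.placeholder(tf.float32, [4, 16, 224, 224, 3])
spatial_features = build_spatial(frames,
                                 reuse=False,
                                 temporal_length=16,
                                 mode=tf.estimator.ModeKeys.TRAIN,
                                 batch_size=4)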