Code example #1
def cnn_visualize_activations(hyperparam_dict, activation_image,batch_size):

    scope_list = hyperparam_dict['layers']
    print(scope_list)
    #activation = hyperparam_dict['activations']
    activation = config.ACTIVATION
    activation_dict = {}

    for scope in scope_list:

        mod_weight_string = config.TF_WEIGHTS_STR + ':0'
        mod_bias_string = config.TF_BIAS_STR + ':0'
        with tf.variable_scope(scope,reuse=True):
            if 'conv' in scope:
                with tf.variable_scope('best', reuse=True):
                    weight, bias = tf.get_variable(config.TF_WEIGHTS_STR), tf.get_variable(config.TF_BIAS_STR)
                    weight.set_shape(hyperparam_dict[scope]['weights'])
                    bias.set_shape([hyperparam_dict[scope]['weights'][-1]])

                    if scope=='conv_0':
                        logger.info('\t\tConvolution with %s activation for %s', activation, scope)
                        h = models_utils.activate(
                            tf.nn.conv2d(activation_image, weight, strides=hyperparam_dict[scope]['stride'], padding='SAME') + bias,
                            activation, name='hidden')
                    else:
                        logger.info('\t\tConvolution with %s activation for %s', activation, scope)
                        h = models_utils.activate(
                            tf.nn.conv2d(h, weight, strides=hyperparam_dict[scope]['stride'], padding='SAME') + bias,
                            activation, name='hidden')

                activation_dict[scope] = h

            elif 'pool' in scope:
                logger.info('\t\tMax pooling for %s', scope)
                h = tf.nn.max_pool(h, hyperparam_dict[scope]['weights'], hyperparam_dict[scope]['stride'],
                                   padding='SAME', name='pool_hidden')

            elif 'fulcon' in scope:

                # Reshaping required for the first fulcon layer
                #if scope == 'fulcon_out':
                #    logger.info('\t\tFully-connected with output Logits for %s', scope)
                #    h = tf.matmul(h, weight) + bias

                if scope == 'fulcon_0':
                    h_shape = h.get_shape().as_list()
                    logger.info('\t\t\tReshaping the input (of size %s) before feeding to %s', h_shape, scope)
                    h = tf.reshape(h, [batch_size, h_shape[1] * h_shape[2] * h_shape[3]])

                    for di in ['left','straight','right']:
                        with tf.variable_scope(di,reuse=True):
                            with tf.variable_scope('best', reuse = True):
                                weight, bias = tf.get_variable(config.TF_WEIGHTS_STR), tf.get_variable(
                                    config.TF_BIAS_STR)

                                h_tmp = models_utils.activate(tf.matmul(h, weight) + bias, activation)
                                activation_dict[scope+'-'+di] = h_tmp


    return activation_dict
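
A minimal usage sketch for the function above (hedged: sess, image_placeholder, image_batch and hyperparam_dict are assumed to exist, and the model variables, including the nested 'best' scopes, must already be in the graph, since every scope is opened with reuse=True):

# Hypothetical driver: build the activation ops once, then evaluate them in a session.
activation_ops = cnn_visualize_activations(hyperparam_dict, image_placeholder, batch_size=1)
activation_values = sess.run(activation_ops, feed_dict={image_placeholder: image_batch})
for name, act in activation_values.items():
    print(name, act.shape)  # e.g. 'conv_0' -> (1, H, W, num_filters)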
Code example #2
def calculate_loss(tf_logits, tf_labels, weigh_by_frequency=False):
    mask_predictions = False
    random_masking = False
    use_heuristic_weights = True

    if not use_heuristic_weights:
        tf_label_weights_inv = 1.0 - tf.reduce_mean(tf.abs(tf_labels), axis=[0])
    else:
        tf_label_weights_inv = tf_labels * tf.constant([1.0, 0.5, 1.0], dtype=tf.float32)  # because we use 1-weights for weighting

    if mask_predictions and not random_masking:
        masked_preds = models_utils.activate(tf_logits,activation_type=output_activation) * tf.cast(tf.not_equal(tf_labels,0.0),dtype=tf.float32)
        if weigh_by_frequency:
            loss = tf.reduce_mean(tf.reduce_sum((masked_preds - tf_labels)**2 *tf_label_weights_inv,axis=[1]),axis=[0])
        else:
            loss = tf.reduce_mean(tf.reduce_sum((masked_preds - tf_labels) ** 2, axis=[1]),axis=[0])
    elif random_masking:
        rand_mask = tf.cast(tf.greater(tf.random_normal([config.BATCH_SIZE,3], dtype=tf.float32),0.0),dtype=tf.float32)
        masked_preds = models_utils.activate(tf_logits,activation_type=output_activation) * (tf.cast(tf.not_equal(tf_labels, 0.0), dtype=tf.float32) + rand_mask)
        if weigh_by_frequency:
            loss = tf.reduce_mean(tf.reduce_sum((masked_preds - tf_labels) ** 2 *tf_label_weights_inv, axis=[1]), axis=[0])
        else:
            loss = tf.reduce_mean(tf.reduce_sum((masked_preds - tf_labels) ** 2 , axis=[1]), axis=[0])
    else:
        # use appropriately to weigh output *(1-tf_label_weights)
        if weigh_by_frequency:
            loss = tf.reduce_mean(tf.reduce_sum(((models_utils.activate(tf_logits,activation_type=output_activation) - tf_labels)**2)*tf_label_weights_inv,axis=[1]),axis=[0])
        else:
            loss = tf.reduce_mean(tf.reduce_sum(
                ((models_utils.activate(tf_logits, activation_type=output_activation) - tf_labels) ** 2), axis=[1]), axis=[0])

    return loss
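
For reference, the unmasked branch above reduces to a per-sample sum of (optionally weighted) squared errors followed by a mean over the batch. A self-contained NumPy sketch of that reduction, with made-up values (only the [left, straight, right] layout and the heuristic [1.0, 0.5, 1.0] weights mirror the TF code):

import numpy as np

def weighted_mse(preds, labels, weights):
    # preds, labels, weights: (batch_size, 3) arrays for [left, straight, right]
    per_sample = np.sum(((preds - labels) ** 2) * weights, axis=1)  # sum over the 3 outputs
    return np.mean(per_sample)                                      # mean over the batch

preds = np.array([[0.9, 0.1, 0.0], [0.2, 0.8, 0.1]])
labels = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
weights = labels * np.array([1.0, 0.5, 1.0])  # heuristic weighting, as in calculate_loss
print(weighted_mse(preds, labels, weights))   # 0.015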
Code example #3
def calculate_loss(tf_logits, tf_labels):
    use_cross_entropy = True
    if not use_cross_entropy:
        loss = tf.reduce_mean(tf.reduce_sum((
            (models_utils.activate(tf_logits, activation_type=out_activation) -
             tf_labels)**2),
                                            axis=[1]),
                              axis=[0])
    else:
        loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=tf_labels,
                                                    logits=tf_logits))
    #loss = tf.reduce_mean(tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=tf_logits,labels=tf_labels),axis=[1]))
    return loss
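
As a reminder of what the cross-entropy branch computes: tf.nn.sigmoid_cross_entropy_with_logits evaluates, per element, the numerically stable form of -z*log(sigmoid(x)) - (1-z)*log(1-sigmoid(x)), namely max(x, 0) - x*z + log(1 + exp(-|x|)). A NumPy sketch with illustrative values:

import numpy as np

def sigmoid_ce_with_logits(labels, logits):
    # Stable form of -z*log(sigmoid(x)) - (1-z)*log(1-sigmoid(x))
    x, z = logits, labels
    return np.maximum(x, 0) - x * z + np.log1p(np.exp(-np.abs(x)))

logits = np.array([[2.0, -1.0, 0.5]])
labels = np.array([[1.0, 0.0, 0.0]])
print(np.mean(sigmoid_ce_with_logits(labels, logits)))  # matches tf.reduce_mean(...) above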
Code example #4
def logits_detached(tf_inputs,is_training):
    '''
    Run inference with the model. The input (tf_inputs) is propagated through convolution,
    pooling and fully-connected layers to obtain the final logits (one per direction).
    :param tf_inputs: a batch of images (tensorflow placeholder)
    :param is_training: if True, input dropout is applied (when config.USE_DROPOUT is also set)
    :return: the output logits tensor (pre-activation)
    '''
    global logger
    logger.info('Defining inference ops ...')
    all_directions = ['left', 'straight', 'right']
    with tf.name_scope('infer'):
        for si, scope in enumerate(config.TF_ANG_SCOPES):
            with tf.variable_scope(scope,reuse=True) as sc:

                if 'conv' in scope:
                    weight, bias = tf.get_variable(config.TF_WEIGHTS_STR), tf.get_variable(config.TF_BIAS_STR)
                    logger.info('\t\tConvolution with ReLU activation for %s',scope)
                    if si == 0:
                        if is_training and config.USE_DROPOUT:
                            tf_inputs = tf.nn.dropout(tf_inputs,1.0 - config.IN_DROPOUT,name='input_dropout')
                        h = models_utils.activate(tf.nn.conv2d(tf_inputs,weight,strides=stride_dict[scope],padding='SAME')+bias,activation,name='hidden')

                        if config.USE_DROPOUT:
                            h = h * tf.reshape(conv_dropout_placeholder_dict[scope],[1,1,1,-1])

                    else:
                        h = models_utils.activate(
                            tf.nn.conv2d(h, weight, strides=stride_dict[scope], padding='SAME') + bias, activation,
                            name='hidden')
                        if config.USE_DROPOUT:
                            h = h * tf.reshape(conv_dropout_placeholder_dict[scope], [1, 1, 1, -1])

                elif 'pool' in scope:
                    logger.info('\t\tMax pooling for %s', scope)
                    h = tf.nn.max_pool(h,config.TF_ANG_VAR_SHAPES_DETACHED[scope],config.TF_ANG_STRIDES[scope],padding='SAME',name='pool_hidden')
                else:
                    # Reshaping required for the first fulcon layer
                    if scope == 'out':
                        logger.info('\t\tFully-connected with output Logits for %s',scope)

                        h_out_list = []
                        for di in all_directions:
                            with tf.variable_scope(di, reuse=True):
                                weights, bias = tf.get_variable(config.TF_WEIGHTS_STR), tf.get_variable(
                                    config.TF_BIAS_STR)
                                h_out_list.append(tf.matmul(h_list[di], weights) + bias)

                        h = tf.squeeze(tf.stack(h_out_list,axis=1))

                    elif 'fc' in scope:
                        if scope == config.TF_FIRST_FC_ID:
                            h_shape = h.get_shape().as_list()
                            logger.info('\t\t\tReshaping the input (of size %s) before feeding to %s', h_shape, scope)
                            h = tf.reshape(h, [config.BATCH_SIZE, h_shape[1] * h_shape[2] * h_shape[3]])

                            h_list = {}
                            for di in all_directions:
                                with tf.variable_scope(di, reuse=True):
                                    weights, bias = tf.get_variable(config.TF_WEIGHTS_STR), tf.get_variable(
                                        config.TF_BIAS_STR)
                                    h_tmp = models_utils.activate(tf.matmul(h, weights) + bias, activation)

                                    if config.USE_DROPOUT:
                                        h_list[di]= tf.nn.dropout(h_tmp, keep_prob=1.0 - config.LAYER_DROPOUT, name='dropout')
                                    else:
                                        h_list[di]=h_tmp
                        else:
                            raise NotImplementedError
                    else:
                        raise NotImplementedError

    return h
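
The 'out' scope above stacks one logit tensor per direction and squeezes the result. Assuming each per-direction output layer has a single unit (which the squeeze implies), the shapes work out as in this small sketch:

import tensorflow as tf

batch = 4
h_out_list = [tf.zeros([batch, 1]) for _ in range(3)]  # left, straight, right logits
h = tf.squeeze(tf.stack(h_out_list, axis=1))           # (4, 3, 1) -> (4, 3)
print(h.get_shape().as_list())                         # [4, 3]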
Code example #5
def cnn_visualize_activations_multiple(main_dir, sess, weights_filepath, hyperparam_filepath, activation_image,batch_size):
    hyperparam_dict = pickle.load(open(hyperparam_filepath,'rb'))

    scope_list = hyperparam_dict['layers']
    activation = hyperparam_dict['activations']

    activation_dict = {}

    for scope in scope_list:

        with tf.variable_scope(scope,reuse=True):
            if 'conv' in scope:
                h_per_di = []
                for di in config.TF_DIRECTION_LABELS:
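                    # NOTE: reuse=True is inherited from the enclosing variable_scope(scope, reuse=True),
                    # so the get_variable calls below fetch the existing per-direction variables.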
                    with tf.variable_scope(di):
                        key = scope + config.TF_SCOPE_DIVIDER + di
                        weight, bias = tf.get_variable(config.TF_WEIGHTS_STR), tf.get_variable(config.TF_BIAS_STR)
                        weight.set_shape(hyperparam_dict[scope]['weights_size'])
                        bias.set_shape([hyperparam_dict[scope]['weights_size'][-1]])

                        if scope=='conv1':
                            logger.info('\t\tConvolution with %s activation for %s', activation, scope)
                            h_local = models_utils.activate(
                                tf.nn.conv2d(activation_image, weight, strides=hyperparam_dict[scope]['stride'], padding='SAME') + bias,
                                activation, name='hidden')
                            h_per_di.append(h_local)

                        else:
                            logger.info('\t\tConvolution with %s activation for %s', activation, scope)
                            h_local = models_utils.activate(
                                tf.nn.conv2d(h, weight, strides=hyperparam_dict[scope]['stride'], padding='SAME') + bias,
                                activation, name='hidden')
                            h_per_di.append(h_local)

                        activation_dict[key] = h_local

                h = tf.concat(h_per_di,axis=3)

            elif 'pool' in scope:
                logger.info('\t\tMax pooling for %s', scope)
                h = tf.nn.max_pool(h, hyperparam_dict[scope]['weights_size'], hyperparam_dict[scope]['stride'],
                                   padding='SAME', name='pool_hidden')

            else:

                # Reshaping required for the first fulcon layer
                if scope == 'out':
                    continue

                elif 'fc' in scope:
                    if scope == 'fc1':
                        h_per_di = []
                        for di in config.TF_DIRECTION_LABELS:
                            key = scope + config.TF_SCOPE_DIVIDER + di
                            with tf.variable_scope(di):

                                weight, bias = tf.get_variable(config.TF_WEIGHTS_STR), tf.get_variable(config.TF_BIAS_STR)

                                h_shape = h.get_shape().as_list()
                                logger.info('\t\t\tReshaping the input (of size %s) before feeding to %s', h_shape, scope)
                                h_local = tf.reshape(h, [batch_size, h_shape[1] * h_shape[2] * h_shape[3]])
                                h_local = models_utils.activate(tf.matmul(h_local, weight) + bias, activation)
                                h_per_di.append(h_local)

                            activation_dict[key] = h_local

                        h = tf.concat(h_per_di,axis=1)
                    else:
                        raise NotImplementedError
                else:
                    raise NotImplementedError

    return activation_dict
Code example #6
def logits(tf_inputs, direction=None):
    '''
    Run inference with the model. The input (tf_inputs) is propagated through convolution,
    pooling and fully-connected layers to obtain the final logits (one per direction).
    :param tf_inputs: a batch of images (tensorflow placeholder)
    :param direction: if given, only that direction's output head produces the final logits
    :return: the output logits tensor (pre-activation)
    '''
    global logger
    logger.info('Defining inference ops ...')

    with tf.name_scope('infer'):

        for si, scope in enumerate(config.TF_ANG_SCOPES):
            with tf.variable_scope(scope, reuse=True) as sc:

                if 'conv' in scope:
                    logger.info('\t\tConvolution with ReLU activation for %s',
                                scope)
                    if si == 0:
                        h_per_di = []
                        used_labels = []
                        for di in config.TF_DIRECTION_LABELS:
                            if di in used_labels:
                                continue

                            with tf.variable_scope(di, reuse=True):
                                weight, bias = tf.get_variable(
                                    config.TF_WEIGHTS_STR), tf.get_variable(
                                        config.TF_BIAS_STR)
                                logger.info('\t\t\tConvolution %s (%s)', di,
                                            weight.get_shape().as_list())
                                if not config.USE_DILATION:
                                    h_per_di.append(
                                        models_utils.activate(
                                            tf.nn.conv2d(tf_inputs,
                                                         weight,
                                                         strides=config.
                                                         TF_ANG_STRIDES[scope],
                                                         padding='SAME') +
                                            bias,
                                            activation,
                                            name='hidden'))
                                else:

                                    h_per_di.append(
                                        models_utils.activate(
                                            tf.nn.convolution(
                                                tf_inputs,
                                                weight,
                                                dilation_rate=config.
                                                TF_DILATION[scope],
                                                padding='SAME') + bias,
                                            activation,
                                            name='dilated-hidden'))
                            used_labels.append(di)
                        h = tf.concat(values=h_per_di, axis=3)
                        logger.info('\t\tConcat Shape (%s)',
                                    h.get_shape().as_list())
                    else:
                        h_per_di = []
                        used_labels = []
                        for di in config.TF_DIRECTION_LABELS:
                            if di in used_labels:
                                continue
                            with tf.variable_scope(di, reuse=True):
                                weight, bias = tf.get_variable(
                                    config.TF_WEIGHTS_STR), tf.get_variable(
                                        config.TF_BIAS_STR)
                                logger.info('\t\t\tConvolution %s (%s)', di,
                                            weight.get_shape().as_list())

                                if not config.USE_DILATION:
                                    h_per_di.append(
                                        models_utils.activate(
                                            tf.nn.conv2d(h,
                                                         weight,
                                                         strides=config.
                                                         TF_ANG_STRIDES[scope],
                                                         padding='SAME') +
                                            bias,
                                            activation,
                                            name='hidden'))
                                else:
                                    h_per_di.append(
                                        models_utils.activate(
                                            tf.nn.convolution(
                                                h,
                                                weight,
                                                padding='SAME',
                                                dilation_rate=config.
                                                TF_DILATION[scope]) + bias,
                                            activation,
                                            name='dilated-hidden'))
                            used_labels.append(di)
                        h = tf.concat(values=h_per_di, axis=3)
                        logger.info('\t\tConcat Shape (%s)',
                                    h.get_shape().as_list())
                elif 'pool' in scope:
                    logger.info('\t\tMax pooling for %s', scope)
                    h = tf.nn.max_pool(
                        h,
                        config.TF_ANG_VAR_SHAPES_MULTIPLE[scope],
                        config.TF_ANG_STRIDES[scope],
                        padding='SAME',
                        name='pool_hidden')

                else:
                    # Reshaping required for the first fulcon layer
                    if scope == 'out':
                        if direction is None:
                            logger.info(
                                '\t\tFully-connected with output Logits for %s',
                                scope)
                            assert config.TF_ANG_VAR_SHAPES_MULTIPLE[scope][
                                0] == config.TF_ANG_VAR_SHAPES_MULTIPLE['fc1'][
                                    1] * 5
                            h_per_di = []

                            for di in config.TF_DIRECTION_LABELS:
                                with tf.variable_scope(di):
                                    weight, bias = tf.get_variable(
                                        config.TF_WEIGHTS_STR
                                    ), tf.get_variable(config.TF_BIAS_STR)
                                    h_per_di.append(
                                        tf.matmul(h, weight) + bias)

                            h = tf.concat(h_per_di, axis=1)
                        else:
                            with tf.variable_scope(direction):
                                weight, bias = tf.get_variable(
                                    config.TF_WEIGHTS_STR), tf.get_variable(
                                        config.TF_BIAS_STR)
                                h = tf.matmul(h, weight) + bias

                    elif 'fc' in scope:
                        if scope == config.TF_FIRST_FC_ID:
                            h_per_di = []
                            used_labels = []
                            for di in config.TF_DIRECTION_LABELS:
                                if di in used_labels:
                                    continue
                                with tf.variable_scope(di):
                                    weight, bias = tf.get_variable(
                                        config.TF_WEIGHTS_STR
                                    ), tf.get_variable(config.TF_BIAS_STR)
                                    h_shape = h.get_shape().as_list()
                                    logger.info(
                                        '\t\t\tReshaping the input (of size %s) before feeding to %s',
                                        h_shape, scope)
                                    h_di = tf.reshape(h, [
                                        batch_size,
                                        h_shape[1] * h_shape[2] * h_shape[3]
                                    ])
                                    h_per_di.append(
                                        models_utils.activate(
                                            tf.matmul(h_di, weight) + bias,
                                            activation))
                                used_labels.append(di)
                            h = tf.concat(h_per_di, axis=1)
                        else:
                            raise NotImplementedError
                    else:
                        raise NotImplementedError

    return h
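
One detail worth noting in this multi-column variant is the concatenation axis: per-direction convolution outputs are joined along the channel axis (axis=3), while fully-connected and output logits are joined along the feature axis (axis=1). A small shape sketch with made-up sizes (three direction columns purely for illustration):

import tensorflow as tf

batch = 2
# Direction-specific conv outputs, each (batch, H, W, C):
conv_outs = [tf.zeros([batch, 8, 16, 32]) for _ in range(3)]
print(tf.concat(conv_outs, axis=3).get_shape().as_list())  # [2, 8, 16, 96]

# Direction-specific fully-connected outputs, each (batch, units):
fc_outs = [tf.zeros([batch, 100]) for _ in range(3)]
print(tf.concat(fc_outs, axis=1).get_shape().as_list())    # [2, 300]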
Code example #7
def predictions_with_inputs(tf_inputs):

    tf_logits = logits(tf_inputs)
    pred = models_utils.activate(tf_logits, activation_type=out_activation)
    return pred
Code example #8
def logits(tf_inputs,is_training,direction=None):
    '''
    Run inference with the model. The input (tf_inputs) is propagated through convolution,
    pooling and fully-connected layers to obtain the final logits (one per direction).
    :param tf_inputs: a batch of images (tensorflow placeholder)
    :param is_training: if True, dropout is applied (when config.USE_DROPOUT is also set)
    :param direction: if given, only that direction's output head produces the final logits
    :return: the output logits tensor (pre-activation)
    '''
    global logger
    logger.info('Defining inference ops ...')
    with tf.name_scope('infer'):
        for si, scope in enumerate(scope_list):
            with tf.variable_scope(scope,reuse=True) as sc:

                if 'conv' in scope:
                    weight, bias = tf.get_variable(config.TF_WEIGHTS_STR), tf.get_variable(config.TF_BIAS_STR)
                    logger.info('\t\tConvolution with %s activation for %s',activation,scope)
                    if si == 0:
                        if is_training and config.USE_DROPOUT:
                            tf_inputs = tf.nn.dropout(tf_inputs,1.0 - config.IN_DROPOUT,name='input_dropout')
                        h = models_utils.activate(tf.nn.conv2d(tf_inputs,weight,strides=stride_dict[scope],padding='SAME')+bias,activation,name='hidden')
                    else:
                        h = models_utils.activate(tf.nn.conv2d(h, weight, strides=stride_dict[scope], padding='SAME') + bias, activation,
                                       name='hidden')
                elif 'pool' in scope:
                    logger.info('\t\tMax pooling for %s', scope)
                    h = tf.nn.max_pool(h,kernel_size_dict[scope],stride_dict[scope],padding='SAME',name='pool_hidden')
                    if is_training and config.USE_DROPOUT:
                        h = tf.nn.dropout(h, 1.0 - config.LAYER_DROPOUT, name='pool_dropout')
                else:

                    # Reshaping required for the first fulcon layer
                    if scope == 'out':
                        logger.info('\t\tFully-connected with output Logits for %s',scope)
                        if direction is None:
                            h_per_di = []
                            for di in ['left','straight','right']:
                                with tf.variable_scope(di,reuse=True):
                                    weight, bias = tf.get_variable(config.TF_WEIGHTS_STR), tf.get_variable(
                                        config.TF_BIAS_STR)
                                    h_per_di.append(tf.matmul(h, weight) + bias)

                            h = tf.concat(values=h_per_di,axis=1)
                        else:
                            with tf.variable_scope(direction, reuse=True):
                                weight, bias = tf.get_variable(config.TF_WEIGHTS_STR), tf.get_variable(
                                    config.TF_BIAS_STR)
                                h = tf.matmul(h, weight) + bias

                    elif 'fc' in scope:
                        weight, bias = tf.get_variable(config.TF_WEIGHTS_STR), tf.get_variable(config.TF_BIAS_STR)
                        if scope == config.TF_FIRST_FC_ID:
                            h_shape = h.get_shape().as_list()
                            logger.info('\t\t\tReshaping the input (of size %s) before feeding to %s', h_shape, scope)
                            h = tf.reshape(h, [config.BATCH_SIZE, h_shape[1] * h_shape[2] * h_shape[3]])
                            h = models_utils.activate(tf.matmul(h, weight) + bias, activation)

                            if is_training and config.USE_DROPOUT:
                                h = tf.nn.dropout(h,1.0-config.LAYER_DROPOUT, name='hidden_dropout')

                        else:
                            raise NotImplementedError
                    else:
                        raise NotImplementedError

    return h
Code example #9
def predictions_with_inputs(tf_inputs):
    tf_logits = logits(tf_inputs,is_training=False,direction=None)
    return models_utils.activate(tf_logits,activation_type=output_activation)
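
A hedged end-to-end usage sketch for the two functions above (the placeholder shape, image_batch and the checkpoint path are illustrative assumptions; the model variables must already exist in the graph, since logits opens every scope with reuse=True):

tf_test_images = tf.placeholder(tf.float32,
                                shape=[config.BATCH_SIZE, 96, 128, 3],
                                name='test_images')
tf_predictions = predictions_with_inputs(tf_test_images)

with tf.Session() as sess:
    tf.train.Saver().restore(sess, 'path/to/checkpoint')  # hypothetical checkpoint path
    preds = sess.run(tf_predictions, feed_dict={tf_test_images: image_batch})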