Example #1
def build_val_graph(config, dataset):
    with tf.device('/cpu:0'):
        inputs, labels = image_processing.inputs(
            dataset,
            batch_size=config['parameters']['batch_size'],
            height=config['input']['height'],
            width=config['input']['width'],
            channels=config['input']['channels'],
            num_preprocess_threads=8)

    with tf.device('/gpu:0'):
        logits, endpoints = cnn_architectures.create_model(
            config['model']['architecture'],
            inputs,
            is_training=False,
            num_classes=config['input']['classes'],
            reuse=True)

    labels = tf.cast(labels, tf.int64)  # sparse softmax cross entropy expects int64 labels
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)

    loss = tf.add_n([cross_entropy_mean] + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES), name='total_loss')

    correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    tf.summary.scalar('val/accuracy', accuracy, collections=['validation'])
    tf.summary.scalar('val/loss', loss, collections=['validation'])

    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name, var, collections=['validation'])

    return loss, accuracy, tf.summary.merge_all(key='validation')
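
Since create_model is called with reuse=True here, the weights are expected to already exist in the graph (typically created by a matching training graph). A minimal driver sketch for the returned ops follows; the log directory and the single evaluation step are assumptions, not part of the original:

# Hypothetical driver; assumes the shared variables were created elsewhere.
val_loss, val_accuracy, val_summary_op = build_val_graph(config, dataset)
writer = tf.summary.FileWriter('/tmp/val_logs')  # log path is an assumption

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    loss_v, acc_v, summary = sess.run([val_loss, val_accuracy, val_summary_op])
    writer.add_summary(summary, global_step=0)
    coord.request_stop()
    coord.join(threads)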
Example #2
def build_val_graph(config, dataset):
    with tf.device('/cpu:0'):
        inputs, labels = image_processing.inputs(
            dataset,
            batch_size=config['parameters']['batch_size'],
            height=config['input']['height'],
            width=config['input']['width'],
            channels=config['input']['channels'],
            num_preprocess_threads=8)

    with tf.device('/gpu:0'):
        logits, endpoints = cnn_architectures.create_model(
            config['model']['architecture'],
            inputs,
            is_training=False,
            num_classes=config['input']['classes'],
            reuse=True)

    if config['parameters']['loss'] == 'regression':
        labels = tf.cast(labels - config['parameters']['label_mean'],
                         tf.float32)  # center labels and cast to float32 for regression
        mean_squared_error = tf.losses.mean_squared_error(labels=labels,
                                                          predictions=logits)
        loss = tf.add_n([mean_squared_error] +
                        tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES),
                        name='total_loss')
        accuracy = tf.constant(0, shape=[], dtype=tf.float32)  # accuracy is undefined for regression
    elif config['parameters']['loss'] == 'classification':
        labels = tf.cast(labels // 5, tf.int64)  # bucket labels into classes of width 5

        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=labels)
        cross_entropy_mean = tf.reduce_mean(cross_entropy)
        loss = tf.add_n([cross_entropy_mean] +
                        tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES),
                        name='total_loss')

        correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    with tf.name_scope('metrics'):
        m_loss, loss_update_op = tf.contrib.metrics.streaming_mean(loss,
                                                                   name='loss')
        m_accuracy, accuracy_update_op = tf.contrib.metrics.streaming_mean(
            accuracy, name='accuracy')

    # re-initializing the streaming counters resets the accumulated metrics
    stream_vars = [v for v in tf.local_variables() if 'metrics' in v.name]
    reset_op = [tf.variables_initializer(stream_vars)]

    tf.summary.scalar('loss', m_loss, collections=['validation'])
    tf.summary.scalar('accuracy', m_accuracy, collections=['validation'])

    if config['output']['trainable_variables_to_summary']:
        for var in tf.trainable_variables():
            tf.summary.histogram(var.op.name, var, collections=['validation'])

    summary_op = tf.summary.merge_all(key='validation')
    update_op = tf.group(loss_update_op, accuracy_update_op)
    return m_loss, m_accuracy, summary_op, update_op, reset_op
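
The streaming means accumulate across batches in local variables, and running reset_op re-initializes those counters, so each validation pass starts fresh. A sketch of the intended evaluation loop; num_val_batches and the session setup are assumptions:

# Hypothetical evaluation loop over the streaming metrics above.
m_loss, m_accuracy, summary_op, update_op, reset_op = build_val_graph(config, dataset)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())  # streaming counters are local variables
    sess.run(reset_op)                          # start a fresh accumulation window
    for _ in range(num_val_batches):            # num_val_batches is an assumption
        sess.run(update_op)                     # fold one batch into the running means
    loss_v, acc_v = sess.run([m_loss, m_accuracy])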
Example #3
    def __init__(self,
                 model_path=f'{ROOT_PATH}/models/location/base_M/',
                 cnn_input_size=224,
                 use_cpu=True):
        # normpath strips any trailing slash so basename yields the model name
        model_name = os.path.basename(os.path.normpath(model_path))
        logging.info(f'Initialize {model_name} geolocation model.')
        self._cnn_input_size = cnn_input_size
        # despite its name, this placeholder holds a decoded uint8 image
        self._image_path_placeholder = tf.placeholder(tf.uint8,
                                                      shape=[None, None, None])
        self._image_crops, _ = self._img_preprocessing(
            self._image_path_placeholder)

        # load model config
        with open(os.path.join(model_path, 'cfg.json'), 'r') as cfg_file:
            cfg = json.load(cfg_file)

        # build cnn
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self._sess = tf.Session(config=config)

        model_file = os.path.join(model_path, 'model.ckpt')
        logging.info('\tRestore model from: {}'.format(model_file))

        with tf.variable_scope(model_name) as scope:
            self._scope = scope

        if use_cpu:
            device = '/cpu:0'
        else:
            device = '/gpu:0'

        with tf.variable_scope(self._scope):
            with tf.device(device):
                self._net, _ = cnn_architectures.create_model(
                    cfg['architecture'],
                    self._image_crops,
                    is_training=False,
                    num_classes=None,
                    reuse=None)

        # map checkpoint names (scope prefix and ':0' suffix stripped) to variables
        var_list = {
            re.sub('^' + self._scope.name + '/', '', x.name)[:-2]: x
            for x in tf.global_variables(self._scope.name)
        }

        # restore weights
        saver = tf.train.Saver(var_list=var_list)
        saver.restore(self._sess, str(model_file))
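
The var_list dict is what lets a checkpoint saved without the enclosing scope be restored into it: each in-graph variable name has the scope prefix and the ':0' suffix stripped to recover its checkpoint key. A standalone sketch of the same trick; the scope, shape, and checkpoint path are illustrative assumptions:

import re
import tensorflow as tf

# Variable lives at 'my_scope/conv1/weights:0' in this graph ...
with tf.variable_scope('my_scope'):
    with tf.variable_scope('conv1'):
        w = tf.get_variable('weights', shape=[3, 3, 3, 64])

# ... but is stored under 'conv1/weights' in the (hypothetical) checkpoint.
var_list = {
    re.sub('^my_scope/', '', v.name)[:-2]: v  # drop scope prefix and ':0'
    for v in tf.global_variables('my_scope')
}
saver = tf.train.Saver(var_list=var_list)
# saver.restore(sess, '/path/to/model.ckpt')  # path is an assumption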
Example #4
def init_cnn(sess, args, config, images_placeholder):
    with tf.device('/gpu:0'):
        logits, _ = cnn_architectures.create_model(
            config['model']['architecture'],
            images_placeholder,
            is_training=False,
            num_classes=config['input']['classes'],
            reuse=None)

    saver = tf.train.Saver()
    saver.restore(sess, args.model)
    print('---------------------------')
    print('Restore model from: {}'.format(args.model))

    return tf.nn.softmax(tf.squeeze(logits))
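
A sketch of how this helper might be invoked; the placeholder shape and image_batch are assumptions derived from the config keys used above:

# Hypothetical call site for init_cnn.
images_placeholder = tf.placeholder(
    tf.float32,
    shape=[None, config['input']['height'], config['input']['width'],
           config['input']['channels']])

with tf.Session() as sess:
    softmax_op = init_cnn(sess, args, config, images_placeholder)
    probs = sess.run(softmax_op, feed_dict={images_placeholder: image_batch})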
Example #5
    def __init__(self,
                 model_file,
                 cnn_input_size=224,
                 scope=None,
                 use_cpu=False):
        print('Initialize {} geolocation model.'.format(scope))
        self._cnn_input_size = cnn_input_size

        self._image_path_placeholder = tf.placeholder(tf.string, shape=())
        image_content = tf.read_file(self._image_path_placeholder)
        self._image_crops, self._bboxes = self._img_preprocessing(
            image_content)

        # load model config
        with open(os.path.join(os.path.dirname(model_file),
                               'cfg.json')) as cfg_file:
            cfg = json.load(cfg_file)

        # get partitioning
        print('\tGet geographical partitioning(s) ... ')
        partitioning_files = []
        partitionings = []
        for partitioning in cfg['partitionings']:
            partitioning_files.append(
                os.path.join(os.path.dirname(__file__), 'geo-cells',
                             partitioning))
            partitionings.append(partitioning)
        if len(partitionings) > 1:
            partitionings.append('hierarchy')

        self._num_partitionings = len(partitioning_files)  # without hierarchy

        # read geo partitioning
        classes_geo, hexids2classes, class2hexid, cell_centers = self._read_partitioning(
            partitioning_files)
        self._classes_geo = classes_geo
        self._cell_centers = cell_centers

        # get geographical hierarchy
        self._cell_hierarchy = self._get_geographical_hierarchy(
            classes_geo, hexids2classes, class2hexid, cell_centers)

        # build cnn
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self._sess = tf.Session(config=config)

        print('\tRestore model from: {}'.format(model_file))

        if scope is not None:
            with tf.variable_scope(scope) as scope:
                self._scope = scope
        else:
            self._scope = tf.get_variable_scope()

        if use_cpu:
            device = '/cpu:0'
        else:
            device = '/gpu:0'

        with tf.variable_scope(self._scope):
            with tf.device(device):
                net, _ = cnn_architectures.create_model(cfg['architecture'],
                                                        self._image_crops,
                                                        is_training=False,
                                                        num_classes=None,
                                                        reuse=None)

                with tf.variable_scope('classifier_geo', reuse=None):
                    self.logits = slim.conv2d(net,
                                              np.sum(classes_geo), [1, 1],
                                              activation_fn=None,
                                              normalizer_fn=None,
                                              scope='logits')
                    self.logits = tf.squeeze(self.logits)

        # map checkpoint names (scope prefix and ':0' suffix stripped) to variables
        var_list = {
            re.sub('^' + self._scope.name + '/', '', x.name)[:-2]: x
            for x in tf.global_variables(self._scope.name)
        }

        # restore weights
        saver = tf.train.Saver(var_list=var_list)
        saver.restore(self._sess, model_file)

        # get activations from last conv layer and output weights in order to calculate class activation maps
        self._activations = tf.get_default_graph().get_tensor_by_name(
            self._scope.name + '_1/resnet_v2_101/activations:0')
        activation_weights = tf.get_default_graph().get_tensor_by_name(
            self._scope.name + '/classifier_geo/logits/weights:0')

        activation_weights_v = self._sess.run(activation_weights)
        p_activation_weights = []
        for p in range(self._num_partitionings):
            start = np.sum(self._classes_geo[0:p + 1])
            end = np.sum(self._classes_geo[0:p + 2])
            p_activation_weights.append(
                activation_weights_v[:, :, :, start:end])

        self.network_dict = {
            'activation_weights': p_activation_weights,
            'partitionings': partitionings,
            'classes_geo': self._classes_geo,
            'cell_centers': self._cell_centers,
            'scope': self._scope.name
        }
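
The stored activations and per-partitioning weight slices are the ingredients for class activation maps: the map for a class is the channel-wise weighted sum of the last conv feature map, using that class's 1x1 logits weights. A minimal NumPy sketch under that assumption; the function name and normalization are illustrative:

import numpy as np

def class_activation_map(activations, activation_weights, class_idx):
    # activations: [height, width, channels] feature map of the last conv layer
    # activation_weights: [1, 1, channels, num_classes] weights of the 1x1 logits conv
    w = activation_weights[0, 0, :, class_idx]           # [channels]
    cam = np.tensordot(activations, w, axes=([2], [0]))  # [height, width]
    cam -= cam.min()
    if cam.max() > 0:
        cam /= cam.max()  # normalize to [0, 1] for visualization
    return cam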