Code Example #1
def saved_model(testing_dataset,
                model_id,
                _run,
                _log,
                batching=False,
                validation=False):
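    # Only downloads the model archive; the remaining arguments presumably
    # just keep the config consistent with the other experiments.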
    load_gdrive_file(model_id, 'zip')
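
These snippets omit their import preamble. The sketch below lists a minimal set of modules they appear to rely on; load_gdrive_file, FSData and crop_multiple are helpers from the surrounding repository, so their import paths are not shown here and are only noted in a comment.

# Minimal import preamble assumed by the examples in this section.
import os
import time
from zipfile import ZipFile

import bdlb
import cv2
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from tqdm import tqdm

# load_gdrive_file, FSData and crop_multiple are repository-local helpers;
# import them from wherever the surrounding project defines them.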
Code Example #2
def segmentation(image_path):
    image = cv2.imread(image_path)

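    # Download the SavedModel archive from Google Drive and extract it.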
    ZipFile(load_gdrive_file('12ONfO6WIS16xkfu6ucHEy4_5Tre0yxC5',
                             'zip')).extractall('/tmp/extracted_module')
    tf.compat.v1.enable_resource_variables()
    net = tf.saved_model.load('/tmp/extracted_module')

    # batch processing
    image = tf.expand_dims(image, 0)
    out = net.signatures['serving_default'](tf.cast(image, tf.float32))
    out = out['prediction'].numpy()[0]

    # map class IDs to the Cityscapes color palette
    color_map = np.array(
        [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156],
         [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0],
         [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60],
         [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100],
         [0, 0, 230], [119, 11, 32]]).astype('int')
    disp = color_map[out].astype('uint8')[..., ::-1]  # convert to BGR for cv2
    directory, filename = os.path.split(image_path)
    cv2.imwrite(os.path.join(directory, f'{filename.split(".")[0]}_pred.png'),
                disp)
Code Example #3
def saved_model(testing_dataset,
                model_id,
                _run,
                _log,
                batching=False,
                validation=False):
    fsdata = FSData(**testing_dataset)

    # Workaround: tf.data expects different dict keys than FSData provides,
    # so translate them here.
    def data_generator():
        dataset = fsdata.validation_set if validation else fsdata.testset
        for item in dataset:
            data = fsdata._get_data(training_format=False, **item)
            out = {}
            for m in fsdata.modalities:
                blob = crop_multiple(data[m])
                if m == 'rgb':
                    m = 'image_left'
                if 'mask' not in fsdata.modalities and m == 'labels':
                    m = 'mask'
                out[m] = blob
            yield out

    data_types = {}
    for key, item in fsdata.get_data_description()[0].items():
        if key == 'rgb':
            key = 'image_left'
        if 'mask' not in fsdata.modalities and key == 'labels':
            key = 'mask'
        data_types[key] = item

    data = tf.data.Dataset.from_generator(data_generator, data_types)

    ZipFile(load_gdrive_file(model_id,
                             'zip')).extractall('/tmp/extracted_module')
    tf.compat.v1.enable_resource_variables()
    net = tf.saved_model.load('/tmp/extracted_module')

    def eval_func(image):
        if batching:
            image = tf.expand_dims(image, 0)
        out = net.signatures['serving_default'](tf.cast(image, tf.float32))
        for key, val in out.items():
            print(key, val.shape, flush=True)
        return out['anomaly_score']

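    # Run the Fishyscapes anomaly benchmark (bdlb) on the predicted anomaly scores.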
    fs = bdlb.load(benchmark="fishyscapes", download_and_prepare=False)
    _run.info['{}_anomaly'.format(model_id)] = fs.evaluate(eval_func, data)
Code Example #4
def saved_model(testing_dataset, model_id, _run, _log, batching=True):
    # testing_dataset is not used, but it keeps the config file consistent
    # with the other tests
    data = tfds.load(
        name='cityscapes',
        split='validation',
        data_dir='/cluster/work/riner/users/blumh/tensorflow_datasets')
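    # Map the 34 raw Cityscapes label IDs to the 19 train IDs (-1 = ignore).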
    label_lookup = tf.constant([
        -1, -1, -1, -1, -1, -1, -1, 0, 1, -1, -1, 2, 3, 4, -1, -1, -1, 5, -1,
        6, 7, 8, 9, 10, 11, 12, 13, 14, 15, -1, -1, 16, 17, 18
    ])

    def label_lookup_map(batch):
        batch['segmentation_label'] = tf.gather_nd(
            label_lookup, tf.cast(batch['segmentation_label'], tf.int32))
        return batch

    data = data.map(label_lookup_map)
    if batching:
        data = data.batch(1)

    ZipFile(load_gdrive_file(model_id,
                             'zip')).extractall('/tmp/extracted_module')
    tf.compat.v1.enable_resource_variables()
    net = tf.saved_model.load('/tmp/extracted_module')

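    # Accumulate mean IoU over the validation set; void pixels (label -1)
    # receive zero sample weight.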
    m = tf.keras.metrics.MeanIoU(num_classes=19)
    for batch in tqdm(data, ascii=True):
        pred = net.signatures['serving_default'](tf.cast(
            batch['image_left'], tf.float32))
        labels = tf.reshape(batch['segmentation_label'], [-1])
        weights = tf.where(labels == -1, 0, 1)
        labels = tf.where(labels == -1, 0, labels)
        m.update_state(labels,
                       tf.reshape(pred['prediction'], [-1]),
                       sample_weight=weights)

    _run.info['mIoU'] = m.result().numpy()
Code Example #5
def saved_model(testing_dataset,
                model_id,
                _run,
                _log,
                batching=False,
                validation=False):
    data = tfds.load(
        name='cityscapes',
        split='validation',
        data_dir='/cluster/work/riner/users/blumh/tensorflow_datasets')
    if batching:
        data = data.batch(1)
    data = data.prefetch(500)

    ZipFile(load_gdrive_file(model_id,
                             'zip')).extractall('/tmp/extracted_module')
    tf.compat.v1.enable_resource_variables()
    net = tf.saved_model.load('/tmp/extracted_module')

    def eval_func(image):
        out = net.signatures['serving_default'](tf.cast(image, tf.float32))
        return out['anomaly_score'], out['prediction']

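    # Average the wall-clock time of one forward pass per batch.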
    m = tf.keras.metrics.Mean()
    for batch in tqdm(data, ascii=True):
        start = time.time()
        eval_func(batch['image_left'])
        end = time.time()
        m.update_state(end - start)

    _run.info['{}_anomaly'.format(model_id)] = m.result().numpy()
Code Example #6
def download(testing_dataset, batching, model_id):
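    # Pre-download the model archive; testing_dataset and batching are unused here.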
    load_gdrive_file(model_id, 'zip')
Code Example #7
def saved_model(image_path,
                name,
                model_id,
                scale='linear',
                labels=None,
                testing_dataset=None,
                batching=False,
                validation=None):
    image = cv2.imread(image_path)

    ZipFile(load_gdrive_file(model_id,
                             'zip')).extractall('/tmp/extracted_module')
    tf.compat.v1.enable_resource_variables()
    net = tf.saved_model.load('/tmp/extracted_module')

    def eval_func(image):
        if batching:
            image = tf.expand_dims(image, 0)
        out = net.signatures['serving_default'](tf.cast(image, tf.float32))
        for key, val in out.items():
            print(key, val.shape, flush=True)
        return out['anomaly_score']

    out = eval_func(image).numpy()
    if batching:
        out = out[0]

    if scale == 'exp':
        out = np.exp(out)
    elif scale == 'log':
        out = np.log(out)

    min_val, max_val = out.min(), out.max()
    disp = (out - min_val) / (max_val - min_val)
    disp = 255 - (np.clip(disp, 0, 1) * 255).astype('uint8')

    directory, filename = os.path.split(image_path)

    if labels is None:
        cv2.imwrite(
            os.path.join(directory, f'{filename.split(".")[0]}_{name}.jpg'),
            disp)
        return

    # since we have labels, also evaluate the accuracy against the ground truth
    def data_generator():
        rgb = cv2.imread(image_path).astype('float32')
        label = cv2.imread(labels, cv2.IMREAD_ANYDEPTH).astype('int32')
        yield {'image_left': rgb, 'mask': label}

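    # Wrap the single image/label pair in a tf.data.Dataset as expected by
    # fs.evaluate.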
    data = tf.data.Dataset.from_generator(data_generator, {
        'image_left': tf.float32,
        'mask': tf.int32
    })
    fs = bdlb.load(benchmark='fishyscapes', download_and_prepare=False)
    metrics = fs.evaluate(eval_func, data)
    print(metrics['AP'], flush=True)
    cv2.imwrite(
        os.path.join(
            directory,
            f'{filename.split(".")[0]}_{name}_AP{100 * metrics["AP"]:.2f}.jpg'
        ), disp)