def cache_train_16():
    print('num_train_images =', train_wkt['ImageId'].nunique())

    train_shapes = shapes[shapes['image_id'].isin(
        train_wkt['ImageId'].unique())]

    min_train_height = train_shapes['height'].min()
    min_train_width = train_shapes['width'].min()

    num_train = train_shapes.shape[0]

    image_rows = min_train_height
    image_cols = min_train_width

    num_channels = 16

    num_mask_channels = 10

    # h5py.File() takes no compression arguments; compression is set per
    # dataset. 'blosc:lz4' also needs third-party filter plugins, so plain
    # gzip is used here as a portable default.
    f = h5py.File(os.path.join(data_path, 'train_16.h5'), 'w')

    imgs = f.create_dataset('train',
                            (num_train, num_channels, image_rows, image_cols),
                            dtype=np.float16,
                            compression='gzip',
                            compression_opts=9)
    imgs_mask = f.create_dataset(
        'train_mask', (num_train, num_mask_channels, image_rows, image_cols),
        dtype=np.uint8,
        compression='gzip',
        compression_opts=9)

    ids = []

    i = 0
    for image_id in tqdm(sorted(train_wkt['ImageId'].unique())):
        image = extra_functions.read_image_16(image_id)
        _, height, width = image.shape

        imgs[i] = image[:, :min_train_height, :min_train_width]
        imgs_mask[i] = extra_functions.generate_mask(
            image_id,
            height,
            width,
            num_mask_channels=num_mask_channels,
            train=train_wkt)[:, :min_train_height, :min_train_width]

        ids += [image_id]
        i += 1

    # store the ids as fixed-length bytes; workaround from
    # https://github.com/h5py/h5py/issues/441
    f['train_ids'] = np.array(ids).astype('|S9')

    f.close()
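
A minimal sketch of reading the cached arrays back, assuming the same data_path and the dataset names created above:

import os

import h5py
import numpy as np

with h5py.File(os.path.join(data_path, 'train_16.h5'), 'r') as f:
    X = np.array(f['train'])       # (num_train, 16, rows, cols), float16
    y = np.array(f['train_mask'])  # (num_train, 10, rows, cols), uint8
    train_ids = [s.decode('utf-8') for s in f['train_ids']]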
Example #3
def cache_train_16():
    num_channels = 13
    num_mask_channels = 1
    for label_csv in tqdm(sorted(os.listdir(label_path))):
        # 'collect' is an int index selecting which collect to process
        if COLLECTS[collect] not in label_csv:
            continue
        train_wkt = pd.read_csv(
            os.path.join(image_path, 'summaryData_Train_2', label_csv))

        print("image number in collect :  ", label_csv, '   ',
              train_wkt['ImageId'].nunique(), label_csv.rstrip('.csv'))
        num_train = train_wkt['ImageId'].nunique()

        # str.rstrip removes trailing *characters*, not a suffix, so slice instead
        f = h5py.File(
            os.path.join(output_train_path,
                         label_csv[:-len('.csv')] + '.h5'), 'w')
        imgs = f.create_dataset('train', (num_train, num_channels, 900, 900),
                                dtype=np.float16)
        imgs_mask = f.create_dataset('train_mask',
                                     (num_train, num_mask_channels, 900, 900),
                                     dtype=np.uint8)

        i = 0
        print("<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>")
        print("normalization_value:     ", normalization_value[collect])
        print("<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>")

        for image_id in tqdm(sorted(train_wkt['ImageId'].unique())):
            # if image_id in imperfect_images:
            #     print("imperfect pictures:   ", image_id)
            #     continue

            imgs[i] = extra_functions.read_image_16(
                image_path, label_csv[:-len('_Train.csv')], image_id,
                normalization_value[collect])
            imgs_mask[i] = extra_functions.generate_mask_for_image(
                image_id, train_wkt)

            # mask = imgs_mask[i, 0, :, :]
            # print(mask.shape)
            # ax1 = plt.subplot(111)
            # ax1.set_title(image_id)
            # ax1.imshow(mask)
            # plt.show()
            i += 1

        f.close()
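
Aside: str.rstrip strips a trailing run of the given characters, not a suffix, which is why the slicing above is used. On Python 3.9+, str.removesuffix states the intent directly (filenames here are made up):

label_csv = 'AOI_3_Paris_Train.csv'           # hypothetical filename
print(label_csv.removesuffix('.csv'))         # AOI_3_Paris_Train
print(label_csv.removesuffix('_Train.csv'))   # AOI_3_Paris

print('roads_csv.csv'.rstrip('.csv'))         # roads_  -- rstrip eats too much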
def cache_train_16():
    print('num_train_images =', train_wkt['ImageId'].nunique())

    train_shapes = shapes[shapes['image_id'].isin(
        train_wkt['ImageId'].unique())]
    np.save('../data/train_ids.npy', train_shapes['image_id'])
    min_train_height = train_shapes['height'].min()
    min_train_width = train_shapes['width'].min()

    # num_train = train_shapes.shape[0]
    # image_rows = min_train_height
    # image_cols = min_train_width
    # num_channels = 16

    # f = h5py.File(os.path.join(data_path, 'train_16.h5'), 'w')
    # imgs = f.create_dataset('train', (num_train, num_channels, image_rows, image_cols), dtype=np.float16)
    # imgs_mask = f.create_dataset('train_mask', (num_train, image_rows, image_cols), dtype=np.uint8)

    ids = []
    i = 0
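    # num_mask_channels is assumed to come from module scope here
    # (the h5py variant above sets it to 10)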

    for image_id in tqdm(sorted(train_wkt['ImageId'].unique())):
        image = extra_functions.read_image_16(image_id)
        _, height, width = image.shape

        img = image[:, :min_train_height, :min_train_width]
        img_mask = extra_functions.generate_mask(
            image_id,
            height,
            width,
            num_mask_channels=num_mask_channels,
            train=train_wkt)[:, :min_train_height, :min_train_width]

        np.save('../data/data_files/{}_img.npy'.format(image_id), img)
        np.save('../data/data_files/{}_mask.npy'.format(image_id), img_mask)
        ids += [image_id]
        i += 1
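
A minimal sketch of loading one cached pair back, assuming the ids file saved above (allow_pickle covers the object-dtype array a pandas column can produce):

import numpy as np

train_ids = np.load('../data/train_ids.npy', allow_pickle=True)
image_id = train_ids[0]
img = np.load('../data/data_files/{}_img.npy'.format(image_id))
img_mask = np.load('../data/data_files/{}_mask.npy'.format(image_id))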
#%%
def mask2poly(predicted_mask, threshold, x_scaler, y_scaler):
    polygons = extra_functions.mask2polygons_layer(
        predicted_mask[0] > threshold, epsilon=0, min_area=5)

    polygons = shapely.affinity.scale(polygons,
                                      xfact=1.0 / x_scaler,
                                      yfact=1.0 / y_scaler,
                                      origin=(0, 0, 0))
    return shapely.wkt.dumps(polygons.buffer(2.6e-5))
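
mask2polygons_layer is project code from extra_functions; a minimal standalone sketch of the same idea (binary mask -> WKT MultiPolygon), assuming OpenCV >= 4 and Shapely, on made-up data:

import cv2
import numpy as np
import shapely.wkt
from shapely.geometry import MultiPolygon, Polygon

mask = np.zeros((64, 64), dtype=np.uint8)
mask[10:30, 10:30] = 1  # a single square blob

# RETR_EXTERNAL keeps outer contours only; each contour has shape (N, 1, 2)
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
polys = [Polygon(c.reshape(-1, 2)) for c in contours if len(c) >= 3]
print(shapely.wkt.dumps(MultiPolygon(polys)))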


#%%
for image_id in tqdm(test_ids):
    image = extra_functions.read_image_16(image_id)

    file_name = '{}.tif'.format(image_id)
    image_3 = tiff.imread(os.path.join(three_band_path, file_name))
    image_3 = np.transpose(image_3, (1, 2, 0))
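    # 2047 = 2**11 - 1: scale the 11-bit band values to 8-bit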
    image_3 = image_3 / 2047 * 255
    image_3 = np.array(image_3, dtype=np.uint8)
    H = image.shape[1]
    W = image.shape[2]

    x_max, y_min = extra_functions._get_xmax_ymin(image_id)

    predicted_mask = extra_functions.make_prediction_cropped(
        model,
        image,
        initial_size=(128, 128),
        # the remaining arguments are assumed, mirroring the analogous
        # call in Example #6 below
        final_size=(128 - 32, 128 - 32),
        num_masks=num_mask_channels,
        num_channels=num_channels)
Example #6
        print("<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>")
        print("normalization_value:     ", normalization_value[index])
        print("<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>")

        for i in range(n_ims):
            image_id = im_list[i]
            print(
                "<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>"
            )
            print("predict image id:     ", image_id)
            print(
                "<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>"
            )

            image = extra_functions.read_image_16(test_path, collect_name,
                                                  image_id,
                                                  normalization_value[index])

            predicted_mask = extra_functions.make_prediction_cropped(
                model,
                image,
                initial_size=(patch_width, patch_height),
                final_size=(patch_width - 32, patch_height - 32),
                num_masks=1,
                num_channels=channels)

            image_v = flip_axis(image, 1)
            predicted_mask_v = extra_functions.make_prediction_cropped(
                model,
                image_v,
                initial_size=(patch_width, patch_height),
                # remaining arguments assumed to mirror the unflipped call above
                final_size=(patch_width - 32, patch_height - 32),
                num_masks=1,
                num_channels=channels)