# Example no. 1 (score: 0)
def infer_one_img(file):
    """Run segmentation inference on one image and write mask/word images.

    Produces three images next to the working directory:
      - 'mask_1_<file>': input masked by the raw prediction,
      - 'mask_2_<file>': input masked after sparse-column chopping,
      - '<file>':        input masked to the accepted word boxes.

    NOTE(review): relies on module-level globals (data_dir, sess,
    input_tensor, logits, keep_probability, is_training, CHOPPING_RATIO,
    pad) and on cv2/numpy names imported at file level.

    Args:
        file: Path of the image to infer.
    """
    input_img = imread(file)
    input_batch_list, coordinate_batch_list, height, width = create_patch_batch_list(
        filename=file, batch_size=128, data_dir=data_dir)
    logits_map = batch_logits_map_inference(
        input_tensor, logits, keep_probability, sess, is_training,
        input_batch_list, coordinate_batch_list, height, width)

    # Per-pixel class prediction (argmax over the channel axis).
    pred_annotation_map = np.array(np.argmax(logits_map, axis=2), dtype=np.uint8)
    imwrite('mask_1_' + basename(file),
            np.transpose(np.transpose(input_img, (2, 0, 1)) * pred_annotation_map, (1, 2, 0)))

    # Chop sparse columns inside each connected component: a column whose
    # white-pixel fraction is <= CHOPPING_RATIO is zeroed from the prediction.
    _, thresh = threshold(pred_annotation_map, 0, 255, 0)
    # OpenCV 3 findContours returns (image, contours, hierarchy).
    _, contours, _ = findContours(thresh, RETR_EXTERNAL, 2)
    for cnt in contours:
        x, y, w, h = boundingRect(cnt)
        for i in range(w):
            count_white = sum(1 for j in range(h) if thresh[y + j, x + i] == 255)
            if count_white / h <= CHOPPING_RATIO:
                for j in range(h):
                    pred_annotation_map[y + j, x + i] = 0
    _, thresh = threshold(pred_annotation_map, 0, 255, 0)
    imwrite('mask_2_' + basename(file),
            np.transpose(np.transpose(input_img, (2, 0, 1)) * pred_annotation_map, (1, 2, 0)))

    # Build a binary map of accepted word boxes from the cleaned prediction.
    boxes_map = np.zeros_like(pred_annotation_map, dtype=np.uint8)
    _, thresh = threshold(pred_annotation_map, 0, 255, 0)
    _, contours, _ = findContours(thresh, RETR_EXTERNAL, 2)
    name_boxes = []
    for cnt in contours:
        # Ignore tiny components (area <= 200) as noise.
        if contourArea(cnt) > 200:
            x, y, w, h = boundingRect(cnt)
            input_height, _, _ = input_img.shape
            # Pad the box vertically by `pad`, clamped to the image bounds.
            y = max(0, y - pad)
            end_x = x + w
            end_y = min(y + h + pad, input_height)
            h = end_y - y
            # Keep only boxes wide enough relative to their height; fill the
            # boxes_map only for accepted boxes (this was mis-indented in the
            # original and ran once per contour with stale coordinates —
            # fixed to match the sibling infer_img implementation).
            if w > h / 1.75:
                name_boxes.append([x, y, end_x, end_y])
                for i in range(w):
                    for j in range(h):
                        boxes_map[y + j, x + i] = 1
    # Mask the input image to keep only pixels inside accepted boxes.
    output_words = np.transpose(np.transpose(input_img, (2, 0, 1)) * boxes_map, (1, 2, 0))
    imwrite(basename(file), output_words)
        top_img[i] = imread("../ISPRS_semantic_labeling_Vaihingen/top/" +
                            filename[i] + ".tif")
        logits_maps[i] = np.zeros(
            (top_img[i].shape[0], top_img[i].shape[1], 6), dtype=np.float32)
        # gt_annotation_maps[i] = imread("../ISPRS_semantic_labeling_Vaihingen/annotations/" + filename[i] + ".png", -1) # Comment if for submission
        # num_pixels += gt_annotation_maps[i].shape[0] * gt_annotation_maps[i].shape[1] # Comment if for submission

    # Accumulate logits maps
    ckpt = tf.train.get_checkpoint_state(argv[1])  # checkpoint directory
    for ckpt_path in ckpt.all_model_checkpoint_paths:
        # for ckpt_path in [tf.train.get_checkpoint_state(argv[1]).model_checkpoint_path]:
        saver.restore(sess, ckpt_path)
        print(">> Restored:", ckpt_path)
        for i in range(num_img_files):
            print(ckpt_path, "inferring", filename[i])
            input_batch_list, coordinate_batch_list, height, width = create_patch_batch_list(
                filename=filename[i], batch_size=128, num_channels=5)
            current_logits_map = batch_logits_map_inference(
                input_tensor, logits, keep_probability, sess, is_training,
                input_batch_list, coordinate_batch_list, height, width)
            logits_maps[i] += current_logits_map

    # Inferring
    for i in range(num_img_files):
        pred_annotation_maps[i] = np.argmax(logits_maps[i], axis=2)
        # num_matches += np.sum(pred_annotation_maps[i] == gt_annotation_maps[i]) # Comment if for submission
        height = pred_annotation_maps[i].shape[0]
        width = pred_annotation_maps[i].shape[1]
        output_image = np.zeros((height, width, 3), dtype=np.uint8)

        print("Generating", filename[i] + '.tif......')
        for y in range(height):
# Example no. 3 (score: 0)
def infer_img(file, model_output_dir=argv[1]):
    """Infer the name-box segmentation for one image and save all artifacts.

    Writes the cleaned prediction mask, the boxed mask, the word-pixels image
    and a JSON file of accepted boxes, and accumulates pixel-accuracy counts
    into the module-level ``num_matches`` / ``num_pixels`` globals.

    NOTE(review): the default ``model_output_dir=argv[1]`` is evaluated once
    at import time; the function also relies on many module-level globals
    (data_dir, input_dir, annotation_dir, sess, pad, CHOPPING_RATIO, ...).

    Args:
        file: Image file name, expected under data_dir/input_dir with a
            matching ground-truth mask under data_dir/annotation_dir.
        model_output_dir: Directory receiving the prediction artifacts.
    """
    global num_matches
    global num_pixels
    if not exists(model_output_dir):
        mkdir(model_output_dir)

    # Ground-truth mask loaded unchanged (flag -1) and forced to uint8.
    gt_annotation_map = np.array(imread(join(data_dir, annotation_dir, file), -1), dtype=np.uint8)
    # Skip images smaller than the 32x32 patch size.
    if gt_annotation_map.shape[0] >= 32 and gt_annotation_map.shape[1] >= 32:
        input_img = imread(join(data_dir, input_dir, file))

        input_batch_list, coordinate_batch_list, height, width = create_patch_batch_list(filename=file, batch_size=128, data_dir=data_dir)
        logits_map = batch_logits_map_inference(input_tensor, logits, keep_probability, sess, is_training, input_batch_list, coordinate_batch_list, height, width)

        # Per-pixel class prediction (argmax over the channel axis).
        pred_annotation_map = np.array(np.argmax(logits_map, axis=2), dtype=np.uint8)
        _, thresh = threshold(pred_annotation_map, 0, 255, 0)
        # OpenCV 3 findContours returns (image, contours, hierarchy).
        _, contours, _ = findContours(thresh, RETR_EXTERNAL, 2)
        # Chop sparse columns inside each connected component: a column whose
        # white-pixel fraction is <= CHOPPING_RATIO is zeroed out.
        for cnt in contours:
            x,y,w,h = boundingRect(cnt)
            for i in range(w):
                count_white = 0
                for j in range(h):
                    if thresh[y + j, x + i] == 255:
                        count_white += 1
                if count_white / h <= CHOPPING_RATIO:
                    for j in range(h):
                        pred_annotation_map[y + j, x + i] = 0
        if not exists(join(model_output_dir, pred_masks_dir)):
            mkdir(join(model_output_dir, pred_masks_dir))
        # Re-threshold after chopping and save the cleaned mask.
        _, thresh = threshold(pred_annotation_map, 0, 255, 0)
        imwrite(join(model_output_dir, pred_masks_dir, file), thresh)

        # Build a binary map marking only the accepted bounding boxes.
        boxes_map = np.zeros_like(pred_annotation_map, dtype=np.uint8)
        _, thresh = threshold(pred_annotation_map, 0, 255, 0)
        _, contours, _ = findContours(thresh, RETR_EXTERNAL, 2)
        json_name = file.replace('png', 'json')
        json_dict = {}
        name_boxes = []
        for cnt in contours:
            # Ignore tiny components (area <= 200) as noise.
            if contourArea(cnt) > 200:
                x,y,w,h = boundingRect(cnt)
                input_height, input_width, _ = input_img.shape
                # x = max(0, x - pad)
                # Pad the box vertically by `pad`, clamped to the image bounds.
                y = max(0, y - pad)
                # end_x = min(x + w + pad, input_width)
                end_x = x + w
                end_y = min(y + h + pad, input_height)
                # w = end_x - x
                h = end_y - y
                # Keep only boxes wide enough relative to their height.
                if w > h / 1.75:
                    name_boxes.append([x, y, end_x, end_y])
                    for i in range(w):
                        for j in range(h):
                            boxes_map[y + j, x + i] = 1
        _, thresh_boxes = threshold(boxes_map, 0, 255, 0)
        if not exists(join(model_output_dir, pred_boxed_masks_dir)):
            mkdir(join(model_output_dir, pred_boxed_masks_dir))
        imwrite(join(model_output_dir, pred_boxed_masks_dir, file), thresh_boxes)
        # Persist the accepted boxes as [x, y, end_x, end_y] lists.
        json_dict['name_boxes'] = name_boxes
        print(json_dict)
        if not exists(output_json_dir):
            mkdir(output_json_dir)
        with open(join(output_json_dir, json_name), 'w') as outfile:
            json.dump(json_dict, outfile)

        height = pred_annotation_map.shape[0]
        width = pred_annotation_map.shape[1]
        # _, contours, _ = findContours(thresh, RETR_EXTERNAL, 2)
        # Accumulate pixel-accuracy statistics into the module globals.
        num_pixels += height * width
        num_matches += np.sum(pred_annotation_map == gt_annotation_map)

        # Mask the input image to keep only pixels inside accepted boxes.
        output_words = np.transpose(np.transpose(input_img, (2, 0, 1)) * boxes_map, (1, 2, 0))
        if not exists(join(model_output_dir, words_dir)):
            mkdir(join(model_output_dir, words_dir))
        imwrite(join(model_output_dir, words_dir, file), output_words)