Code example #1
def further_partition_data():
    train_ids = Table().read_table(
        dp.ids_by_data_set('train')).column('Id').tolist()
    validate_ids = Table().read_table(
        dp.ids_by_data_set('validate')).column('Id').tolist()

    split_index = len(train_ids) // 3

    validate_0_ids = validate_ids
    validate_1_ids = train_ids[:split_index]
    validate_2_ids = train_ids[split_index:2 * split_index]
    validate_3_ids = train_ids[2 * split_index:]

    train_0_ids = train_ids
    train_1_ids = validate_0_ids + validate_2_ids + validate_3_ids
    train_2_ids = validate_0_ids + validate_1_ids + validate_3_ids
    train_3_ids = validate_0_ids + validate_1_ids + validate_2_ids

    train_id_sets = [train_0_ids, train_1_ids, train_2_ids, train_3_ids]
    validate_id_sets = [validate_0_ids, validate_1_ids, validate_2_ids,
                        validate_3_ids]
    for i, (t_ids, v_ids) in enumerate(zip(train_id_sets, validate_id_sets)):
        Table().with_column('Id', t_ids).to_csv(
            dp.ids_by_data_set('train_{}'.format(i)))
        Table().with_column('Id', v_ids).to_csv(
            dp.ids_by_data_set('validate_{}'.format(i)))
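The rotated splits written above can be verified in the same spirit as the disjointness test in code example #2 below. A minimal sketch, assuming the same Table and dp helpers used throughout this listing; check_further_partition is a hypothetical name, not part of the project:

def check_further_partition():
    full = set(Table().read_table(
        dp.ids_by_data_set('train')).column('Id')) | set(Table().read_table(
            dp.ids_by_data_set('validate')).column('Id'))
    for i in range(4):
        train = set(Table().read_table(
            dp.ids_by_data_set('train_{}'.format(i))).column('Id'))
        validate = set(Table().read_table(
            dp.ids_by_data_set('validate_{}'.format(i))).column('Id'))
        assert train.isdisjoint(validate)  # no leakage within a fold
        assert train | validate == full    # every id appears in every fold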
Code example #2
def test_partition_stage1_data():
    train_ids = set(Table().read_table(
        dp.ids_by_data_set('train')).column('Id'))
    validate_ids = set(Table().read_table(
        dp.ids_by_data_set('validate')).column('Id'))
    test_ids = set(Table().read_table(dp.ids_by_data_set('test')).column('Id'))

    assert train_ids.intersection(validate_ids) == set()
    assert train_ids.intersection(test_ids) == set()
    assert validate_ids.intersection(test_ids) == set()
Code example #3
File: create_png.py  Project: AlonDaks/tsa-kaggle
def _create_pngs(data_set, file_format):
    if file_format == 'aps':
        num_slcs, indices = 16, range(16)
    elif file_format == 'a3daps':
        num_slcs, indices = 64, [0]
    else:
        raise Exception('Invalid file format. Must be either aps or a3daps')
    ids = Table().read_table(dp.ids_by_data_set(data_set)).column('Id')
    for _id in ids:
        in_file = '{0}/data/raw/{1}/{2}.{1}'.format(dp.LARGE_DATA_BIN,
                                                    file_format, _id)
        header = read_header(in_file)
        with open(in_file, 'rb') as f:
            f.seek(CONST.RAW_HEADER_LENGTH)
            image = np.fromfile(f,
                                dtype=np.uint16,
                                count=num_slcs * CONST.A3DAPS_HEIGHT *
                                CONST.A3DAPS_WIDTH)
        image = image.reshape(num_slcs, CONST.A3DAPS_HEIGHT,
                              CONST.A3DAPS_WIDTH).copy()
        for index in indices:
            image_slice = image[index, :, :]
            image_slice = image_slice.astype(np.float32)
            image_slice *= header['data_scale_factor']
            image_slice = np.flip(image_slice, axis=0)
            scipy.misc.imsave(
                '{0}/data/raw/{1}_png/{2}/{3}.png'.format(
                    dp.LARGE_DATA_BIN, file_format, index,
                    str(index) + '_' + _id), image_slice)
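scipy.misc.imsave, used above, is deprecated and has been removed in recent SciPy releases. A minimal sketch of a replacement save step using Pillow; save_slice_png is a hypothetical helper, and the min/max rescale to 8-bit only approximates scipy.misc.imsave's behaviour:

from PIL import Image
import numpy as np

def save_slice_png(image_slice, out_path):
    # Rescale the float32 slice to the 0-255 range before writing the PNG.
    lo, hi = float(image_slice.min()), float(image_slice.max())
    scaled = (255.0 * (image_slice - lo) / max(hi - lo, 1e-8)).astype(np.uint8)
    Image.fromarray(scaled).save(out_path)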
Code example #4
def _inference(data_set, keypoint):
    tf.reset_default_graph()

    ids = Table().read_table(dp.ids_by_data_set(data_set)).column('Id')
    base_png_path = dp.LARGE_DATA_BIN + '/data/raw/'
    if keypoint.file_format == 'a3daps':
        base_png_path += 'a3daps_png/'
    else:
        base_png_path += 'aps_png/'
    base_xml_output_path = dp.REPO_HOME_PATH + '/data/bbox/keypoint_inference/' + keypoint.name

    images_tensor = tf.placeholder(tf.float32, (None, 660, 512, 1))
    model = KeypointDetectModel(batch_size=1,
                                image_dim=(660, 512),
                                anchor_kwargs=keypoint.anchor_kwargs)
    logits_tensor, bboxes_tensor = model.inference(images_tensor, test=True)
    logits_tensor = tf.sigmoid(logits_tensor)
    bboxes_tensor = target_to_box(bboxes_tensor, model.anchors)

    saver = tf.train.Saver(tf.global_variables())

    with tf.Session() as sess:
        saver.restore(
            sess,
            tf.train.get_checkpoint_state(
                dp.train_dir('{0}_keypoint'.format(
                    keypoint.name))).model_checkpoint_path)
        for _id in ids:
            image = np.array(
                Image.open('{0}/{1}/{1}_{2}.png'.format(
                    base_png_path, keypoint.slices[0],
                    _id))).astype(np.float32)
            image -= np.mean(image)
            logits, bboxes = sess.run(
                [logits_tensor, bboxes_tensor],
                feed_dict={images_tensor: image[np.newaxis, :, :, np.newaxis]})
            nms_result = non_max_suppression(bboxes,
                                             logits,
                                             min_clique_size=1,
                                             score_threshold=0.05,
                                             iou_threshold=0.4)
            if len(nms_result) > 0:
                bbox = [int(round(i)) for i in nms_result[0][0]]
            elif keypoint.name == 'butt':
                bbox = [180, 359, 340, 438]
            elif keypoint.name == 'face':
                bbox = [204, 137, 302, 220]
            else:
                bbox = 4 * [0]
            write_xml(None, None, None,
                      [bbox])(base_xml_output_path + '/{}.xml'.format(_id))
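The loop above writes a single box per id (a detection if non-max suppression found one, otherwise a fixed fallback). Reading a result back could reuse the parse_bbox_xml helper that appears in the later examples; load_keypoint_bbox and the exact path layout are assumptions, not project code:

def load_keypoint_bbox(keypoint_name, _id):
    xml_path = '{0}/data/bbox/keypoint_inference/{1}/{2}.xml'.format(
        dp.REPO_HOME_PATH, keypoint_name, _id)
    return parse_bbox_xml(xml_path)[0]  # one box is written per id above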
Code example #5
def partition_stage1_data():
    np.random.seed(42)
    stage1_labels = Table().read_table(dp.stage1_labels())
    unique_ids = list({i.split('_')[0] for i in stage1_labels.column('Id')})
    np.random.shuffle(unique_ids)
    split_index = int(len(unique_ids) * TRAIN_PERCENT)
    train_ids = unique_ids[:split_index]
    test_ids = unique_ids[split_index:]

    Table().with_column('Id', train_ids).to_csv(dp.ids_by_data_set('train'))
    Table().with_column('Id', test_ids).to_csv(dp.ids_by_data_set('validate'))

    train_ids = set(train_ids)

    train_labels, test_labels = {}, {}
    for i in range(stage1_labels.num_rows):
        full_id = stage1_labels.row(i).item(0)
        label = int(stage1_labels.row(i).item(1))
        split_id = full_id.split('_')
        id, zone = split_id[0], split_id[1].replace('Zone', '')

        if id in train_ids:
            if id not in train_labels:
                train_labels[id] = {zone: label}
            else:
                train_labels[id][zone] = label
        else:
            if id not in test_labels:
                test_labels[id] = {zone: label}
            else:
                test_labels[id][zone] = label

    with open(dp.labels_by_data_set('train', 'json'), 'w') as f:
        json.dump(train_labels, f, indent=4)
    with open(dp.labels_by_data_set('validate', 'json'), 'w') as f:
        json.dump(test_labels, f, indent=4)
Code example #6
def _curate_arm_calibration(data_set):
    ids = Table().read_table(dp.ids_by_data_set(data_set)).column('Id')
    X, y_label, y_zone = [], [], []

    for _id in ids:
        print(_id)
        logits, bboxes = threat_detect_inference(_id, 'arm')
        arcs = build_arcs('arm', logits, bboxes, _id)
        for i in range(len(arcs)):
            arc = arcs[i]
            X.append(arc.to_list())
            zone = i + 1
            y_label.append(_ground_truth_label(_id, zone))
            y_zone.append(zone)

    X, y_label, y_zone = np.array(X), np.array(y_label), np.array(y_zone)
    return X, y_label, y_zone, ids
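_ground_truth_label, called above, is not included in this listing. A minimal lookup consistent with the per-zone json written in code example #5 might look like the sketch below; the body is an assumption based on that json layout, not the project's implementation:

import json

def _ground_truth_label(_id, zone, data_set='train'):
    with open(dp.labels_by_data_set(data_set, 'json')) as f:
        labels = json.load(f)
    return labels[_id][str(zone)]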
Code example #7
def _inference(data_set, region):
    if region == 'arm':
        region_inference_func, zones = _arm_inference, [1, 2, 3, 4]
    elif region == 'torso':
        region_inference_func, zones = _torso_inference, [5, 6, 7, 17]
    elif region == 'thigh':
        region_inference_func, zones = _thigh_inference, [8, 9, 10, 11, 12]
    elif region == 'calf':
        region_inference_func, zones = _calf_inference, [13, 14, 15, 16]
    else:
        raise ValueError('Invalid region: {}'.format(region))

    ids = Table().read_table(dp.ids_by_data_set(data_set)).column('Id')
    for _id in ids:
        try:
            region_inference_func(_id)
        except Exception:
            # Fall back to a fixed low probability for every zone in the
            # region when inference fails for this id.
            for zone in zones:
                print('{0}_Zone{1},{2}'.format(_id, zone, 0.1))
Code example #8
def _curate_torso_calibration(data_set):
    ids = Table().read_table(dp.ids_by_data_set(data_set)).column('Id')
    X, y_label, y_zone, example_ids = [], [], [], []
    xml_base_zone_path = dp.REPO_HOME_PATH + '/data/bbox/aps_threats/zone_{2}/{0}_{1}.xml'
    for _id in ids:
        print(_id)
        logits, bboxes = threat_detect_inference(_id, 'torso')
        _, cropped_bbox = crop_and_pad(_id,
                                       file_format='aps',
                                       region='torso',
                                       slc=0,
                                       keypoint_pad=None)
        arcs = build_arcs('torso', logits, bboxes, _id)
        for i in range(len(arcs)):
            arc = arcs[i]
            matched = False
            for zone in [5, 6, 7, 17]:
                num_matches = 0
                original_scale_boxes = arc.original_scale_boxes(cropped_bbox)
                for slc in range(original_scale_boxes.shape[0]):
                    gt_xml_path = xml_base_zone_path.format(slc, _id, zone)
                    if os.path.exists(gt_xml_path):
                        gt_bboxes = np.array(parse_bbox_xml(gt_xml_path))
                        if gt_bboxes.size > 0:
                            if np.max(
                                    _iou(gt_bboxes, original_scale_boxes[slc][
                                        np.newaxis])) > 0.5:
                                num_matches += 1
                matched = num_matches / np.sum(
                    np.sum(original_scale_boxes, axis=1) > 0) >= 0.5
                if matched:
                    X.append(arc.to_list())
                    y_label.append(int(matched))
                    y_zone.append(zone)
                    example_ids.append(_id)
                    break
            if not matched:
                X.append(arc.to_list())
                y_label.append(int(matched))
                y_zone.append(zone)
                example_ids.append(_id)

    X, y_label, y_zone = np.array(X), np.array(y_label), np.array(y_zone)
    return X, y_label, y_zone, example_ids
Code example #9
def create_keypoint_tf_record(data_set, region, random_seed):
    np.random.seed(random_seed)

    if region == 'face':
        keypoint = FACE
    elif region == 'butt':
        keypoint = BUTT
    else:
        raise ValueError('Invalid region: {}'.format(region))

    ids = Table().read_table(dp.ids_by_data_set(data_set)).column('Id')

    tf_record_name = '{}_keypoint'.format(keypoint.name)
    print('Creating: {0}_{1}'.format(data_set, tf_record_name))
    writer = tf.python_io.TFRecordWriter(
        dp.tf_records(data_set, tf_record_name))

    base_xml_path = dp.REPO_HOME_PATH + '/data/bbox/' + keypoint.name
    base_png_path = dp.LARGE_DATA_BIN + '/data/raw/'
    if keypoint.file_format == 'a3daps':
        base_png_path += 'a3daps_png/'
    else:
        base_png_path += 'aps_png/'
    for _id in ids:
        for i in keypoint.slices:
            image = np.array(
                Image.open('{0}/{1}/{1}_{2}.png'.format(
                    base_png_path, i, _id))).astype(np.float32)
            xml_path = '{0}/{1}_{2}.xml'.format(base_xml_path, i, _id)
            if os.path.exists(xml_path):
                bboxes = np.array(parse_bbox_xml(xml_path)).ravel()
                bboxes = np.append(bboxes, (BOX_COUNT * 4 - len(bboxes)) *
                                   [0]).astype(np.int64)
                if np.sum(bboxes) == 0:
                    continue
                example = tf.train.Example(features=tf.train.Features(
                    feature={
                        'image': _bytes_feature(image.tostring()),
                        'bbox': _bbox_feature(bboxes),
                    }))
                writer.write(example.SerializeToString())
    writer.close()
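Each record written above holds only an 'image' bytes feature and a flattened 'bbox' list. A hedged decoding sketch in the same TF1 style; _parse_keypoint_record, the int64 bbox shape, and the 660x512 slice size (taken from code example #4) are assumptions:

def _parse_keypoint_record(serialized):
    features = tf.parse_single_example(
        serialized,
        features={
            'image': tf.FixedLenFeature([], tf.string),
            'bbox': tf.FixedLenFeature([BOX_COUNT * 4], tf.int64),
        })
    image = tf.reshape(tf.decode_raw(features['image'], tf.float32),
                       [660, 512])
    return image, features['bbox']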
Code example #10
def _create_localization_tf_record(data_set, region, random_seed):
    np.random.seed(random_seed)

    ids = Table().read_table(dp.ids_by_data_set(data_set)).column('Id')

    tf_record_name = '{0}_localization'.format(region)
    print('Creating: {0}_{1}'.format(data_set, tf_record_name))
    writer = tf.python_io.TFRecordWriter(
        dp.tf_records(data_set, tf_record_name))

    base_xml_path = '{0}/data/bbox/aps_threats/zone_'.format(dp.REPO_HOME_PATH)

    if region == 'arm':
        zones = {1: 0, 2: 1, 3: 2, 4: 3}
    elif region == 'thigh':
        zones = {8: 0, 9: 1, 10: 2, 11: 3, 12: 4}
    else:
        raise ValueError('Invalid region: {}'.format(region))

    masks = [
        make_vertical_stripe_mask((4, 4), (4, 4)),
        make_vertical_stripe_mask((4, 4), (4, 15)),
        make_horizontal_stripe_mask((4, 4), (4, 4)),
        make_horizontal_stripe_mask((4, 4), (15, 4)),
        make_squares_mask((10, 10), (10, 25)),
        make_squares_mask((5, 5), (10, 25)),
        make_squares_mask((5, 5), (5, 15)),
        make_vertical_stripe_mask((2, 2), (2, 2))
    ]

    examples = []
    for _id in ids:
        for slc in range(16):
            for zone in zones.keys():
                xml_path = base_xml_path + '{0}/{1}_{2}.xml'.format(
                    zone, slc, _id)
                if os.path.exists(xml_path):
                    boxes = parse_bbox_xml(xml_path)
                    if len(boxes) > 0:
                        examples += [(_id, slc, zone)]

    np.random.shuffle(examples)

    for _id, slc, zone in examples:
        image, cropped_bbox = crop_and_pad(_id,
                                           region,
                                           'aps',
                                           slc,
                                           keypoint_pad=None)
        pad = pad_dim((cropped_bbox[3] - cropped_bbox[1],
                       cropped_bbox[2] - cropped_bbox[0]))
        xml_path = base_xml_path + '{0}/{1}_{2}.xml'.format(zone, slc, _id)

        box = parse_bbox_xml(xml_path)[0]
        rescaled_box = [
            box[0] - cropped_bbox[0] + pad[1],
            box[1] - cropped_bbox[1] + pad[0],
            box[2] - cropped_bbox[0] + pad[1],
            box[3] - cropped_bbox[1] + pad[0]
        ]
        rescaled_box[0] = max(rescaled_box[0], pad[1])
        rescaled_box[1] = max(rescaled_box[1], pad[0])
        rescaled_box[2] = min(rescaled_box[2],
                              cropped_bbox[2] - cropped_bbox[0] + pad[1])
        rescaled_box[3] = min(rescaled_box[3],
                              cropped_bbox[3] - cropped_bbox[1] + pad[0])
        bboxes = np.array(rescaled_box).astype(np.int64)
        image = image.astype(np.float32)
        label = zones[zone]
        example = tf.train.Example(features=tf.train.Features(
            feature={
                'image':
                _bytes_feature(image.tostring()),
                'bbox':
                _bbox_feature(bboxes),
                'dim':
                tf.train.Feature(int64_list=tf.train.Int64List(
                    value=[image.shape[0]])),
                'label':
                tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),
                'slc':
                tf.train.Feature(int64_list=tf.train.Int64List(value=[slc]))
            }))
        writer.write(example.SerializeToString())
        if 'train' in data_set:
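            # Re-write the clean example len(masks) - 1 extra times so that
            # unmasked and masked variants of each crop are written in equal
            # numbers.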
            for _ in range(len(masks) - 1):
                writer.write(example.SerializeToString())
            for mask in masks:
                masked_image = (image * mask(image.shape)).astype(np.float32)
                example = tf.train.Example(features=tf.train.Features(
                    feature={
                        'image':
                        _bytes_feature(masked_image.tostring()),
                        'bbox':
                        _bbox_feature(bboxes),
                        'dim':
                        tf.train.Feature(int64_list=tf.train.Int64List(
                            value=[image.shape[0]])),
                        'label':
                        tf.train.Feature(int64_list=tf.train.Int64List(
                            value=[label])),
                        'slc':
                        tf.train.Feature(int64_list=tf.train.Int64List(
                            value=[slc]))
                    }))
                writer.write(example.SerializeToString())
    writer.close()
Code example #11
def _create_full_tf_record(data_set,
                           file_format,
                           region,
                           keypoint_pad,
                           random_seed,
                           example_multiple=3):
    np.random.seed(random_seed)

    ids = Table().read_table(dp.ids_by_data_set(data_set)).column('Id')

    tf_record_name = '{0}_crop_patch_full_{1}'.format(file_format, region)
    print('Creating: {0}_{1}'.format(data_set, tf_record_name))
    writer = tf.python_io.TFRecordWriter(
        dp.tf_records(data_set, tf_record_name))

    base_xml_path = '{0}/data/bbox/{1}_merged_threats_{2}'.format(
        dp.REPO_HOME_PATH, file_format, region)

    masks = [
        make_vertical_stripe_mask((4, 4), (4, 4)),
        make_vertical_stripe_mask((4, 4), (4, 15)),
        make_horizontal_stripe_mask((4, 4), (4, 4)),
        make_horizontal_stripe_mask((4, 4), (15, 4)),
        make_squares_mask((10, 10), (10, 25)),
        make_squares_mask((5, 5), (10, 25)),
        make_squares_mask((5, 5), (5, 15)),
        make_vertical_stripe_mask((2, 2), (2, 2))
    ]

    if file_format == 'aps':
        slcs = range(16)
    elif file_format == 'a3daps':
        slcs = [t for t in range(64) if t % 2 == 0]
    else:
        raise ValueError('Invalid file format: {}'.format(file_format))

    existing_paths, non_existing_paths = 0, 0
    for _id in ids:
        for i in slcs:
            xml_path = '{0}/{1}_{2}.xml'.format(base_xml_path, i, _id)
            if os.path.exists(xml_path):
                existing_paths += 1
            else:
                non_existing_paths += 1

    ids_and_slcs = []
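    # Repeat each annotated (threat) slice example_multiple times and keep a
    # random subset of unannotated slices, so that threat and background crops
    # are written in roughly equal numbers in expectation.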
    for _id in ids:
        for i in slcs:
            xml_path = '{0}/{1}_{2}.xml'.format(base_xml_path, i, _id)
            if os.path.exists(xml_path):
                ids_and_slcs += example_multiple * [(_id, i)]
            elif np.random.binomial(
                    1,
                    min(1, example_multiple * existing_paths /
                        non_existing_paths)):
                ids_and_slcs += [((_id, i))]
    np.random.shuffle(ids_and_slcs)
    for _id, i in ids_and_slcs:
        image, cropped_bbox = crop_and_pad(_id,
                                           region,
                                           file_format,
                                           i,
                                           keypoint_pad=keypoint_pad)
        pad = pad_dim((cropped_bbox[3] - cropped_bbox[1],
                       cropped_bbox[2] - cropped_bbox[0]))
        xml_path = '{0}/{1}_{2}.xml'.format(base_xml_path, i, _id)

        if os.path.exists(xml_path):
            bboxes = parse_bbox_xml(xml_path)
            if np.sum(np.array(bboxes).ravel()) == 0:
                continue
            cropped_bboxes = []
            for box in bboxes:
                rescaled_box = [
                    box[0] - cropped_bbox[0] + pad[1],
                    box[1] - cropped_bbox[1] + pad[0],
                    box[2] - cropped_bbox[0] + pad[1],
                    box[3] - cropped_bbox[1] + pad[0]
                ]
                rescaled_box[0] = max(rescaled_box[0], pad[1])
                rescaled_box[1] = max(rescaled_box[1], pad[0])
                rescaled_box[2] = min(
                    rescaled_box[2],
                    cropped_bbox[2] - cropped_bbox[0] + pad[1])
                rescaled_box[3] = min(
                    rescaled_box[3],
                    cropped_bbox[3] - cropped_bbox[1] + pad[0])
                cropped_bboxes.append(rescaled_box)
            bboxes = np.array(cropped_bboxes).ravel()
            bboxes = np.append(bboxes, (BOX_COUNT * 4 - len(bboxes)) *
                               [0]).astype(np.int64)
        else:
            bboxes = np.array(BOX_COUNT * 4 * [0]).astype(np.int64)

        image = image.astype(np.float32)
        example = tf.train.Example(features=tf.train.Features(
            feature={
                'image':
                _bytes_feature(image.tostring()),
                'bbox':
                _bbox_feature(bboxes),
                'dim':
                tf.train.Feature(int64_list=tf.train.Int64List(
                    value=[image.shape[0]]))
            }))
        writer.write(example.SerializeToString())
        if 'train' in data_set:
            for _ in range(len(masks) - 1):
                writer.write(example.SerializeToString())
            for mask in masks:
                masked_image = (image * mask(image.shape)).astype(np.float32)
                example = tf.train.Example(features=tf.train.Features(
                    feature={
                        'image':
                        _bytes_feature(masked_image.tostring()),
                        'bbox':
                        _bbox_feature(bboxes),
                        'dim':
                        tf.train.Feature(int64_list=tf.train.Int64List(
                            value=[image.shape[0]]))
                    }))
                writer.write(example.SerializeToString())
    writer.close()
Code example #12
def _create_pure_tf_record(data_set,
                           file_format,
                           region,
                           keypoint_pad,
                           random_seed,
                           example_multiple=3):
    np.random.seed(random_seed)

    ids = Table().read_table(dp.ids_by_data_set(data_set)).column('Id')

    tf_record_name = '{0}_crop_patch_pure_{1}'.format(file_format, region)
    print('Creating: {0}_{1}'.format(data_set, tf_record_name))
    writer = tf.python_io.TFRecordWriter(
        dp.tf_records(data_set, tf_record_name))

    base_xml_path = '{0}/data/bbox/{1}_merged_threats_{2}'.format(
        dp.REPO_HOME_PATH, file_format, region)

    if file_format == 'aps':
        slcs = range(16)
    elif file_format == 'a3daps':
        slcs = [t for t in range(64) if t % 2 == 0]
    else:
        raise ValueError('Invalid file format: {}'.format(file_format))

    ids_and_slcs = []
    for _id in ids:
        for i in slcs:
            xml_path = '{0}/{1}_{2}.xml'.format(base_xml_path, i, _id)
            if os.path.exists(xml_path):
                ids_and_slcs += [(_id, i)]

    np.random.shuffle(ids_and_slcs)

    for _id, i in ids_and_slcs:
        image, cropped_bbox = crop_and_pad(_id,
                                           region,
                                           file_format,
                                           i,
                                           keypoint_pad=keypoint_pad)
        pad = pad_dim((cropped_bbox[3] - cropped_bbox[1],
                       cropped_bbox[2] - cropped_bbox[0]))
        xml_path = '{0}/{1}_{2}.xml'.format(base_xml_path, i, _id)

        if os.path.exists(xml_path):
            bboxes = parse_bbox_xml(xml_path)
            if np.sum(np.array(bboxes).ravel()) == 0:
                continue
            cropped_bboxes = []
            for box in bboxes:
                rescaled_box = [
                    box[0] - cropped_bbox[0] + pad[1],
                    box[1] - cropped_bbox[1] + pad[0],
                    box[2] - cropped_bbox[0] + pad[1],
                    box[3] - cropped_bbox[1] + pad[0]
                ]
                rescaled_box[0] = max(rescaled_box[0], pad[1])
                rescaled_box[1] = max(rescaled_box[1], pad[0])
                rescaled_box[2] = min(
                    rescaled_box[2],
                    cropped_bbox[2] - cropped_bbox[0] + pad[1])
                rescaled_box[3] = min(
                    rescaled_box[3],
                    cropped_bbox[3] - cropped_bbox[1] + pad[0])
                cropped_bboxes.append(rescaled_box)
            bboxes = np.array(cropped_bboxes).ravel()
            bboxes = np.append(bboxes, (BOX_COUNT * 4 - len(bboxes)) *
                               [0]).astype(np.int64)
        else:
            continue

        image = image.astype(np.float32)
        example = tf.train.Example(features=tf.train.Features(
            feature={
                'image':
                _bytes_feature(image.tostring()),
                'bbox':
                _bbox_feature(bboxes),
                'dim':
                tf.train.Feature(int64_list=tf.train.Int64List(
                    value=[image.shape[0]]))
            }))
        writer.write(example.SerializeToString())
    writer.close()
Code example #13
def get_arcs_and_ground_truth(data_set, zones):
    ids = Table().read_table(dp.ids_by_data_set(data_set)).column('Id')

    images_tensor = tf.placeholder(tf.float32, (None, 600, 600, 1))
    model = AnchorBabyModel(batch_size=16, image_dim=(600, 600))

    logits_tensor, bboxes_tensor = model.inference(images_tensor, test=True)
    logits_tensor = tf.sigmoid(logits_tensor)
    bboxes_tensor = target_to_box(bboxes_tensor, model.anchors)
    checkpoint_dir = '/home/alon/sda1/tsa-kaggle/train_dir/train_11-25_20:14:05'  #patch crop torso w/ masks
    xml_base_path = '/home/alon/Documents/tsa-kaggle/data/bbox/aps_merged_threats/{0}_{1}.xml'
    xml_base_zone_path = '/home/alon/Documents/tsa-kaggle/data/bbox/aps_threats/zone_{2}/{0}_{1}.xml'
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(
            sess,
            tf.train.get_checkpoint_state(
                checkpoint_dir).model_checkpoint_path)
        arcs_matrix, arcs_labels, arc_ids, arc_zone_labels = [], [], [], []
        for _id in ids:
            print(_id)
            images, cropped_bbox = images_and_crop_by_id(_id)
            logits, bboxes = sess.run([logits_tensor, bboxes_tensor],
                                      feed_dict={images_tensor: images})
            arcs = build_arcs(bboxes, logits)
            for arc in arcs:
                matched = False
                for zone in zones:
                    num_matches = 0
                    original_scale_boxes = arc.original_scale_boxes(
                        cropped_bbox)
                    for i in range(original_scale_boxes.shape[0]):
                        gt_xml_path = xml_base_zone_path.format(i, _id, zone)
                        if os.path.exists(gt_xml_path):
                            gt_bboxes = np.array(parse_bbox_xml(gt_xml_path))
                            if gt_bboxes.size > 0:
                                if np.max(
                                        _iou(
                                            gt_bboxes, original_scale_boxes[i][
                                                np.newaxis])) > 0.5:
                                    num_matches += 1
                    arc_match = num_matches / np.sum(
                        np.sum(original_scale_boxes, axis=1) > 0) >= 0.5
                    if arc_match:
                        matched = True
                        arcs_matrix.append(arc.to_list())
                        arcs_labels.append(True)
                        arc_ids.append(_id)
                        arc_zone_labels.append(zone)
                        break
                if not matched:
                    arcs_matrix.append(arc.to_list())
                    arcs_labels.append(False)
                    arc_ids.append(_id)
                    arc_zone_labels.append(None)
        arcs_matrix, arcs_labels, arc_ids = np.array(arcs_matrix), np.array(
            arcs_labels), np.array(arc_ids)
        arc_zone_labels = np.array(arc_zone_labels)
        np.save(data_set + '_arc_X', arcs_matrix)
        np.save(data_set + '_arc_Y', arcs_labels)
        np.save(data_set + '_arc_ids', arc_ids)
        np.save(data_set + '_arc_zone_labels', arc_zone_labels)
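The arrays saved above (np.save appends the .npy extension) feed whatever calibration model is fit downstream, which is not shown in this listing. A hypothetical example using scikit-learn, assuming arc.to_list() yields fixed-length numeric features:

import numpy as np
from sklearn.linear_model import LogisticRegression

X = np.load('train_arc_X.npy')
y = np.load('train_arc_Y.npy')
calibration_model = LogisticRegression().fit(X, y)
print(calibration_model.score(np.load('validate_arc_X.npy'),
                              np.load('validate_arc_Y.npy')))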