# Example 1
def generateAnchors(train_annotation_folder, train_image_folder,
                    train_cache_file, model_labels, num_anchors=9,
                    net_size=416):
    """Compute YOLO anchor boxes from the training set via k-means.

    Parses the VOC annotations, collects every ground-truth box's width
    and height relative to its image, clusters them into ``num_anchors``
    centroids, and scales the centroids (sorted by width) to
    ``net_size`` pixels.

    Args:
        train_annotation_folder: folder containing the VOC XML annotations.
        train_image_folder: folder containing the training images.
        train_cache_file: cache file passed through to parse_voc_annotation.
        model_labels: class labels to keep when parsing annotations.
        num_anchors: number of anchor boxes to generate (default 9,
            the standard YOLOv3 count).
        net_size: network input size the relative anchors are scaled to
            (default 416, the standard YOLOv3 input).

    Returns:
        Tuple ``(anchor_array, reverse_anchor_array)``: a flat list
        ``[w0, h0, w1, h1, ...]`` of integer anchor sizes sorted by
        ascending width, and the same list in reversed element order.
    """
    print("Generating anchor boxes for training images and annotation...")

    # Only the images are needed here; the returned label map is unused.
    train_imgs, _ = parse_voc_annotation(train_annotation_folder,
                                         train_image_folder,
                                         train_cache_file,
                                         model_labels)

    # Collect (w, h) of every object box, normalized by its image size.
    annotation_dims = []
    for image in train_imgs:
        for obj in image['object']:
            relative_w = (float(obj['xmax']) -
                          float(obj['xmin'])) / image['width']
            relative_h = (float(obj['ymax']) -
                          float(obj['ymin'])) / image['height']
            annotation_dims.append((relative_w, relative_h))

    annotation_dims = np.array(annotation_dims)
    centroids = run_kmeans(annotation_dims, num_anchors)

    print('Average IOU for', num_anchors, 'anchors:',
          '%0.2f' % avg_IOU(annotation_dims, centroids))

    # Sort centroids by width and scale the relative sizes to pixels.
    anchor_array = []
    for i in np.argsort(centroids[:, 0]):
        anchor_array.append(int(centroids[i, 0] * net_size))
        anchor_array.append(int(centroids[i, 1] * net_size))

    reverse_anchor_array = anchor_array[::-1]

    print("Anchor Boxes generated.")
    return anchor_array, reverse_anchor_array
# Example 2
def _main_(args):
    """Evaluate a trained YOLOv3 model on the validation set and print mAP.

    Reads the JSON config named by ``args.conf``, builds a validation
    batch generator from the VOC annotations, loads the saved model,
    and prints the average precision per class plus the overall mAP.
    """
    with open(args.conf) as config_buffer:
        config = json.loads(config_buffer.read())

    ###############################
    #   Create the validation generator
    ###############################
    valid_ints, labels = parse_voc_annotation(
        config['valid']['valid_annot_folder'],
        config['valid']['valid_image_folder'],
        config['valid']['cache_name'],
        config['model']['labels'])

    # When the config pins no explicit label list, fall back to every
    # label discovered in the annotations.
    if len(config['model']['labels']) == 0:
        labels = labels.keys()
    else:
        labels = config['model']['labels']
    labels = sorted(labels)

    valid_generator = BatchGenerator(
        instances=valid_ints,
        anchors=config['model']['anchors'],
        labels=labels,
        # 32: ratio between network input's size and network output's
        # size for YOLOv3.
        downsample=32,
        max_box_per_image=0,
        batch_size=config['train']['batch_size'],
        min_net_size=config['model']['min_input_size'],
        max_net_size=config['model']['max_input_size'],
        shuffle=True,
        jitter=0.0,
        norm=normalize)

    ###############################
    #   Load the model and do evaluation
    ###############################
    os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']

    infer_model = load_model(config['train']['saved_weights_name'])

    # compute mAP for all the classes
    average_precisions = evaluate(infer_model, valid_generator)

    # Per-class AP first, then the mean over all classes.
    for label, average_precision in average_precisions.items():
        print(labels[label] + ': {:.4f}'.format(average_precision))
    mAP = sum(average_precisions.values()) / len(average_precisions)
    print('mAP: {:.4f}'.format(mAP))