Code example #1
import glob
import os
from random import randint

import numpy as np


def worker(path):
    # Generator: builds batches of RoIs, feature-map indices, categories and
    # bounding boxes for detector-head training. ILSVRC_dataset_path,
    # anno_path, img_path, parse_label and produce_batch are assumed to be
    # defined at module level.
    print('worker start ' + path)
    batch_rois = []
    batch_featuremap_inds = []
    batch_categories = []
    batch_bboxes = []
    fc_index = 0
    dataset = {}
    #'/ImageSets/DET/train_*'
    for fname in glob.glob(ILSVRC_dataset_path + path):
        print(fname)
        with open(fname, 'r') as f:
            basename = os.path.basename(fname)
            category = int(basename.split('_')[1].split('.')[0])
            content = []
            for line in f:
                if 'extra' not in line:
                    content.append(line)
            dataset[category] = content
    print(len(dataset))
    while True:
        try:
            category = randint(1, 200)
            content = dataset[category]
            n = randint(0, len(content) - 1)
            line = content[n]
            _, gt_boxes, h_w = parse_label(anno_path + line.split()[0] +
                                           '.xml')
            if len(gt_boxes) == 0:
                continue
            rois, bboxes, categories = produce_batch(
                img_path + line.split()[0] + '.JPEG', gt_boxes, h_w, category)
        except Exception:
            # print('parse label or produce batch failed: for: '+line.split()[0])
            # traceback.print_exc()
            continue
        if len(rois) <= 0:
            continue

        for i in range(len(rois)):
            batch_rois.append(rois[i])
            batch_featuremap_inds.append(fc_index)
            batch_categories.append(categories[i])
            batch_bboxes.append(bboxes[i])
        # feature_map is not defined in this function; it is assumed to exist
        # at module scope. Only the first slot of the zero-padded feature
        # batch f is actually filled.
        a = feature_map
        b = np.asarray(batch_rois)
        c = np.asarray(batch_featuremap_inds)
        d = np.asarray(batch_categories)
        e = np.asarray(batch_bboxes)
        f = np.zeros((len(rois), a.shape[1], a.shape[2], a.shape[3]))
        f[0] = feature_map[0]
        yield [f, b, c], [d, e]
        batch_rois = []
        batch_featuremap_inds = []
        batch_categories = []
        batch_bboxes = []
        fc_index = 0
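A quick smoke-test sketch for this generator: pull one batch and inspect its structure. The glob pattern is the one hinted at in the source comment, and the module-level paths and helpers above must already be set up.

# Hedged usage sketch; the pattern string is taken from the source comment.
gen = worker('/ImageSets/DET/train_*')
(feat, rois, inds), (cats, boxes) = next(gen)
print(feat.shape, rois.shape, inds.shape, cats.shape, boxes.shape)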
Code example #2
import glob
import os
import traceback

import numpy as np


def worker(path, q):
    # Multiprocessing worker: builds RoI batches from precomputed feature
    # maps and puts them on the shared queue q. ILSVRC_dataset_path,
    # anno_path, feature_map_path, parse_label and produce_batch are assumed
    # to be defined at module level.
    print('worker start ' + path)
    batch_rois = []
    batch_featuremap_inds = []
    batch_categories = []
    batch_bboxes = []
    fc_index = 0
    #'/ImageSets/DET/train_*'
    for fname in glob.glob(ILSVRC_dataset_path + path):
        print(fname)
        with open(fname, 'r') as f:
            basename = os.path.basename(fname)
            category = int(basename.split('_')[1].split('.')[0])
            for line in f:
                if 'extra' not in line:
                    feature_map_file = feature_map_path + line.split()[0]
                    if not os.path.exists(feature_map_file):
                        continue

                    try:
                        feature_map = np.load(feature_map_file)['fc']
                        _, gt_boxes, h_w = parse_label(anno_path +
                                                       line.split()[0] +
                                                       '.xml')
                        if len(gt_boxes) == 0:
                            continue
                        rois, bboxes, categories = produce_batch(
                            feature_map, gt_boxes, h_w, category)
                    except Exception:
                        print('parse label or produce batch failed: for: ' +
                              line.split()[0])
                        traceback.print_exc()
                        continue
                    if len(rois) <= 0:
                        continue

                    for i in range(len(rois)):
                        batch_rois.append(rois[i])
                        batch_featuremap_inds.append(fc_index)
                        batch_categories.append(categories[i])
                        batch_bboxes.append(bboxes[i])
                    a = feature_map
                    b = np.asarray(batch_rois)
                    c = np.asarray(batch_featuremap_inds)
                    d = np.asarray(batch_categories)
                    e = np.asarray(batch_bboxes)
                    # f is only needed by the commented-out generator variant
                    # below; the queue path sends the raw feature_map instead.
                    f = np.zeros(
                        (len(rois), a.shape[1], a.shape[2], a.shape[3]))
                    f[0] = feature_map[0]
                    q.put([a, b, c, d, e])
                    # yield [f, b, c], [d, e]
                    batch_rois = []
                    batch_featuremap_inds = []
                    batch_categories = []
                    batch_bboxes = []
                    fc_index = 0
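A sketch of how this queue-based worker might be driven with the standard multiprocessing module, so several processes feed one queue that the training loop drains; the shard patterns and the consumer loop are assumptions, not part of the original project.

from multiprocessing import Process, Queue

if __name__ == '__main__':
    q = Queue(maxsize=32)  # bounded so workers block when training falls behind
    shards = ['/ImageSets/DET/train_1*', '/ImageSets/DET/train_2*']  # hypothetical shards
    procs = [Process(target=worker, args=(s, q), daemon=True) for s in shards]
    for p in procs:
        p.start()

    while True:  # the training loop drains ready-made batches
        feature_map, rois, inds, categories, bboxes = q.get()
        # ... feed the batch to the model here ...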
Code example #3
import glob
import traceback

import numpy as np


def input_generator():
    # Endless generator yielding (image tiles, [labels, bboxes]) batches of
    # size BATCH_SIZE. ILSVRC_dataset_path, anno_path, img_path, BATCH_SIZE,
    # parse_label and produce_batch are assumed to be defined at module level.
    batch_tiles = []
    batch_labels = []
    batch_bboxes = []
    count = 0
    while True:
        for fname in glob.glob(ILSVRC_dataset_path + '/ImageSets/DET/train_*'):
            with open(fname, 'r') as f:
                for line in f:
                    if 'extra' not in line:
                        try:
                            category, gt_boxes, scale = parse_label(
                                anno_path + line.split()[0] + '.xml')
                            if len(gt_boxes) == 0:
                                continue
                            tiles, labels, bboxes = produce_batch(
                                img_path + line.split()[0] + '.JPEG', gt_boxes,
                                scale)
                        except Exception:
                            print(
                                'parse label or produce batch failed: for: ' +
                                line.split()[0])
                            traceback.print_exc()
                            continue
                        for i in range(len(tiles)):
                            batch_tiles.append(tiles[i])
                            batch_labels.append(labels[i])
                            batch_bboxes.append(bboxes[i])
                            if len(batch_tiles) == BATCH_SIZE:
                                a = np.asarray(batch_tiles)
                                b = np.asarray(batch_labels)
                                c = np.asarray(batch_bboxes)
                                if not a.any() or not b.any() or not c.any():
                                    print("empty array found.")

                                yield a, [b, c]
                                batch_tiles = []
                                batch_labels = []
                                batch_bboxes = []
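A training-call sketch using the older Keras generator API (newer versions accept a generator directly in model.fit). The compiled model rpn_model, with one image-tile input and two outputs matching what this generator yields, and the step counts are assumptions.

# Hypothetical training call; rpn_model, steps and epochs are assumptions.
rpn_model.fit_generator(input_generator(),
                        steps_per_epoch=1000,
                        epochs=20)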
Code example #4
import os.path as osp


def run_in_movie(data_dir, subset, algorithm, temporal_link, gpu_id):
    # Evaluates in-movie search for every movie in the subset and reports
    # mAP and top-1/3/5 accuracy. read_meta, parse_label,
    # read_affmat_of_one_movie, run_ccpp, run_lp, affmat2retdict,
    # affmat2retlist, get_mAP and get_topk come from the surrounding project.
    affinity_dir = osp.join(data_dir, 'affinity', subset, 'in')
    list_file = osp.join(data_dir, 'meta', subset + '.json')
    mid_list, meta_info = read_meta(list_file)

    average_mAP = 0
    search_count = 0
    average_top1 = 0
    average_top3 = 0
    average_top5 = 0
    for i, mid in enumerate(mid_list):
        # read data
        tnum = meta_info[mid]['num_tracklet']
        pids = meta_info[mid]['pids']
        gt_list, gt_dict = parse_label(meta_info, mid)

        # read affinity matrix
        if temporal_link:
            link_type = 'max'
        else:
            link_type = 'mean'
        ct_affmat = read_affmat_of_one_movie(affinity_dir,
                                             mid,
                                             region='face',
                                             data_type='ct',
                                             link_type=link_type)
        tt_affmat = read_affmat_of_one_movie(affinity_dir,
                                             mid,
                                             region='body',
                                             data_type='tt',
                                             link_type=link_type)

        # run algorithm
        if algorithm == 'ppcc':
            result = run_ccpp(ct_affmat, tt_affmat, gpu_id)
        elif algorithm == 'lp':
            result = run_lp(ct_affmat, tt_affmat, gpu_id)
        else:
            raise ValueError('No such algorithm: {}'.format(algorithm))

        # parse results and get performance
        ret_dict = affmat2retdict(result, pids)
        ret_list = affmat2retlist(result, pids)
        mAP = get_mAP(gt_dict, ret_dict)
        topk = get_topk(gt_list, ret_list)
        average_mAP += mAP * len(pids)
        search_count += len(pids)
        # With fewer than k candidates, the top-k accuracy is counted as 1.
        max_k = len(topk)
        if max_k < 3:
            top3 = 1
        else:
            top3 = topk[2]
        if max_k < 5:
            top5 = 1
        else:
            top5 = topk[4]
        average_top1 += topk[0]
        average_top3 += top3
        average_top5 += top5

    # get average performance
    average_mAP = average_mAP / search_count
    average_top1 = average_top1 / len(mid_list)
    average_top3 = average_top3 / len(mid_list)
    average_top5 = average_top5 / len(mid_list)
    print(
        'Average mAP: {:.4f}\tAverage top1: {:.4f}\tAverage top3: {:.4f}\tAverage top5: {:.4f}'
        .format(average_mAP, average_top1, average_top3, average_top5))
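A call sketch; the 'ppcc' and 'lp' values and the temporal_link flag follow from the branches above, while the data directory, subset name and GPU id are placeholders.

# Hypothetical invocation; the paths are assumptions.
run_in_movie(data_dir='data/csm', subset='val',
             algorithm='ppcc', temporal_link=True, gpu_id=0)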
Code example #5
import os.path as osp

import numpy as np


def run_in_movie(data_dir, subset, data_type, face_ratio):
    # Variant that evaluates face-only, body-only, or fused (average or max)
    # affinity matrices. Helper functions are shared with the version above.
    affinity_dir = osp.join(data_dir, 'affinity', subset, 'in')
    list_file = osp.join(data_dir, 'meta', subset + '.json')
    mid_list, meta_info = read_meta(list_file)

    average_mAP = 0
    search_count = 0
    average_top1 = 0
    average_top3 = 0
    average_top5 = 0
    for i, mid in enumerate(mid_list):
        # read data
        tnum = meta_info[mid]['num_tracklet']
        pids = meta_info[mid]['pids']
        gt_list, gt_dict = parse_label(meta_info, mid)

        # read affinity matrix
        if data_type == 'face':
            affmat = read_affmat_of_one_movie(affinity_dir,
                                              mid,
                                              region='face',
                                              data_type='ct')
        elif data_type == 'body':
            affmat = read_affmat_of_one_movie(affinity_dir,
                                              mid,
                                              region='body',
                                              data_type='ct')
        else:
            # Fuse face and body affinities: weighted average for
            # 'ave_fusion', element-wise maximum otherwise.
            face_affmat = read_affmat_of_one_movie(affinity_dir,
                                                   mid,
                                                   region='face',
                                                   data_type='ct')
            body_affmat = read_affmat_of_one_movie(affinity_dir,
                                                   mid,
                                                   region='body',
                                                   data_type='ct')
            if data_type == 'ave_fusion':
                affmat = face_ratio * face_affmat + (1 -
                                                     face_ratio) * body_affmat
            else:
                affmat = np.maximum(face_affmat, body_affmat)

        # parse results and get performance
        ret_dict = affmat2retdict(affmat, pids)
        ret_list = affmat2retlist(affmat, pids)
        mAP = get_mAP(gt_dict, ret_dict)
        topk = get_topk(gt_list, ret_list)
        average_mAP += mAP * len(pids)
        search_count += len(pids)
        max_k = len(topk)
        if max_k < 3:
            top3 = 1
        else:
            top3 = topk[2]
        if max_k < 5:
            top5 = 1
        else:
            top5 = topk[4]
        average_top1 += topk[0]
        average_top3 += top3
        average_top5 += top5

    # get average performance
    average_mAP = average_mAP / search_count
    average_top1 = average_top1 / len(mid_list)
    average_top3 = average_top3 / len(mid_list)
    average_top5 = average_top5 / len(mid_list)
    print(
        'Average mAP: {:.4f}\tAverage top1: {:.4f}\tAverage top3: {:.4f}\tAverage top5: {:.4f}'
        .format(average_mAP, average_top1, average_top3, average_top5))
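A matching call sketch for this variant; valid data_type values follow from the branches above ('face', 'body', 'ave_fusion', or anything else for max fusion), and the remaining arguments are assumptions.

# Hypothetical invocation; face_ratio weights the face affinity in 'ave_fusion'.
run_in_movie(data_dir='data/csm', subset='val',
             data_type='ave_fusion', face_ratio=0.7)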
Code example #6
File: config.py Project: xinDW/DVSR
from easydict import EasyDict as edict  # assumed source of edict used below

train_mr_img_path = "data/example-data/tubulin/train/mr/"

using_batch_norm = False
train_test_data_path = None
train_valid_lr_path = None  # validation on the fly

valid_lr_img_path = "data/example-data/tubulin/inference/lr/"

valid_block_size = [64, 64, 64, 1]  # [depth, height, width, channels]
valid_block_overlap = 0.2

config = edict()
config.TRAIN = edict()
config.VALID = edict()

# `label` and `parse_label` are defined elsewhere in the project.
params = parse_label(label)
archi2 = params['archi2']  # one of ['rdn', 'unet', 'dbpn']
archi1 = params['archi1']  # one of [None, 'dbpn', 'denoise']; None if single-stage
loss = params['loss']  # 'mse' or 'mae'
factor = params['factor']
normalization = params['norm']

config.archi1 = archi1
config.archi2 = archi2
config.loss = loss

config.TRAIN.ckpt_saving_interval = 10
config.TRAIN.batch_size = 1
config.TRAIN.beta1 = 0.9
config.TRAIN.n_epoch = 500
config.TRAIN.decay_every = 50
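A sketch of how an edict-based config like this is typically consumed from another module; the attribute names below are taken from the excerpt, while the importing code itself is an assumption.

# Hypothetical consumer of the config.py module above.
from config import config

print(config.archi1, config.archi2, config.loss)
print(config.TRAIN.batch_size, config.TRAIN.n_epoch)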