# Example no. 1
# 0 (vote count from the scraped source listing)
def predict():
    """Flask view: classify an image given by URL (GET ``?q=``) or upload (POST).

    Returns a JSON payload ``{"results": ...}`` with the top-k predictions on
    success, or a JSON error string when the image cannot be read or decoded.
    """
    if request.method == 'GET':
        try:
            # Image is fetched from a remote URL supplied as the ?q= parameter.
            url = request.args.get('q')
            app.logger.debug('url provided - %s', url)
            input_tensor = transform_image(read_file(url=url))
            values, indices = get_topk(input_tensor)
            results = render_prediction(values, indices)
            return jsonify(results=results)

        except Exception:
            # format_exc() returns the traceback as a string; the original
            # print_exc() call returned None, so the log said "Error: None".
            app.logger.error("Error: %s", traceback.format_exc())
            return jsonify("invalid image url")

    elif request.method == 'POST':
        try:
            file = request.files['file']
            app.logger.debug('file uploaded - %s', file)
            # Optional fallback URL in the form body.
            url = request.form.get("url", None)
            app.logger.debug('url provided - %s', url)

            input_tensor = transform_image(read_file(upload=file, url=url))
            values, indices = get_topk(input_tensor)
            results = render_prediction(values, indices)
            return jsonify(results=results)

        except Exception:
            app.logger.error("Error: %s", traceback.format_exc())
            return jsonify("invalid image")

    else:
        # No exception is active on this path, so the original
        # traceback.print_exc() call could only ever log "None";
        # just report the unsupported method instead.
        app.logger.error("Error: unsupported method %s", request.method)
        return jsonify('invalid request')
def run_across_movie(data_dir, subset, algorithm, temporal_link, gpu_id):
    """Run across-movie person search and print mAP / top-k accuracy.

    Args:
        data_dir: root directory containing ``affinity`` and ``meta`` subdirs.
        subset: dataset split name used to locate affinity/meta files.
        algorithm: ``'ppcc'`` or ``'lp'``.
        temporal_link: if True, link tracklets with 'max' affinity,
            otherwise with 'mean'.
        gpu_id: GPU device id forwarded to the solver.

    Raises:
        ValueError: if ``algorithm`` is not one of the supported names.
    """
    affinity_dir = osp.join(data_dir, 'affinity', subset, 'across')
    list_file = osp.join(data_dir, 'meta', 'across_{}.json'.format(subset))
    pids, gt_list, gt_dict = read_across_movie_meta(list_file)

    # read affinity matrices: face cast-tracklet (ct) and body
    # tracklet-tracklet (tt), both with the same temporal link type
    link_type = 'max' if temporal_link else 'mean'
    ct_affmat = read_affmat_across_movies(affinity_dir,
                                          region='face',
                                          data_type='ct',
                                          link_type=link_type)
    tt_affmat = read_affmat_across_movies(affinity_dir,
                                          region='body',
                                          data_type='tt',
                                          link_type=link_type)

    # run algorithm
    if algorithm == 'ppcc':
        result = run_ccpp(ct_affmat, tt_affmat, gpu_id)
    elif algorithm == 'lp':
        result = run_lp(ct_affmat, tt_affmat, gpu_id)
    else:
        # fixed typo in the original message ('algrothm')
        raise ValueError('No such algorithm: {}'.format(algorithm))

    # parse results and get performance
    ret_dict = affmat2retdict(result, pids)
    ret_list = affmat2retlist(result, pids)
    mAP = get_mAP(gt_dict, ret_dict)
    topk = get_topk(gt_list, ret_list)

    print('mAP: {:.4f}\ttop1: {:.4f}\ttop3: {:.4f}\ttop5: {:.4f}'.format(
        mAP, topk[0], topk[2], topk[4]))
# Example no. 3
# 0 (vote count from the scraped source listing)
def run_across_movie(data_dir, subset, data_type, face_ratio):
    """Evaluate across-movie person search on a single or fused affinity matrix.

    Args:
        data_dir: root directory containing ``affinity`` and ``meta`` subdirs.
        subset: dataset split name used to locate affinity/meta files.
        data_type: ``'face'``, ``'body'``, ``'ave_fusion'`` (weighted average)
            or ``'max_fusion'`` (element-wise maximum).
        face_ratio: weight of the face affinity when fusing with
            ``'ave_fusion'``; the body affinity gets ``1 - face_ratio``.

    Raises:
        ValueError: if ``data_type`` is not one of the supported names.
    """
    affinity_dir = osp.join(data_dir, 'affinity', subset, 'across')
    list_file = osp.join(data_dir, 'meta', 'across_{}.json'.format(subset))
    pids, gt_list, gt_dict = read_across_movie_meta(list_file)

    # read affinity matrix
    if data_type == 'face':
        affmat = read_affmat_across_movies(affinity_dir,
                                           region='face',
                                           data_type='ct')
    elif data_type == 'body':
        affmat = read_affmat_across_movies(affinity_dir,
                                           region='body',
                                           data_type='ct')
    elif data_type in ('ave_fusion', 'max_fusion'):
        face_affmat = read_affmat_across_movies(affinity_dir,
                                                region='face',
                                                data_type='ct')
        body_affmat = read_affmat_across_movies(affinity_dir,
                                                region='body',
                                                data_type='ct')
        if data_type == 'ave_fusion':
            affmat = face_ratio * face_affmat + (1 - face_ratio) * body_affmat
        else:
            affmat = np.maximum(face_affmat, body_affmat)
    else:
        # previously any unrecognized value silently fell through to max
        # fusion; fail loudly instead, mirroring the algorithm variant
        raise ValueError('No such data type: {}'.format(data_type))

    # parse results and get performance
    ret_dict = affmat2retdict(affmat, pids)
    ret_list = affmat2retlist(affmat, pids)
    mAP = get_mAP(gt_dict, ret_dict)
    topk = get_topk(gt_list, ret_list)

    print('mAP: {:.4f}\ttop1: {:.4f}\ttop3: {:.4f}\ttop5: {:.4f}'.format(
        mAP, topk[0], topk[2], topk[4]))
def run_in_movie(data_dir, subset, algorithm, temporal_link, gpu_id):
    """Run in-movie person search per movie and print averaged metrics.

    For each movie id in the split's meta file, reads face (ct) and body (tt)
    affinity matrices, runs the chosen algorithm, and accumulates mAP
    (weighted by query count) plus top-1/3/5 accuracy (averaged per movie).

    Args:
        data_dir: root directory containing ``affinity`` and ``meta`` subdirs.
        subset: dataset split name used to locate affinity/meta files.
        algorithm: ``'ppcc'`` or ``'lp'``.
        temporal_link: if True, link tracklets with 'max' affinity,
            otherwise with 'mean'.
        gpu_id: GPU device id forwarded to the solver.

    Raises:
        ValueError: if ``algorithm`` is not one of the supported names.
    """
    affinity_dir = osp.join(data_dir, 'affinity', subset, 'in')
    list_file = osp.join(data_dir, 'meta', subset + '.json')
    mid_list, meta_info = read_meta(list_file)

    # link type is constant across movies; hoisted out of the loop
    link_type = 'max' if temporal_link else 'mean'

    average_mAP = 0
    search_count = 0
    average_top1 = 0
    average_top3 = 0
    average_top5 = 0
    for mid in mid_list:
        # read per-movie data (the meta 'num_tracklet' field is unused here)
        pids = meta_info[mid]['pids']
        gt_list, gt_dict = parse_label(meta_info, mid)

        # read affinity matrices
        ct_affmat = read_affmat_of_one_movie(affinity_dir,
                                             mid,
                                             region='face',
                                             data_type='ct',
                                             link_type=link_type)
        tt_affmat = read_affmat_of_one_movie(affinity_dir,
                                             mid,
                                             region='body',
                                             data_type='tt',
                                             link_type=link_type)

        # run algorithm
        if algorithm == 'ppcc':
            result = run_ccpp(ct_affmat, tt_affmat, gpu_id)
        elif algorithm == 'lp':
            result = run_lp(ct_affmat, tt_affmat, gpu_id)
        else:
            # fixed typo in the original message ('algrothm')
            raise ValueError('No such algorithm: {}'.format(algorithm))

        # parse results and get performance
        ret_dict = affmat2retdict(result, pids)
        ret_list = affmat2retlist(result, pids)
        mAP = get_mAP(gt_dict, ret_dict)
        topk = get_topk(gt_list, ret_list)
        average_mAP += mAP * len(pids)
        search_count += len(pids)
        # movies with fewer than 3 (resp. 5) ranks count as a perfect hit
        # for top3/top5 — presumably intentional; confirm with the authors
        max_k = len(topk)
        top3 = 1 if max_k < 3 else topk[2]
        top5 = 1 if max_k < 5 else topk[4]
        average_top1 += topk[0]
        average_top3 += top3
        average_top5 += top5

    # get average performance: mAP weighted by query count, top-k per movie
    average_mAP = average_mAP / search_count
    average_top1 = average_top1 / len(mid_list)
    average_top3 = average_top3 / len(mid_list)
    average_top5 = average_top5 / len(mid_list)
    print(
        'Average mAP: {:.4f}\tAverage top1: {:.4f}\tAverage top3: {:.4f}\tAverage top5: {:.4f}'
        .format(average_mAP, average_top1, average_top3, average_top5))
        # NOTE(review): this span is a fragment of a training/evaluation loop
        # whose enclosing function and loop headers are outside this view;
        # it is documented in place only.
        test_mi_f1 = f1_score(y_true, y_pred, average='micro')
        test_ma_f1 = f1_score(y_true, y_pred, average='macro')
        # accuracy from correct/total counters accumulated upstream
        test_accuracy = correct/total
        writer.add_scalar('accuracy_test/accuracy_test', test_accuracy, global_batch_counter_test)
        writer.add_scalar('accuracy_test/micro_f1_test', test_mi_f1, global_batch_counter_test)
        writer.add_scalar('accuracy_test/macro_f1_test', test_ma_f1, global_batch_counter_test)
        print('[%d] epoch, [%.3f] training loss, [%.3f] testing loss, [%.3f] testing accuracy'
              %(epoch, loss_train/len(train_loader), loss_test/test_idx, test_accuracy))

        print('Saving models and optimizer...')
        save_model_optimizer(model, optimizer, epoch, global_batch_counter_train, global_batch_counter_test, dir)
        print('Saved!')

        # Bootstrapping: fold pseudo-labeled top-k test samples per class into
        # the training set while usage stays under the configured cap.
        if bootstrapping_start_epoch and topk_pre_class*num_labels <= len(test_set) * bootstrapping_max_usage:
            print('Bootstrapping...')
            extra_train_set = get_topk(logits, topk_pre_class, y_pred, test_set)
            train_set = ConcatDataset([raw_train_set, extra_train_set])
            train_loader = DataLoader(train_set, sampler=RandomSampler(train_set), batch_size=batch_size_train)
            # grow the per-class quota in proportion to the enlarged train set
            topk_pre_class = int(len(train_set)//num_labels * bootstrapping_increase_coef)

        loss_train = 0
        model.train()

    elif not save_freq_n_epoch:
        print('Saving models and optimizer...')
        save_model_optimizer(model, optimizer, epoch, global_batch_counter_train, global_batch_counter_test, dir)
        print('Saved!')

    if DEBUG:
        # presumably exits after one epoch for quick debugging — TODO confirm
        break
# Example no. 6
# 0 (vote count from the scraped source listing)
def run_in_movie(data_dir, subset, data_type, face_ratio):
    """Evaluate in-movie person search on single or fused affinity matrices.

    For each movie id in the split's meta file, builds the requested affinity
    matrix and accumulates mAP (weighted by query count) plus top-1/3/5
    accuracy (averaged per movie).

    Args:
        data_dir: root directory containing ``affinity`` and ``meta`` subdirs.
        subset: dataset split name used to locate affinity/meta files.
        data_type: ``'face'``, ``'body'``, ``'ave_fusion'`` (weighted average)
            or ``'max_fusion'`` (element-wise maximum).
        face_ratio: weight of the face affinity when fusing with
            ``'ave_fusion'``; the body affinity gets ``1 - face_ratio``.

    Raises:
        ValueError: if ``data_type`` is not one of the supported names.
    """
    affinity_dir = osp.join(data_dir, 'affinity', subset, 'in')
    list_file = osp.join(data_dir, 'meta', subset + '.json')
    mid_list, meta_info = read_meta(list_file)

    average_mAP = 0
    search_count = 0
    average_top1 = 0
    average_top3 = 0
    average_top5 = 0
    for mid in mid_list:
        # read per-movie data (the meta 'num_tracklet' field is unused here)
        pids = meta_info[mid]['pids']
        gt_list, gt_dict = parse_label(meta_info, mid)

        # read affinity matrix
        if data_type == 'face':
            affmat = read_affmat_of_one_movie(affinity_dir,
                                              mid,
                                              region='face',
                                              data_type='ct')
        elif data_type == 'body':
            affmat = read_affmat_of_one_movie(affinity_dir,
                                              mid,
                                              region='body',
                                              data_type='ct')
        elif data_type in ('ave_fusion', 'max_fusion'):
            face_affmat = read_affmat_of_one_movie(affinity_dir,
                                                   mid,
                                                   region='face',
                                                   data_type='ct')
            body_affmat = read_affmat_of_one_movie(affinity_dir,
                                                   mid,
                                                   region='body',
                                                   data_type='ct')
            if data_type == 'ave_fusion':
                affmat = face_ratio * face_affmat + (1 -
                                                     face_ratio) * body_affmat
            else:
                affmat = np.maximum(face_affmat, body_affmat)
        else:
            # previously any unrecognized value silently fell through to
            # max fusion; fail loudly instead
            raise ValueError('No such data type: {}'.format(data_type))

        # parse results and get performance
        ret_dict = affmat2retdict(affmat, pids)
        ret_list = affmat2retlist(affmat, pids)
        mAP = get_mAP(gt_dict, ret_dict)
        topk = get_topk(gt_list, ret_list)
        average_mAP += mAP * len(pids)
        search_count += len(pids)
        # movies with fewer than 3 (resp. 5) ranks count as a perfect hit
        # for top3/top5 — presumably intentional; confirm with the authors
        max_k = len(topk)
        top3 = 1 if max_k < 3 else topk[2]
        top5 = 1 if max_k < 5 else topk[4]
        average_top1 += topk[0]
        average_top3 += top3
        average_top5 += top5

    # get average performance: mAP weighted by query count, top-k per movie
    average_mAP = average_mAP / search_count
    average_top1 = average_top1 / len(mid_list)
    average_top3 = average_top3 / len(mid_list)
    average_top5 = average_top5 / len(mid_list)
    print(
        'Average mAP: {:.4f}\tAverage top1: {:.4f}\tAverage top3: {:.4f}\tAverage top5: {:.4f}'
        .format(average_mAP, average_top1, average_top3, average_top5))