Example #1
    def search(self, df, params):
        skf = StratifiedKFold(n_splits=self.n_splits,
                              random_state=self.random_state)
        params_combos = self._generate_params_combos(params)
        self.top = []
        for combo in params_combos:
            print(combo)
            scores = []
            counter = 0
            for train_index, test_index in skf.split(X=df, y=df.group):
                print("{} split".format(counter))
                counter += 1
                df_train = df.iloc[train_index]
                df_test = df.iloc[test_index]

                df_neg = create_negative_examples(
                    df=df,
                    df_train=df_train,
                    num_neg_exs=10 * df_train.shape[0],
                    random_state=self.random_state)
                svd = MySVD()
                svd.set_params(**combo)

                svd.fit(df_train, df_neg)

                # transform df_test
                svd_group_encoder_dict = dict(
                    zip(svd.group_encoder.classes_,
                        list(range(len(svd.group_encoder.classes_)))))
                encoded_corpus_test = df_test.groupby("user").agg(
                    lambda x: [svd_group_encoder_dict[item]
                               for item in x]).reset_index()

                encoded_corpus_test['recs'] = svd.predict(
                    encoded_corpus_test.user.values)

                score = mean_average_precision(
                    encoded_corpus_test.group.values,
                    encoded_corpus_test.recs.values)

                print('score: {}'.format(score))
                scores.append(score)

            self.top.append(
                TopItem(combo=combo, score=sum(scores) / len(scores)))
        self.top = sorted(self.top, key=lambda item: item.score, reverse=True)
        return self.top
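For context, examples #1 and #4 call mean_average_precision(actual, predicted) on two parallel arrays of per-user item lists (true groups and recommended groups). A minimal sketch under the assumption that the function follows the ml_metrics-style mapk signature; the project's own implementation is not shown here and may differ:

import numpy as np

def apk(actual, predicted, k=10):
    # average precision at k for one user: each new hit contributes its
    # precision-at-rank, normalized by the best achievable number of hits
    predicted = list(predicted)[:k]
    hits, score = 0, 0.0
    for rank, item in enumerate(predicted, start=1):
        if item in actual and item not in predicted[:rank - 1]:
            hits += 1
            score += hits / rank
    return score / min(len(actual), k) if actual else 0.0

def mean_average_precision(actual_lists, predicted_lists, k=10):
    # mean of per-user average precision over the two parallel collections
    return np.mean([apk(a, p, k)
                    for a, p in zip(actual_lists, predicted_lists)])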
Example #2
File: train.py  Project: luweishuang/IMN
        def test_step():
            results = defaultdict(list)
            num_test = 0
            num_correct = 0.0
            test_batches = data_helpers.batch_iter(test_dataset,
                                                   FLAGS.batch_size,
                                                   1,
                                                   target_loss_weight,
                                                   FLAGS.max_utter_len,
                                                   FLAGS.max_utter_num,
                                                   FLAGS.max_response_len,
                                                   shuffle=False)
            for test_batch in test_batches:
                x_utterances, x_response, x_utterances_len, x_response_len, x_utters_num, x_target, x_target_weight, id_pairs = test_batch
                feed_dict = {
                    imn.utterances: x_utterances,
                    imn.response: x_response,
                    imn.utterances_len: x_utterances_len,
                    imn.response_len: x_response_len,
                    imn.utters_num: x_utters_num,
                    imn.target: x_target,
                    imn.target_loss_weight: x_target_weight,
                    imn.dropout_keep_prob: 1.0
                }
                batch_accuracy, predicted_prob = sess.run(
                    [imn.accuracy, imn.probs], feed_dict)
                num_test += len(predicted_prob)
                if num_test % 1000 == 0:
                    print(num_test)

                num_correct += len(predicted_prob) * batch_accuracy
                for i, prob_score in enumerate(predicted_prob):
                    question_id, response_id, label = id_pairs[i]
                    results[question_id].append(
                        (response_id, label, prob_score))

            #calculate top-1 precision
            print('num_test_samples: {}  test_accuracy: {}'.format(
                num_test, num_correct / num_test))
            accu, precision, recall, f1, loss = metrics.classification_metrics(
                results)
            print('Accuracy: {}, Precision: {}  Recall: {}  F1: {} Loss: {}'.
                  format(accu, precision, recall, f1, loss))

            mvp = metrics.mean_average_precision(results)
            mrr = metrics.mean_reciprocal_rank(results)
            top_1_precision = metrics.top_1_precision(results)
            total_valid_query = metrics.get_num_valid_query(results)
            print(
                'MAP (mean average precision): {}\tMRR (mean reciprocal rank): {}\tTop-1 precision: {}\tNum_query: {}'
                .format(mvp, mrr, top_1_precision, total_valid_query))

            return mrr
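Examples #2, #6, #10, and #17 collect results[question_id] = [(response_id, label, prob_score), ...] and hand the dict to metrics.mean_average_precision. The metrics module itself is not part of this listing; a hedged sketch of what such a dict-based MAP could look like:

def mean_average_precision(results):
    # results: {query_id: [(candidate_id, label, score), ...]}
    aps = []
    for candidates in results.values():
        # rank this query's candidates by predicted score, highest first
        ranked = sorted(candidates, key=lambda c: c[2], reverse=True)
        hits, precisions = 0, []
        for rank, (_, label, _) in enumerate(ranked, start=1):
            if int(label) == 1:
                hits += 1
                precisions.append(hits / rank)
        if precisions:  # skip queries with no relevant candidate
            aps.append(sum(precisions) / len(precisions))
    return sum(aps) / len(aps) if aps else 0.0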
Example #3
    def compute_metrics():
        with torch.no_grad():
            man_pdists_np = dist_fn(X).sqrt().cpu().numpy()
        ad = average_distortion(g_pdists_np, man_pdists_np)
        if g is None:
            return ad, None

        # TODO(ccruceru): Make sure this is correct. Try to reproduce the
        # result from the ref. paper on 10D Euclidean manifold.
        man_pdists_sym = pdists_vec_to_sym(man_pdists_np, n)
        mean_ap = mean_average_precision(g, man_pdists_sym)
        return ad, mean_ap
Example #4
    def search(self, df, algotype, params):
        assert algotype in ('lda', 'plsa')
        skf = StratifiedKFold(n_splits=self.n_splits,
                              random_state=self.random_state)
        params_combos = self._generate_params_combos(params)
        self.top = []
        transformer = Transformer()
        transformer.fit(df)
        for combo in params_combos:
            print(combo)
            scores = []
            counter = 0
            for train_index, test_index in skf.split(X=df, y=df.group):
                print("{} split".format(counter))
                counter += 1

                df_train = df.iloc[train_index]
                df_test = df.iloc[test_index]

                corpus_train = transformer.transform(df_train)
                corpus_test = transformer.transform(df_test)

                if algotype == 'lda':
                    est = LDAAdapter()
                else:
                    est = MyPLSA()

                est.set_params(**combo)

                if algotype == 'lda':
                    est.fit(corpus_train)
                else:
                    num_groups = df.group.nunique()
                    est.fit(corpus_train, num_groups)

                corpus_test['recs'] = est.predict(corpus_test.user.values)

                corpus_test['group'] = corpus_test.group.apply(
                    lambda x: [item[0] for item in x])

                score = mean_average_precision(corpus_test.group.values,
                                               corpus_test.recs.values)

                print('score: {}'.format(score))

                scores.append(score)

            self.top.append(
                TopItem(combo=combo, score=sum(scores) / len(scores)))

        self.top = sorted(self.top, key=lambda item: item.score, reverse=True)
        return self.top
Example #5
def compute_map_gt_det(det_file, gt_file):
    det_list = read_detections(det_file)
    gt_list = read_detections(gt_file)

    frames = []

    for i in range(0, len(det_list)):
        frame = Frame(i)
        frame.detections = det_list[i]
        frame.ground_truth = gt_list[i]
        frames.append(frame)

    mAP = mean_average_precision(frames, ignore_classes=True)

    return mAP
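Examples #5 and #13-#16 evaluate object detections, so mean_average_precision there operates on Frame objects carrying ground_truth and detections. That Frame-based implementation belongs to the project; the sketch below only illustrates single-class average precision with greedy IoU matching at 0.5, the building block such detection mAP code typically averages over classes (interpolation and matching details may differ):

import numpy as np

def iou(box_a, box_b):
    # boxes given as (x1, y1, x2, y2)
    xa, ya = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    xb, yb = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0, xb - xa) * max(0, yb - ya)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return inter / float(area_a + area_b - inter) if inter else 0.0

def average_precision(gt_boxes, det_boxes, det_scores, iou_thr=0.5):
    # greedy matching: walk detections by decreasing confidence and count a
    # true positive when an unmatched ground-truth box overlaps enough
    order = np.argsort(det_scores)[::-1]
    matched, tp = set(), np.zeros(len(det_boxes))
    for i, d in enumerate(order):
        ious = [iou(det_boxes[d], g) for g in gt_boxes]
        best = int(np.argmax(ious)) if ious else -1
        if best >= 0 and ious[best] >= iou_thr and best not in matched:
            tp[i] = 1
            matched.add(best)
    cum_tp = np.cumsum(tp)
    recall = cum_tp / max(len(gt_boxes), 1)
    precision = cum_tp / np.arange(1, len(tp) + 1)
    # area under the (uninterpolated) precision-recall curve
    return float(np.trapz(precision, recall)) if len(tp) else 0.0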
Example #6
def run_test(dir_path, op_name, sess, training, accuracy, prob, pair_ids, output_layer):
    results = defaultdict(list)
    num_test = 0
    num_correct = 0.0
    n_updates = 0
    mrr = 0
    t0 = time()
    try:
        while True:
            n_updates += 1

            batch_accuracy, predicted_prob, pair_ = sess.run([accuracy, prob, pair_ids], feed_dict={training: False})
            question_id, answer_id, label = pair_
            
            num_test += len(predicted_prob)
            num_correct += len(predicted_prob) * batch_accuracy
            for i, prob_score in enumerate(predicted_prob):
                # question_id, answer_id, label = pair_id[i]
                results[question_id[i]].append((answer_id[i], label[i], prob_score[0]))

            if n_updates%2000 == 0:
                tf.logging.info("n_update %d , %s: Mins Used: %.2f" %
                                (n_updates, op_name, (time() - t0) / 60.0))

    except tf.errors.OutOfRangeError:
        # calculate top-1 precision
        print('num_test_samples: {}  test_accuracy: {}'.format(num_test, num_correct / num_test))
        accu, precision, recall, f1, loss = metrics.classification_metrics(results)
        print('Accuracy: {}, Precision: {}  Recall: {}  F1: {} Loss: {}'.format(accu, precision, recall, f1, loss))

        mvp = metrics.mean_average_precision(results)
        mrr = metrics.mean_reciprocal_rank(results)
        top_1_precision = metrics.top_1_precision(results)
        total_valid_query = metrics.get_num_valid_query(results)
        print('MAP (mean average precision): {}\tMRR (mean reciprocal rank): {}\tTop-1 precision: {}\tNum_query: {}'.format(
            mvp, mrr, top_1_precision, total_valid_query))

        out_path = os.path.join(dir_path, "output_test.txt")
        print("Saving evaluation to {}".format(out_path))
        with open(out_path, 'w') as f:
          f.write("query_id\tdocument_id\tscore\trank\trelevance\n")
          for us_id, v in results.items():
            v.sort(key=operator.itemgetter(2), reverse=True)
            for i, rec in enumerate(v):
              r_id, label, prob_score = rec
              rank = i+1
              f.write('{}\t{}\t{}\t{}\t{}\n'.format(us_id, r_id, prob_score, rank, label))
    return mrr
Example #7
def evaluate_model(model, n):
    mean_map = 0.
    mean_ndcg = 0.
    for u in range(len(user_items)):  # for each user_id in the training set
        rec = [t[0] for t in model.recommend(u, user_item_matrix, n)]
        user_item_test_list = [
            items_ids[i] for i in user_items_test[user_ids_inv[u]]
        ]  # list of the movies this user watched in the test set
        rel_vector = [
            np.isin(user_item_test_list, rec, assume_unique=True).astype(int)
        ]
        mean_map += metrics.mean_average_precision(rel_vector)
        mean_ndcg += metrics.ndcg_at_k(rel_vector, n)

    mean_map /= len(user_items_test)
    mean_ndcg /= len(user_items_test)

    return mean_map, mean_ndcg
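Examples #7 and #12 pass binary relevance vectors (already ordered by predicted rank) rather than id/score pairs. A minimal sketch in the style of the common rank_metrics helpers, assuming that interface; the imported metrics module may differ:

import numpy as np

def average_precision(r):
    # r: binary relevance vector in ranked order (1 = relevant at that rank)
    r = np.asarray(r) != 0
    precisions = [r[:k + 1].mean() for k in range(r.size) if r[k]]
    return np.mean(precisions) if precisions else 0.0

def mean_average_precision(rs):
    # mean over a collection of relevance vectors, one per query/user
    return np.mean([average_precision(r) for r in rs])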
Example #8
def main():
    r"""Main entry point in the graph embedding procedure."""
    args = config_parser().parse_args()

    g_pdists = load_pdists(args)
    n = g_pdists.shape[0]
    d = args.manifold_dim

    # we are actually using only the upper diagonal part
    g_pdists = g_pdists[np.triu_indices(n, 1)]
    g_sq_pdists = g_pdists**2

    # read the graph (the adjacency structure `g` used by
    # mean_average_precision below; the loading step is elided here)

    # the distortion cost
    def distortion_cost(X):
        man_sq_pdists = manifold_pdists(X, squared=True)

        return np.sum(np.abs(man_sq_pdists / g_sq_pdists - 1))

    # the manifold, problem, and solver
    manifold = PositiveDefinite(d, k=n)
    problem = Problem(manifold=manifold, cost=distortion_cost, verbosity=2)
    linesearch = ReduceLROnPlateau(start_lr=2e-2,
                                   patience=10,
                                   threshold=1e-4,
                                   factor=0.1,
                                   verbose=1)
    solver = ConjugateGradient(linesearch=linesearch, maxiter=1000)

    # solve it
    with Timer('training') as t:
        X_opt = solver.solve(problem, x=sample_init_points(n, d))

    # the distortion achieved
    man_pdists = manifold_pdists(X_opt)
    print('Average distortion: ', average_distortion(g_pdists, man_pdists))
    man_pdists_sym = pdists_vec_to_sym(man_pdists, n, 1e12)
    print('MAP: ', mean_average_precision(g,
                                          man_pdists_sym,
                                          diag_adjusted=True))
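Examples #3 and #8 evaluate graph embeddings: mean_average_precision(g, man_pdists_sym) scores how well each node's graph neighbours are ranked nearest under the embedding distances. One common definition, sketched under the assumption that g is an (n, n) adjacency matrix; the project's version (with diag_adjusted and a symmetric distance matrix) may differ in detail:

import numpy as np

def graph_mean_average_precision(adj, pdists):
    # adj: (n, n) boolean adjacency matrix; pdists: (n, n) embedding distances
    aps = []
    for a in range(adj.shape[0]):
        neighbors = np.flatnonzero(adj[a])
        if neighbors.size == 0:
            continue
        order = np.argsort(pdists[a])
        order = order[order != a]  # rank every other node by distance to a
        # 1-based ranks at which the true neighbours appear
        ranks = np.flatnonzero(np.isin(order, neighbors)) + 1
        precisions = np.arange(1, ranks.size + 1) / ranks
        aps.append(precisions.mean())
    return float(np.mean(aps)) if aps else 0.0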
Example #9
    def evalMetric(self, metric):
        score, n = 0, 0

        if self.verbose:
            print('Running evaluation')

        for w1 in self.eval:
            if w1 not in self.ranks: # skip non-existing
                continue

            # TODO: Implement desired metrics here
            if metric == 'precision_at_10':
                k = 10
                rs = [1 if w in [x[0] for x in self.eval[w1]] else 0 for (w, _) in self.ranks[w1][:k]]
#                print w1
#                print rs
#                print self.ranks[w1][:k]
#                print self.eval[w1]
                score += metrics.precision_at_k(rs, k)
            elif metric == 'map':
                rs = [1 if w in [x[0] for x in self.eval[w1]] else 0 for (w, _) in self.ranks[w1]]
#                print w1
#                print rs
#                print self.ranks[w1]
#                print self.eval[w1]
                score += metrics.mean_average_precision(rs)
            elif metric == 'ndcg_at_100':
                k = 100
                d = dict(self.eval[w1])
                rs = [d[w] if w in d else 0 for (w, s) in self.ranks[w1][:k]]
#                print w1
#                print rs
#                print self.ranks[w1][:k]
#                print self.eval[w1]
                score += metrics.ndcg_at_k(rs, k)

            n += 1

        return (score / n, n)
Example #10
def run_test(epoch_no, dir_path, op_name, sess, training, accuracy, prob,
             pair_ids):
    results = defaultdict(list)
    num_test = 0
    num_correct = 0.0
    n_updates = 0
    mrr = 0
    t0 = time()
    try:
        while True:
            n_updates += 1

            batch_accuracy, predicted_prob, pair_ = sess.run(
                [accuracy, prob, pair_ids], feed_dict={training: False})
            question_id, answer_id, label = pair_

            # question_id = question_id.eval()
            # answer_id = answer_id.eval()
            # label = label.eval()
            num_test += len(predicted_prob)
            # if num_test % 1000 == 0:
            #     print(num_test)

            num_correct += len(predicted_prob) * batch_accuracy
            for i, prob_score in enumerate(predicted_prob):
                # question_id, answer_id, label = pair_id[i]
                results[question_id[i]].append(
                    (answer_id[i], label[i], prob_score[0]))

            if n_updates % 2000 == 0:
                tf.logging.info(
                    "epoch: %i  n_update %d , %s: Mins Used: %.2f" %
                    (epoch_no, n_updates, op_name, (time() - t0) / 60.0))

    except tf.errors.OutOfRangeError:

        threshold = 0.95
        none_id = 10000000
        print("threshold: {}".format(threshold))
        for q_id, a_list in results.items():
            correct_flag = 0
            for (a_id, label, score) in a_list:
                if int(label) == 1:
                    correct_flag = 1
            if correct_flag == 0:
                results[q_id].append((none_id, 1, threshold))
            else:
                results[q_id].append((none_id, 0, threshold))
        # calculate top-1 precision
        print('num_test_samples: {}  test_accuracy: {}'.format(
            num_test, num_correct / num_test))
        accu, precision, recall, f1, loss = metrics.classification_metrics(
            results)
        print(
            'Accuracy: {}, Precision: {}  Recall: {}  F1: {} Loss: {}'.format(
                accu, precision, recall, f1, loss))

        mvp = metrics.mean_average_precision(results)
        mrr = metrics.mean_reciprocal_rank(results)
        top_1_precision = metrics.top_1_precision(results)
        total_valid_query = metrics.get_num_valid_query(results)
        print(
            'MAP (mean average precision): {}\tMRR (mean reciprocal rank): {}\tTop-1 precision: {}\tNum_query: {}'
            .format(mvp, mrr, top_1_precision, total_valid_query))

        out_path = os.path.join(dir_path,
                                "ubuntu_output_epoch_{}.txt".format(epoch_no))
        print("Saving evaluation to {}".format(out_path))
        with open(out_path, 'w') as f:
            f.write("query_id\tdocument_id\tscore\trank\trelevance\n")
            for us_id, v in results.items():
                v.sort(key=operator.itemgetter(2), reverse=True)
                for i, rec in enumerate(v):
                    r_id, label, prob_score = rec
                    rank = i + 1
                    f.write('{}\t{}\t{}\t{}\t{}\n'.format(
                        us_id, r_id, prob_score, rank, label))

        global best_score
        if op_name == 'valid' and mrr > best_score:
            best_score = mrr
            saver = tf.train.Saver()
            dir_path = os.path.join(dir_path, "epoch {}".format(epoch_no))
            if not os.path.exists(dir_path):
                os.makedirs(dir_path)
            saver.save(sess, dir_path)
            tf.logging.info(">> save model!")

    return mrr
Example #11
        labels = list(read_labels("data/" + labelfile))
        y, X = zip(*match_labels_documents(documents, labels))
        y, X = np.array(y), np.array(X)
        kf = KFold(len(y), n_folds=10, shuffle=True, random_state=1)
        rank_scores = np.zeros(10)
        for i, (train, test) in enumerate(kf):
            X_train, X_test, y_train, y_test = X[train], X[test], y[train], y[test]
            labels = Counter(flatten(list(y_train)))
            labels = [label for label, count in labels.items() if count >= 1]
            model = IRSystem(k1=1.2, b=0.75, cutoff=0)
            model.fit_raw(X_train, y_train, ngram_range=(1, 1), stop_words='english', min_df=2)
            ranking = model.rank_labels(X_test, raw=True)
            ranking = ranking.tolist()
            ranking = map(lambda r: list(unique_everseen(r)), map(flatten, ranking))
            ranking, y_test = zip(*[(r, y_) for r, y_ in zip(ranking, y_test) if any(l in labels for l in y_)])
            rank_scores[i] = mean_average_precision(ranking, y_test)
        print('IR: (%s)' % labelfile, rank_scores.mean(), rank_scores.std())

        # Next, we'll do an IR experiment with Big Documents
        documents = {doc_id: text for doc_id, text in read_dreams("data/dreambank.en.stanford.out")}
        labels = list(read_labels("data/" + labelfile))
        y, X = zip(*match_labels_documents(documents, labels))
        y, X = np.array(y), np.array(X)
        kf = KFold(len(y), n_folds=10, shuffle=True, random_state=1)
        rank_scores = np.zeros(10)
        for i, (train, test) in enumerate(kf):
            X_train, X_test, y_train, y_test = X[train], X[test], y[train], y[test]
            big_docs = defaultdict(str)
            for (labels, doc) in zip(y_train, X_train):
                for label in labels:
                    big_docs[label] += " " + doc
Example #12
def score_single(sk_labels, im_labels, index):
    res = np.equal(im_labels[index], sk_labels[:, None])

    prec = np.mean([precision_at_k(r, 100) for r in res])
    mAP = mean_average_precision(res)
    return prec, mAP
Example #13
def main():
    video = Video("../datasets/AICity_data/train/S03/c010/vdo.avi")

    gt = read_annotations('../annotations', start_frame, end_frame)
    """
        DETECTIONS
    """
    det_algs = ['yolo3', 'mask_rcnn', 'ssd512']
    for alg in det_algs:
        detections = read_detections(
            '../datasets/AICity_data/train/S03/c010/det/det_{0}.txt'.format(
                alg))
        detections = detections[start_frame:end_frame + 1]

        frames = []

        # roi = cv2.imread('../datasets/AICity_data/train/S03/c010/roi.jpg')

        for im, f in seq(video.get_frames(
                start_frame_number=start_frame)).take(end_frame - start_frame +
                                                      1):
            f.ground_truth = gt[f.id]
            f.detections = detections[f.id]
            frames.append(f)

            if make_video:
                make_video_frame(im, f, frames)

        iou_over_time(frames)
        mAP = mean_average_precision(frames)
        print(alg, " mAP:", mAP)
    """
        DETECTIONS FROM ALTERED GROUND TRUTH 
    """
    frames = []

    for im, f in seq(video.get_frames()).take(end_frame - start_frame + 1):
        f.ground_truth = gt[f.id]
        f.detections = alter_detections(f.ground_truth)
        frames.append(f)

        if make_video:
            make_video_frame(im, f, frames)

    iou_over_time(frames)
    mAP = mean_average_precision(frames)
    print('Random alteration', " mAP:", mAP)
    """
        OPTICAL FLOW 
    """
    of_det_1 = read_optical_flow(
        '../datasets/optical_flow/detection/LKflow_000045_10.png')
    of_det_2 = read_optical_flow(
        '../datasets/optical_flow/detection/LKflow_000157_10.png')

    of_gt_1 = read_optical_flow('../datasets/optical_flow/gt/000045_10.png')
    of_gt_2 = read_optical_flow('../datasets/optical_flow/gt/000157_10.png')

    img_1 = cv2.imread('../datasets/optical_flow/img/000045_10.png')
    img_2 = cv2.imread('../datasets/optical_flow/img/000157_10.png')

    msen_of = msen(of_det_2, of_gt_2)
    pepn_of = pepn(of_det_2, of_gt_2)

    print(msen_of, pepn_of)
    show_optical_flow(of_gt_1)
    show_optical_flow_arrows(img_1, of_gt_1)

    msen_45 = msen(of_det_1, of_gt_1, plot=True)
    pepn_45 = pepn(of_det_1, of_gt_1)
    print("Sequence 045: MSEN", msen_45, "PEPN", pepn_45)

    msen_157 = msen(of_det_2, of_gt_2, plot=True)
    pepn_157 = pepn(of_det_2, of_gt_2)
    print("Sequence 157: MSEN", msen_157, "PEPN", pepn_157)

    show_optical_flow(of_gt_1)
Example #14
def main():
    parser = argparse.ArgumentParser(
        description='Search the picture passed in a picture database.')

    parser.add_argument('method',
                        help='Method to use',
                        choices=method_refs.keys())
    parser.add_argument('--debug',
                        action='store_true',
                        help='Show debug plots')

    args = parser.parse_args()

    method = method_refs.get(args.method)

    video = Video("../datasets/AICity_data/train/S03/c010/frames")

    frames = []
    for im, mask, frame in method(video, **{'debug': args.debug}):
        frames.append(frame)
        #iou = frame.get_detection_iou(ignore_classes=True)
        #print(iou)

        if not args.debug:
            plt.figure(figsize=(12, 8))
            plt.subplot2grid((2, 2), (0, 0))

            im_frame_left = np.copy(im)
            for d in frame.ground_truth:
                cv2.rectangle(im_frame_left,
                              (int(d.top_left[1]), int(d.top_left[0])),
                              (int(d.get_bottom_right()[1]),
                               int(d.get_bottom_right()[0])), (255, 0, 0),
                              thickness=5)
            for d in frame.detections:
                cv2.rectangle(im_frame_left,
                              (int(d.top_left[1]), int(d.top_left[0])),
                              (int(d.get_bottom_right()[1]),
                               int(d.get_bottom_right()[0])), (0, 0, 255),
                              thickness=5)

            plt.imshow(cv2.cvtColor(im_frame_left, cv2.COLOR_BGR2RGB))
            plt.axis('off')
            plt.legend([
                Line2D([0], [0], color=(0, 0, 1)),
                Line2D([0], [0], color=(1, 0, 0)),
            ], ['GT', 'Det'],
                       loc='lower right')

            plt.subplot2grid((2, 2), (0, 1))
            m = cv2.cvtColor(np.copy(mask), cv2.COLOR_GRAY2RGB)
            for d in frame.ground_truth:
                cv2.rectangle(m, (int(d.top_left[1]), int(d.top_left[0])),
                              (int(d.get_bottom_right()[1]),
                               int(d.get_bottom_right()[0])), (0, 0, 255),
                              thickness=5)
            for d in frame.detections:
                cv2.rectangle(m, (int(d.top_left[1]), int(d.top_left[0])),
                              (int(d.get_bottom_right()[1]),
                               int(d.get_bottom_right()[0])), (255, 0, 0),
                              thickness=5)
            plt.imshow(m)
            plt.axis('off')
            plt.legend([
                Line2D([0], [0], color=(0, 0, 1)),
                Line2D([0], [0], color=(1, 0, 0)),
            ], ['GT', 'Det'],
                       loc='lower right')

            plt.subplot2grid((2, 2), (1, 0), colspan=2)
            plt.title('IoU over time' + str(frame.id))
            iou_over_time(frames, ignore_classes=True, show=False)
            axes = plt.gca()
            axes.set_xlim((0, int(2041 * .75) + 1))
            axes.set_ylim((0, 1.1))
            plt.legend()

            plt.savefig('../video/{:04d}.png'.format(frame.id))

            #plt.show()
            plt.close()

    iou_over_time(frames, ignore_classes=True)
    print('mAP:', mean_average_precision(frames, ignore_classes=True))
Example #15
def off_the_shelf_yolo(tracking, debug=False, *args, **kwargs):
    video = Video("../datasets/AICity_data/train/S03/c010/frames")
    detection_transform = DetectionTransform()
    classes = utils.load_classes('../config/coco.names')
    gt = read_annotations(
        '../datasets/AICity_data/train/S03/c010/m6-full_annotation.xml')

    model = Darknet('../config/yolov3.cfg')
    model.load_weights('../weights/fine_tuned_yolo_freeze.weights')
    if torch.cuda.is_available():
        model = model.cuda()

    frames = []
    last_im = None

    model.eval()
    with torch.no_grad():
        for i, im in tqdm(enumerate(video.get_frames(start=len(video) // 4)),
                          total=len(video),
                          file=sys.stdout,
                          desc='Yolo'):
            im_tensor = detection_transform(im)

            im_tensor = im_tensor.view((-1, ) + im_tensor.size())
            if torch.cuda.is_available():
                im_tensor = im_tensor.cuda()

            detections = model.forward(im_tensor)
            detections = utils.non_max_suppression(detections,
                                                   80,
                                                   conf_thres=.6,
                                                   nms_thres=0.3)

            frame = Frame(i + (len(video) // 4))
            frame.ground_truth = gt[frame.id]

            for d in detections[0]:
                if int(d[6]) in VALID_LABELS:
                    bbox = d.cpu().numpy()
                    det = Detection(-1,
                                    classes[int(d[6])], (bbox[0], bbox[1]),
                                    width=bbox[2] - bbox[0],
                                    height=bbox[3] - bbox[1],
                                    confidence=d[5])
                    detection_transform.unshrink_detection(det)
                    frame.detections.append(det)

            if tracking is not None:
                last_frame = None if len(frames) == 0 else frames[-1]
                tracking(frame=frame,
                         im=im,
                         last_frame=last_frame,
                         last_im=last_im,
                         frames=frames,
                         debug=False)

            frames.append(frame)

            last_im = im

            if debug:
                plt.figure()
                for det in frame.detections:
                    rect = patches.Rectangle(det.top_left,
                                             det.width,
                                             det.height,
                                             linewidth=2,
                                             edgecolor='blue',
                                             facecolor='none')
                    plt.gca().add_patch(rect)
                    if tracking is None:
                        text = '{}'.format(det.label)
                    else:
                        text = '{} ~ {}'.format(det.label, det.id)
                    plt.text(det.top_left[0],
                             det.top_left[1],
                             s=text,
                             color='white',
                             verticalalignment='top',
                             bbox={
                                 'color': 'blue',
                                 'pad': 0
                             })
                plt.imshow(im)
                plt.axis('off')
                # plt.savefig('../video/video_yolo_fine_tune_good/frame_{:04d}'.format(i))
                plt.show()
                plt.close()
        # iou_over_time(frames)
        mAP = mean_average_precision(frames)
        print("YOLO mAP:", mAP)
Example #16
def off_the_shelf_ssd(tracking, debug=False, **kwargs):
    if cuda.is_available():
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    gt = read_annotations(
        '../datasets/AICity_data/train/S03/c010/m6-full_annotation.xml')
    video = Video("../datasets/AICity_data/train/S03/c010/frames")
    trans = transforms.Compose(
        [transforms.Resize((300, 300)),
         transforms.ToTensor()])

    labels = (  # always index 0
        'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
        'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
        'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')

    model = build_ssd('test', 300, 21)  # initialize SSD
    model.load_weights('../weights/ssd300_mAP_77.43_v2.pth')
    if torch.cuda.is_available():
        model = model.cuda()

    frames = []

    model.eval()
    with torch.no_grad():
        for i, im in enumerate(video.get_frames()):

            im_tensor = trans(im)
            im_tensor = im_tensor.view((-1, ) + im_tensor.size())
            if torch.cuda.is_available():
                im_tensor = im_tensor.cuda()

            output = model.forward(im_tensor)
            detections = output.data

            w = im.width
            h = im.height
            frame = Frame(i)

            frame.ground_truth = gt[frame.id]

            # skip j = 0, because it's the background class
            for j in (2, 6, 7, 14):
                dets = detections[0, j, :]
                mask = dets[:, 0].gt(0.).expand(5, dets.size(0)).t()
                dets = torch.masked_select(dets, mask).view(-1, 5)
                if dets.size(0) == 0:
                    continue
                boxes = dets[:, 1:]
                scores = dets[:, 0].cpu().numpy()
                cls_dets = np.hstack((boxes.cpu().numpy(),
                                      scores[:,
                                             np.newaxis])).astype(np.float32,
                                                                  copy=False)
                for cls_det in cls_dets:
                    x1 = int(w * cls_det[0])
                    y1 = int(h * cls_det[1])
                    det = Detection(-1,
                                    labels[j - 1], (x1, y1),
                                    width=w * (cls_det[2] - cls_det[0]),
                                    height=h * (cls_det[3] - cls_det[1]),
                                    confidence=cls_det[4])
                    frame.detections.append(det)

            # kalman(frame)
            if tracking is not None:
                tracking(frame, frames, debug=debug)
            frames.append(frame)

            if debug:
                plt.figure()
                for det in frame.detections:
                    rect = patches.Rectangle(det.top_left,
                                             det.width,
                                             det.height,
                                             linewidth=2,
                                             edgecolor='blue',
                                             facecolor='none')
                    plt.gca().add_patch(rect)
                    plt.text(det.top_left[0],
                             det.top_left[1],
                             s='{} ~ {}'.format(det.label, det.id),
                             color='white',
                             verticalalignment='top',
                             bbox={
                                 'color': 'blue',
                                 'pad': 0
                             })
                plt.imshow(im)
                plt.axis('off')
                # plt.savefig('../video/video_ssd_KalmanID/frame_{:04d}'.format(i))
                plt.show()
                plt.close()

        #iou_over_time(frames)
        mAP = mean_average_precision(frames)
        print("SSD mAP:", mAP)
Example #17
        def dev_step():
            results = defaultdict(list)
            num_test = 0
            num_correct = 0.0
            valid_batches = data_helpers.batch_iter(valid_dataset,
                                                    FLAGS.batch_size,
                                                    1,
                                                    target_loss_weight,
                                                    FLAGS.max_utter_len,
                                                    FLAGS.max_utter_num,
                                                    FLAGS.max_response_len,
                                                    charVocab,
                                                    FLAGS.max_word_length,
                                                    shuffle=False)
            for valid_batch in valid_batches:
                x_utterances, x_response, x_utterances_len, x_response_len, x_utters_num, x_responses_num, x_dist, x_target, x_target_weight, id_pairs, x_u_char, x_u_char_len, x_r_char, x_r_char_len = valid_batch
                feed_dict = {
                    u2u_imn.utterances: x_utterances,
                    u2u_imn.response: x_response,
                    u2u_imn.utterances_len: x_utterances_len,
                    u2u_imn.response_len: x_response_len,
                    u2u_imn.utters_num: x_utters_num,
                    u2u_imn.responses_num: x_responses_num,
                    u2u_imn.distance: x_dist,
                    u2u_imn.target: x_target,
                    u2u_imn.target_loss_weight: x_target_weight,
                    u2u_imn.dropout_keep_prob: 1.0,
                    u2u_imn.u_charVec: x_u_char,
                    u2u_imn.u_charLen: x_u_char_len,
                    u2u_imn.r_charVec: x_r_char,
                    u2u_imn.r_charLen: x_r_char_len
                }
                batch_accuracy, predicted_prob = sess.run(
                    [u2u_imn.accuracy, u2u_imn.probs], feed_dict)
                num_test += len(predicted_prob)
                if num_test % 1000 == 0:
                    print(num_test)

                num_correct += len(predicted_prob) * batch_accuracy
                for i, prob_score in enumerate(predicted_prob):
                    question_id, response_id, label = id_pairs[i]
                    results[question_id].append(
                        (response_id, label, prob_score))

            #calculate top-1 precision
            print('num_test_samples: {}  test_accuracy: {}'.format(
                num_test, num_correct / num_test))
            accu, precision, recall, f1, loss = metrics.classification_metrics(
                results)
            print('Accuracy: {}, Precision: {}  Recall: {}  F1: {} Loss: {}'.
                  format(accu, precision, recall, f1, loss))

            mvp = metrics.mean_average_precision(results)
            mrr = metrics.mean_reciprocal_rank(results)
            top_1_precision = metrics.top_1_precision(results)
            total_valid_query = metrics.get_num_valid_query(results)
            print(
                'MAP (mean average precision): {}\tMRR (mean reciprocal rank): {}\tTop-1 precision: {}\tNum_query: {}'
                .format(mvp, mrr, top_1_precision, total_valid_query))

            all_preds = []
            for i in range(len(results)):
                all_preds.append([r[2] for r in results[str(i)]])
            df = pd.DataFrame(all_preds,
                              columns=[
                                  'prediction_' + str(i)
                                  for i in range(len(all_preds[0]))
                              ])
            if not os.path.isdir(FLAGS.output_predictions_folder):
                os.makedirs(FLAGS.output_predictions_folder)
            with open(
                    os.path.join(FLAGS.output_predictions_folder,
                                 'config.json'), 'w') as f:
                conf = {}
                for k, v in FLAGS.__dict__['__flags'].items():
                    conf[k] = v
                conf['ranker'] = "U2U"
                conf['seed'] = str(conf['random_seed'])
                args_dict = {}
                args_dict['args'] = conf

                f.write(json.dumps(args_dict, indent=4, sort_keys=True))
            df.to_csv(FLAGS.output_predictions_folder + "/predictions.csv",
                      index=False)

            return mrr