def eval(model, split, seq_length, n_cpu, disp):
    """Evaluate `model` on a GolfDB validation split with temporal subsampling.

    Videos are subsampled by the module-level global `step` and evaluated in
    overlapping windows (stride `seq_length_new - 1`) because a full video
    does not fit in GPU memory.  Per-video results are written to
    ``results/<version_name>.npz`` and summary statistics are printed.

    Args:
        model: network mapping an image sequence to per-frame event logits.
        split: validation split index, selects ``data/val_split_{split}.pkl``.
        seq_length: window length (in original, pre-subsampling frames).
        n_cpu: number of DataLoader worker processes.
        disp: if True, print per-video correctness as evaluation proceeds.

    Returns:
        PCE: mean event-detection correctness over all videos.
    """
    dataset = GolfDB(data_file='data/val_split_{}.pkl'.format(split),
                     vid_dir='data/videos_160/',
                     seq_length=seq_length,
                     transform=transforms.Compose([ToTensor(),
                                                   Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]),
                     train=False)

    # batch_size=1: one full (variable-length) video per iteration.
    data_loader = DataLoader(dataset,
                             batch_size=1,
                             shuffle=False,
                             num_workers=n_cpu,
                             drop_last=False)


    correct = []
    delta = []
    tolerance = []
    predictions = []
    predictions_original = []
    ground_truth = []
    for i, sample in enumerate(data_loader):
        # Keep every `step`-th frame (`step` is a module-level global).
        images, labels = sample['images'][:,::step,:,:,:], sample['labels']

        # full samples do not fit into GPU memory so evaluate sample in 'seq_length' batches
        seq_length_new = int(np.ceil(seq_length/step))
        batch = 0
        # Windows overlap by exactly one frame, so the stitched probability
        # sequence covers every subsampled frame exactly once.
        while batch * (seq_length_new-1) < images.shape[1]-1:
            if (batch + 1) * (seq_length_new-1) + 1 > images.shape[1]:
                # Last window: take whatever frames remain.
                image_batch = images[:, batch * (seq_length_new-1):, :, :, :]
            else:
                image_batch = images[:, batch * (seq_length_new-1):(batch + 1) * (seq_length_new-1)+1, :, :, :]
            logits = model(image_batch.to(device))
            if batch == 0:
                probs = F.softmax(logits.data, dim=1).cpu().numpy()
            else:
                # Drop the previous window's final frame before appending: it
                # is re-evaluated as the first frame of the current window.
                probs = np.append(probs[:-1], F.softmax(logits.data, dim=1).cpu().numpy(), 0)
            batch += 1
        gt, pp, deltas, tol, c, original = correct_preds(probs, labels.squeeze())
        if disp:
            print(i, c)

        correct.append(c)
        tolerance.append(tol)
        delta.append(deltas)
        predictions.append(pp)
        ground_truth.append(gt)
        predictions_original.append(original)

    # Persist per-video arrays; `version_name` is a module-level global.
    np.savez_compressed('results/' + version_name + '.npz', array1=np.array(delta), array2=np.array(predictions),
                                                    array3=np.array(tolerance), array4=np.array(ground_truth),
                                                    array5=np.array(predictions_original))

    # Per-event mean correctness, then RMS and std of the frame errors
    # normalised by each video's tolerance.
    print(np.round(np.mean(np.array(correct),0),3))
    print(np.round(np.sqrt(np.mean(np.square(np.array(delta)/np.array(tolerance)[:,np.newaxis]),0)),3))
    print(np.round(np.std(np.array(delta)/np.array(tolerance)[:,np.newaxis],0),3))
    PCE = np.mean(correct)
    return PCE
# Ejemplo n.º 2
def eval(model, split, seq_length, n_cpu, disp):
    """Evaluate `model` on an StsqDB validation split in `seq_length` windows.

    Each video is processed one at a time (batch_size=1) and split into
    non-overlapping windows of `seq_length` frames because a full video does
    not fit in GPU memory.

    Args:
        model: network mapping an image sequence to per-frame event logits.
        split: validation split index, selects ``val_split_{split}.pkl``.
        seq_length: number of frames per forward pass.
        n_cpu: number of DataLoader worker processes.
        disp: if True, print per-video correctness as evaluation proceeds.

    Returns:
        PCE: mean event-detection correctness over all videos.
    """
    dataset = StsqDB(data_file='val_split_{}.pkl'.format(split),
                     vid_dir='data/videos_40/',
                     seq_length=seq_length,
                     transform=transforms.Compose([
                         ToTensor(),
                         Normalize([0.485, 0.456, 0.406],
                                   [0.229, 0.224, 0.225])
                     ]),
                     train=False)

    data_loader = DataLoader(dataset,
                             batch_size=1,
                             shuffle=False,
                             num_workers=n_cpu,
                             drop_last=False)

    correct = []

    for i, sample in enumerate(data_loader):
        images, labels = sample['images'], sample['labels']

        # full samples do not fit into GPU memory so evaluate sample in 'seq_length' batches
        batch = 0
        while batch * seq_length < images.shape[1]:
            if (batch + 1) * seq_length > images.shape[1]:
                # Last window: take whatever frames remain.
                image_batch = images[:, batch * seq_length:, :, :, :]
            else:
                image_batch = images[:, batch * seq_length:(batch + 1) *
                                     seq_length, :, :, :]
            logits = model(image_batch.to(device))

            # FIX: the original called `.to(device).numpy()`, but
            # `Tensor.numpy()` only works on CPU tensors and raises whenever
            # `device` is CUDA.  Move to CPU first, matching the sibling
            # eval() implementations in this file.
            if batch == 0:
                probs = F.softmax(logits.data, dim=1).cpu().numpy()
            else:
                probs = np.append(
                    probs,
                    F.softmax(logits.data, dim=1).cpu().numpy(), 0)
            batch += 1
        _, _, _, _, c = correct_preds(probs, labels.squeeze())
        if disp:
            print(i, c)
        correct.append(c)
    PCE = np.mean(correct)
    return PCE
# Ejemplo n.º 3
def eval(model, split, seq_length, bs, n_cpu, disp):
    """Evaluate `model` on StsqDB and report overall plus per-element accuracy.

    Uses the module-level global `use_no_element` to choose between the
    12-element (no-element) and 13-element label sets and the matching
    pickle file.

    Args:
        model: network mapping a batch of image sequences to per-frame logits.
        split: split index used to select the pickle file.
        seq_length: frames per sample (cast to int before use).
        bs: batch size (cast to int before use).
        n_cpu: number of DataLoader worker processes.
        disp: if True, print per-batch correctness as evaluation proceeds.

    Returns:
        Tuple of (PCE, element_PCE, all_element_correct, all_element_sum,
        confusion_matrix).
    """
    if use_no_element == False:
        dataset = StsqDB(
            data_file='data/no_ele/seq_length_{}/val_split_{}.pkl'.format(
                int(seq_length), split),
            vid_dir='data/videos_40/',
            seq_length=int(seq_length),
            transform=transforms.Compose([
                ToTensor(),
                Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ]),
            train=False)
    else:
        # FIX: the original read the globals `args.seq_length` / `args.split`
        # here, silently ignoring the caller's `seq_length` / `split`
        # arguments; use the function parameters, as the branch above does.
        # NOTE(review): this branch loads a *train* split with train=True
        # inside an eval function — looks intentional in context but confirm.
        dataset = StsqDB(
            data_file='data/seq_length_{}/train_split_{}.pkl'.format(
                int(seq_length), split),
            vid_dir='data/videos_40/',
            seq_length=int(seq_length),
            transform=transforms.Compose([
                ToTensor(),
                Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ]),
            train=True)

    data_loader = DataLoader(dataset,
                             batch_size=int(bs),
                             shuffle=False,
                             num_workers=n_cpu,
                             drop_last=True)

    correct = []

    # 12 element classes without the "no element" label, 13 with it.
    if use_no_element == False:
        element_correct = [[] for i in range(12)]
        element_sum = [[] for i in range(12)]
        confusion_matrix = np.zeros([12, 12], int)
    else:
        element_correct = [[] for i in range(13)]
        element_sum = [[] for i in range(13)]
        confusion_matrix = np.zeros([13, 13], int)

    for i, sample in enumerate(data_loader):
        images, labels = sample['images'].to(device), sample['labels'].to(
            device)
        logits = model(images)
        probs = F.softmax(logits.data, dim=1)  # per-frame class probabilities
        # Flatten (bs, seq_length) labels to one long frame sequence.
        labels = labels.view(int(bs) * int(seq_length))
        _, c, element_c, element_s, conf = correct_preds(
            probs, labels.squeeze())
        if disp:
            print(i, c)
        correct.append(c)
        # Accumulate per-element hit counts and totals.
        for j in range(len(element_c)):
            element_correct[j].append(element_c[j])
        for j in range(len(element_s)):
            element_sum[j].append(element_s[j])
        confusion_matrix = confusion_matrix + conf

    PCE = np.mean(correct)
    all_element_correct = np.sum(element_correct, axis=1)
    all_element_sum = np.sum(element_sum, axis=1)
    element_PCE = all_element_correct / all_element_sum
    return PCE, element_PCE, all_element_correct, all_element_sum, confusion_matrix
# Ejemplo n.º 4
def myeval(model, split, seq_length, n_cpu, disp, stream_choice=0):
    """Evaluate `model` on a GolfDB variant selected by config flags.

    Dataset choice: `cfg.FRAME_13_OPEN` selects the 13-frame dataset;
    otherwise `stream_choice == 1` selects the RGB (non-optical-flow)
    dataset and any other value selects the optical-flow dataset.  Videos
    are evaluated in non-overlapping `seq_length` windows.

    Args:
        model: network mapping an image sequence to per-frame event logits.
        split: validation split index used to pick the pickle file.
        seq_length: number of frames per forward pass.
        n_cpu: number of DataLoader worker processes.
        disp: if True, print ground-truth and predicted events per video.
        stream_choice: 1 = RGB stream, anything else = optical-flow stream.

    Returns:
        Tuple (PCE, videosNum, all_probs, all_tols, all_events, PFCR) where
        PFCR is the per-event mean correctness and the three lists are kept
        for late fusion of multiple streams.
    """
    # summaryFile = open("summary_opt_{}.txt".format(split),"w")
    videosNum = 0  # number of videos seen in the validation set
    if cfg.FRAME_13_OPEN:
        dataset = GolfDB_13(data_file=cfg.OUR_PKL_FILE_PATH,
                            json_dir=cfg.VAL_JSON_PATH,
                            dataloader_opt=cfg.DATAOPT,
                            seq_length=64,
                            train=False)
    else:
        if stream_choice == 1:
            # evaluate the non-optical-flow (RGB) stream
            dataset = GolfDB(
                data_file='/home/zqr/codes/GolfDB/data/val_split_{}.pkl'.
                format(split),
                vid_dir=cfg.VIDEO_160_PATH,
                seq_length=seq_length,
                transform=None,
                myMean=[0.485, 0.456, 0.406],
                myStd=[0.229, 0.224, 0.225],
                train=False)

        else:  # evaluate the optical-flow stream
            dataset = GolfDB_T(
                data_file='/home/zqr/codes/GolfDB/data/val_split_{}.pkl'.
                format(split),
                vid_dir=cfg.OPT_RESIZE_FILE_PATH,
                seq_length=seq_length,
                train=False)
    data_loader = DataLoader(dataset,
                             batch_size=1,
                             shuffle=False,
                             num_workers=n_cpu,
                             drop_last=False)

    correct = []
    # these three accumulators are kept for stream fusion later on
    all_probs = []
    all_tols = []
    all_events = []

    for i, sample in enumerate(data_loader):
        videosNum += 1
        images, labels = sample['images'], sample['labels']
        # full samples do not fit into GPU memory so evaluate sample in 'seq_length' batches
        batch = 0
        while batch * seq_length < images.shape[1]:
            if (batch + 1) * seq_length > images.shape[1]:
                # last window: take whatever frames remain
                image_batch = images[:, batch * seq_length:, :, :, :]
            else:
                image_batch = images[:, batch * seq_length:(batch + 1) *
                                     seq_length, :, :, :]
            logits = model(image_batch.cuda())
            if batch == 0:
                probs = F.softmax(logits.data, dim=1).cpu().numpy()
            else:
                probs = np.append(probs,
                                  F.softmax(logits.data, dim=1).cpu().numpy(),
                                  0)
            batch += 1
        events, preds, _, tol, c = correct_preds(probs, labels.squeeze())
        all_probs.append(probs)
        all_tols.append(tol)
        all_events.append(events)
        # per-event failure bookkeeping (disabled)
        # for i, item in enumerate(c):
        #     if c[i] == 0:
        #         summary[i] += 1
        # if c[0] ==0 or c[7]==0:
        #     info = str((preds - events).tolist())
        #     summaryFile.write(info)
        #     summaryFile.write(' ')
        #     summaryFile.write(tol)
        #     summaryFile.write('\n')
        # else:
        #     summaryFile.write('\n')
        if disp:
            # print(i, c)
            print("ground truth:")
            print(events)
            print("preds:")
            print(preds)
        correct.append(c)
    PFCR = np.mean(correct, axis=0)  # per-event (per-frame-class) mean
    PCE = np.mean(correct)
    print("PCE:")
    print(PCE)
    # summaryFile.close()
    return PCE, videosNum, all_probs, all_tols, all_events, PFCR
def eval(model, split, seq_length, n_cpu, disp, steps=1):
    """Evaluate `model` on GolfDB, optionally simulating a lower frame rate.

    When `steps > 1` (and `_video_interpolation` is False), every frame not
    at a multiple of `steps` is overwritten with the nearest kept frame,
    simulating temporal downsampling; with `_video_interpolation` set, a
    pre-downsampled video directory is loaded instead and `steps` is reset
    to 1 so no in-memory downsampling occurs.

    Args:
        model: network mapping an image sequence to per-frame event logits.
        split: validation split index, selects ``data/val_split_{split}.pkl``.
        seq_length: number of frames per forward pass.
        n_cpu: number of DataLoader worker processes.
        disp: if True, print per-video correctness as evaluation proceeds.
        steps: temporal downsampling factor (default 1 = no downsampling).

    Returns:
        PCE: mean event-detection correctness over all videos.
    """
    # FIX: removed leftover debug statements from the original
    # (`print("------in function")` and `if i == 176: print('hello')`).
    if not _video_interpolation:
        dataset = GolfDB(data_file='data/val_split_{}.pkl'.format(split),
                         vid_dir='data/videos_160/',
                         seq_length=seq_length,
                         transform=transforms.Compose([
                             ToTensor(),
                             Normalize([0.485, 0.456, 0.406],
                                       [0.229, 0.224, 0.225])
                         ]),
                         train=False)

    else:
        dataset = GolfDB(data_file='data/val_split_{}.pkl'.format(split),
                         vid_dir='data/videos_160/'.replace(
                             'videos_160',
                             'videos_downsampled_' + str(steps) + 'x'),
                         seq_length=seq_length,
                         transform=transforms.Compose([
                             ToTensor(),
                             Normalize([0.485, 0.456, 0.406],
                                       [0.229, 0.224, 0.225])
                         ]),
                         train=False)
        # Videos on disk are already downsampled; skip in-memory downsampling.
        steps = 1

    data_loader = DataLoader(dataset,
                             batch_size=1,
                             shuffle=False,
                             num_workers=n_cpu,
                             drop_last=False)

    # Frame indices to keep vs. overwrite when simulating downsampling.
    idx_keep = np.arange(0, seq_length, steps)
    idx_erase = np.delete(np.arange(0, seq_length), idx_keep)
    correct = []
    for i, sample in enumerate(data_loader):
        images, labels = sample['images'], sample['labels']

        if steps > 1:
            #### Downsample video (temporally): replace each erased frame
            #### with the nearest preceding kept frame.
            images[:,
                   idx_erase, :, :, :] = images[:,
                                                np.repeat(idx_keep, steps -
                                                          1)[:len(idx_erase
                                                                  )], :, :, :]

        # full samples do not fit into GPU memory so evaluate sample in 'seq_length' batches
        batch = 0
        while batch * seq_length < images.shape[1]:
            if (batch + 1) * seq_length > images.shape[1]:
                # Last window: take whatever frames remain.
                image_batch = images[:, batch * seq_length:, :, :, :]
            else:
                image_batch = images[:, batch * seq_length:(batch + 1) *
                                     seq_length, :, :, :]
            logits = model(image_batch.cuda())
            if batch == 0:
                probs = F.softmax(logits.data, dim=1).cpu().numpy()
            else:
                probs = np.append(probs,
                                  F.softmax(logits.data, dim=1).cpu().numpy(),
                                  0)
            batch += 1
        _, _, _, _, c = correct_preds(probs, labels.squeeze())
        if disp:
            print(i, c)
        correct.append(c)
    PCE = np.mean(correct)

    return PCE