Example #1
def run(which, steps, all_labels, model, model2, pred_diff, loss_saving, which_data, save_all_results):
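    # Evaluation pass ('test' only) for a two-stage Chainer pipeline on the
    # ChaLearn data: `model` extracts per-frame activations, which are
    # mean-pooled and fed to `model2` for the final five-trait prediction.
    # NOTE: the epoch index `e` used at the bottom is assumed to be defined
    # at module level; it is not a parameter of this function.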
    print('steps: ', steps)
    assert(which == 'test')
    assert(which_data in ['all', 'bg', 'face'])

    loss_tmp = []
    pd_tmp = np.zeros((steps, 5), dtype=float)

    ts = time.time()
    for s in range(steps):
        print(s)

        video_names = list(all_labels.keys())
        video_path = os.path.join(P.CHALEARN30_ALL_DATA, video_names[s].split('.mp4')[0] + '.h5')
        video = h5.File(video_path, 'r')

        video_keys = list(video.keys())
        # only the first 10 frames are used; the full clip would be
        # len(video_keys) - 1 frames
        frames = 10
        label = all_labels[video_names[s]][:5].astype(np.float32)

        activations = np.zeros((frames, 256))

        if C.ON_GPU:
            label = to_gpu(label, device=C.DEVICE)
            activations = to_gpu(activations, device=C.DEVICE)

        for f in range(frames):
            data = video[str(f)][:]
            data = data.astype(np.float32)

            if C.ON_GPU:
                data = to_gpu(data, device=C.DEVICE)

            with cp.cuda.Device(C.DEVICE):
                with chainer.using_config('train', False):
                    # frame = np.expand_dims(data, 0)
                    _, activation = model(data)
                    activations[f] = activation.data

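        # average the per-frame activations into a single clip-level descriptor
        # before passing it to the second-stage model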
        activations = to_cpu(activations)
        activations = np.mean(activations, axis=0)
        activations = np.expand_dims(activations, 0).astype(np.float32)

        if C.ON_GPU:
            activations = to_gpu(activations, device=C.DEVICE)

        prediction = model2(activations)
        loss = mean_absolute_error(prediction[0], label)

        loss_tmp.append(float(loss.data))

        pd_tmp[s] = U.pred_diff_trait(to_cpu(prediction.data), to_cpu(label))

    pred_diff[e] = np.mean(pd_tmp, axis=0)
    loss_tmp_mean = np.mean(loss_tmp, axis=0)
    loss_saving.append(loss_tmp_mean)
    print('E %d. %s loss: ' %(e, which), loss_tmp_mean,
          ' pred diff OCEAS: ', pred_diff[e],
          ' time: ', time.time() - ts)

    U.record_loss_sanity(which, loss_tmp_mean, pred_diff[e])

    if which == 'test' and save_all_results:
        U.record_loss_all_test(loss_tmp)
Example #2
def run(which,
        steps,
        which_labels,
        frames,
        model,
        optimizer,
        pred_diff,
        loss_saving,
        which_data,
        ordered=False,
        save_all_results=False,
        record_predictions=False,
        record_loss=True):
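    # Generic single-stream Chainer pass over train/val/test: each step loads
    # a batch with D.load_data, runs the model, and (in 'train' mode only)
    # backpropagates the mean absolute error and updates the optimizer.
    # NOTE: the epoch counter `e` used below is assumed to be module-level.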
    print('steps: ', steps)
    assert (which in ['train', 'test', 'val'])
    assert (which_data in ['all', 'bg', 'face'])

    if which == 'train':
        which_batch_size = C.TRAIN_BATCH_SIZE
    elif which == 'val':
        which_batch_size = C.VAL_BATCH_SIZE
    elif which == 'test':
        which_batch_size = C.TEST_BATCH_SIZE

    loss_tmp = []
    pd_tmp = np.zeros((steps, 5), dtype=float)
    _labs = list(which_labels)

    preds = np.zeros((steps, 5), dtype=float)

    if not ordered:
        shuffle(_labs)

    ts = time.time()
    for s in range(steps):
        print(s)  # progress: current step index
        labels_selected = _labs[s * which_batch_size:(s + 1) *
                                which_batch_size]
        assert (len(labels_selected) == which_batch_size)
        labels, data, _ = D.load_data(labels_selected,
                                      which_labels,
                                      frames,
                                      which_data,
                                      resize=True,
                                      ordered=ordered)

        if C.ON_GPU:
            data = to_gpu(data, device=C.DEVICE)
            labels = to_gpu(labels, device=C.DEVICE)

        with cp.cuda.Device(C.DEVICE):
            if which == 'train':
                config = True
            else:
                config = False

            with chainer.using_config('train', config):
                if which == 'train':
                    model.cleargrads()
                prediction = model(data)

                loss = mean_absolute_error(prediction, labels)

                if which == 'train':
                    loss.backward()
                    optimizer.update()

        if record_loss:
            loss_tmp.append(float(loss.data))
            pd_tmp[s] = U.pred_diff_trait(to_cpu(prediction.data),
                                          to_cpu(labels))
        if record_predictions and which == 'test':
            preds[s] = to_cpu(prediction.data)

    if record_loss:
        pred_diff[e] = np.mean(pd_tmp, axis=0)
        loss_tmp_mean = np.mean(loss_tmp, axis=0)
        loss_saving.append(loss_tmp_mean)
        print('E %d. %s loss: ' % (e, which), loss_tmp_mean,
              ' pred diff OCEAS: ', pred_diff[e], ' time: ',
              time.time() - ts)

        U.record_loss_sanity(which, loss_tmp_mean, pred_diff[e])

        if which == 'test' and save_all_results:
            U.record_loss_all_test(loss_tmp)

    if record_predictions and which == 'test':
        U.record_all_predictions(which, preds)
Example #3
def run(which, steps, which_labels, frames, model, pred_diff, loss_saving,
        which_data, trait, ordered, save_all_results, record_predictions,
        record_loss, is_resnet18, num_traits, device, loss_function,
        resnet18_pretrain):
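    # PyTorch evaluation pass (val/test): the model stays in eval mode with
    # gradients disabled, and per-step loss / per-trait prediction differences
    # are optionally recorded via the supplied loss_function and U helpers.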
    print('steps: ', steps)
    # assert (which in ['test', 'val'])
    # assert (which_data in ['bg', 'face'])
    if trait is not None:
        assert (trait in ['O', 'C', 'E', 'A', 'S'])

    if which == 'val':
        which_batch_size = C.VAL_BATCH_SIZE
    elif which == 'test':
        which_batch_size = C.TEST_BATCH_SIZE

    loss_tmp = []
    pd_tmp = np.zeros((steps, num_traits), dtype=float)
    _labs = list(which_labels)

    preds = np.zeros((steps, num_traits), dtype=float)

    ts = time.time()
    for s in tqdm(range(steps)):
        labels_selected = _labs[s * which_batch_size:(s + 1) *
                                which_batch_size]
        assert (len(labels_selected) == which_batch_size)

        # labels, data, _ = D.load_data(labels_selected, which_labels, frames, which_data, ordered=ordered,
        #                               is_resnet18=is_resnet18, resnet18_pretrain=resnet18_pretrain, resize=True)
        labels, data, _ = D.load_data(labels_selected,
                                      which_labels,
                                      frames,
                                      which_data,
                                      ordered=ordered,
                                      is_resnet18=is_resnet18,
                                      resnet18_pretrain=resnet18_pretrain,
                                      resize=False)

        if C.ON_GPU:
            data = torch.from_numpy(data)
            data = data.cuda(device)
            labels = torch.from_numpy(labels)
            labels = labels.cuda(device)

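        # inference only: eval() freezes dropout/batch-norm behaviour and
        # no_grad() disables gradient tracking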
        model.eval()
        with torch.no_grad():
            predictions = model(data)
            loss = loss_function(predictions, labels)
            loss = loss.detach()

        if record_loss:
            loss_tmp.append(float(loss.data))
            pd_tmp[s] = U.pred_diff_trait(np.array(predictions.cpu().data),
                                          np.array(labels.cpu().data))
        if record_predictions and which == 'test':
            preds[s] = np.array(predictions.cpu().data)

    if record_loss:
        pred_diff[0] = np.mean(pd_tmp, axis=0)
        loss_tmp_mean = np.mean(loss_tmp, axis=0)
        loss_saving.append(loss_tmp_mean)
        print('E %d. %s loss: ' % (0, which), loss_tmp_mean,
              ' pred diff %s: ' % trait, pred_diff[0], ' time: ',
              time.time() - ts)

        U.record_loss_sanity(which, loss_tmp_mean, pred_diff[0])

        if which == 'test' and save_all_results:
            U.record_loss_all_test(loss_tmp, trait=True)

    if record_predictions and which == 'test':
        U.record_all_predictions(which, preds)
Example #4
def run(which,
        steps,
        which_labels,
        frames,
        model,
        optimizer,
        pred_diff,
        loss_saving,
        which_data,
        trait=None,
        ordered=False,
        save_all_results=False,
        record_predictions=False,
        record_loss=True,
        is_resnet18=True,
        pretrain_resnet=PRETRAIN):
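    # PyTorch training pass. NOTE: `num_traits`, `device`, `loss_function` and
    # the epoch counter `e` are not parameters here; they are assumed to be
    # defined at module level (compare the signature in Example #3).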
    print('steps: ', steps)
    assert (which in ['train'])
    assert (which_data in ['bg', 'face', 'all'])
    if trait is not None:
        assert (trait in ['O', 'C', 'E', 'A', 'S'])

    if which == 'train':
        which_batch_size = C.TRAIN_BATCH_SIZE

    loss_tmp = []
    pd_tmp = np.zeros((steps, num_traits), dtype=float)
    _labs = list(which_labels)

    preds = np.zeros((steps, num_traits), dtype=float)

    if not ordered:
        shuffle(_labs)

    ts = time.time()
    for s in tqdm(range(steps)):
        labels_selected = _labs[s * which_batch_size:(s + 1) *
                                which_batch_size]
        assert (len(labels_selected) == which_batch_size)
        # labels, data, _ = D.load_data(labels_selected, which_labels, frames, which_data, ordered=ordered,
        #                               is_resnet18=is_resnet18, resize=True, resnet18_pretrain=pretrain_resnet)
        labels, data, _ = D.load_data(labels_selected,
                                      which_labels,
                                      frames,
                                      which_data,
                                      ordered=ordered,
                                      is_resnet18=is_resnet18,
                                      resize=False,
                                      resnet18_pretrain=pretrain_resnet)

        if C.ON_GPU:
            data = torch.from_numpy(data)
            data = data.cuda(device)
            labels = torch.from_numpy(labels)
            labels = labels.cuda(device)

        # exp_lr_scheduler.step()

        model.train()
        optimizer.zero_grad()
        # with torch.set_grad_enabled(True):
        predictions = model(data)
        loss = loss_function(predictions, labels)
        loss.backward()
        optimizer.step()
        if bool(torch.isnan(loss)):
            print('NaN loss encountered at step %d' % s)

        if record_loss:
            loss_tmp.append(float(loss.data))
            pd_tmp[s] = U.pred_diff_trait(np.array(predictions.cpu().data),
                                          np.array(labels.cpu().data))
        if record_predictions and which == 'test':
            preds[s] = np.array(predictions.cpu().data)

    if record_loss:
        pred_diff[e] = np.mean(pd_tmp, axis=0)
        loss_tmp_mean = np.mean(loss_tmp, axis=0)
        loss_saving.append(loss_tmp_mean)
        print('E %d. %s loss: ' % (e, which), loss_tmp_mean,
              ' pred diff %s: ' % trait, pred_diff[e], ' time: ',
              time.time() - ts)

        U.record_loss_sanity(which, loss_tmp_mean, pred_diff[e])

        if which == 'test' and save_all_results:
            U.record_loss_all_test(loss_tmp, trait=True)

    if record_predictions and which == 'test':
        U.record_all_predictions(which, preds)
Example #5
def run(which,
        steps,
        which_labels,
        frames,
        model,
        optimizer,
        pred_diff,
        loss_saving,
        ordered=False,
        save_all_results=False,
        twostream=False,
        same_frame=False,
        record_predictions=False,
        record_loss=True):
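    # Two-stream Chainer pass: frozen `bg_model` and `face_model` produce
    # activations that the fusion `model` combines into a five-trait
    # prediction. NOTE: `bg_model`, `face_model` and the epoch counter `e`
    # are assumed to be defined at module level; they are not parameters.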
    print('steps: ', steps)
    assert (which in ['train', 'test', 'val'])

    if which == 'train':
        which_batch_size = C.TRAIN_BATCH_SIZE
    elif which == 'val':
        which_batch_size = C.VAL_BATCH_SIZE
    elif which == 'test':
        which_batch_size = C.TEST_BATCH_SIZE

    loss_tmp = []
    pd_tmp = np.zeros((steps, 5), dtype=float)
    _labs = list(which_labels)

    preds = np.zeros((steps, 5), dtype=float)

    if not ordered:
        shuffle(_labs)

    ts = time.time()
    for s in range(steps):
        if which == 'test':
            print(s)  # progress indicator for the test loop
        labels_selected = _labs[s * which_batch_size:(s + 1) *
                                which_batch_size]
        assert (len(labels_selected) == which_batch_size)
        # labels, data = D.load_data(labels_selected, which_labels, frames, which_data, resize=True, ordered=ordered,
        #                            twostream=twostream)
        labels_bg, bg_data, frame_num = D.load_data(labels_selected,
                                                    which_labels,
                                                    frames,
                                                    which_data='bg',
                                                    resize=True,
                                                    ordered=ordered,
                                                    twostream=twostream,
                                                    same_frame=same_frame)
        labels_face, face_data, _ = D.load_data(labels_selected,
                                                which_labels,
                                                frames,
                                                which_data='face',
                                                resize=True,
                                                ordered=ordered,
                                                twostream=twostream,
                                                frame_num=frame_num,
                                                same_frame=same_frame)

        # confirm that labels are the same
        assert (np.mean(labels_bg == labels_face) == 1.0)

        # shuffle data and labels in same order
        if which != 'test':
            shuf = np.arange(which_batch_size)
            shuffle(shuf)
            bg_data = bg_data[shuf]
            face_data = face_data[shuf]
            labels_bg = labels_bg[shuf]

        if C.ON_GPU:
            bg_data = to_gpu(bg_data, device=C.DEVICE)
            face_data = to_gpu(face_data, device=C.DEVICE)
            labels = to_gpu(labels_bg, device=C.DEVICE)

        with cp.cuda.Device(C.DEVICE):
            if which == 'train':
                config = True
            else:
                config = False

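            # the pretrained bg/face streams always run in inference mode;
            # only the fusion model below follows the train/test config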
            with chainer.using_config('train', False):
                prediction_bg, bg_activations = bg_model(bg_data)
                prediction_face, face_activations = face_model(face_data)

            with chainer.using_config('train', config):
                if config:
                    model.cleargrads()
                prediction = model(bg_activations, face_activations)

                loss = mean_absolute_error(prediction, labels)

                if which == 'train':
                    loss.backward()
                    optimizer.update()

        if record_loss:
            loss_tmp.append(float(loss.data))
            pd_tmp[s] = U.pred_diff_trait(to_cpu(prediction.data),
                                          to_cpu(labels))
        if record_predictions and which == 'test':
            preds[s] = to_cpu(prediction.data)

    if record_loss:
        pred_diff[e] = np.mean(pd_tmp, axis=0)
        loss_tmp_mean = np.mean(loss_tmp, axis=0)
        loss_saving.append(loss_tmp_mean)
        print('E %d. %s loss: ' % (e, which), loss_tmp_mean,
              ' pred diff OCEAS: ', pred_diff[e], ' time: ',
              time.time() - ts)
        U.record_loss_sanity(which, loss_tmp_mean, pred_diff[e])

        if which == 'test' and save_all_results:
            U.record_loss_all_test(loss_tmp)

    if record_predictions and which == 'test':
        U.record_all_predictions(which, preds)
Example #6
def run(which, steps, which_labels, frames, model, bg_model, face_model,
        optimizer, pred_diff, loss_saving, trait, ordered, save_all_results,
        twostream, same_frame, record_loss, record_predictions):
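    # Single-trait variant of the two-stream pass: the `trait` letter is
    # forwarded to D.load_data_single, and predictions / differences are
    # recorded with shape (steps, 1) instead of (steps, 5).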
    print('steps: ', steps)
    assert (which in ['train', 'test', 'val'])
    assert (trait in ['O', 'C', 'E', 'A', 'S'])

    if which == 'train':
        which_batch_size = C.TRAIN_BATCH_SIZE
    elif which == 'val':
        which_batch_size = C.VAL_BATCH_SIZE
    elif which == 'test':
        which_batch_size = C.TEST_BATCH_SIZE

    loss_tmp = []
    pd_tmp = np.zeros((steps, 1), dtype=float)
    _labs = list(which_labels)

    preds = np.zeros((steps, 1), dtype=float)

    if not ordered:
        shuffle(_labs)

    ts = time.time()
    for s in tqdm(range(steps)):
        labels_selected = _labs[s * which_batch_size:(s + 1) *
                                which_batch_size]
        assert (len(labels_selected) == which_batch_size)

        labels_bg, bg_data, frame_num = D.load_data_single(
            labels_selected,
            which_labels,
            frames,
            which_data='bg',
            resize=True,
            ordered=ordered,
            twostream=twostream,
            same_frame=same_frame,
            trait=trait)
        labels_face, face_data, _ = D.load_data_single(labels_selected,
                                                       which_labels,
                                                       frames,
                                                       which_data='face',
                                                       resize=True,
                                                       ordered=ordered,
                                                       twostream=twostream,
                                                       frame_num=frame_num,
                                                       same_frame=same_frame,
                                                       trait=trait)

        if C.ON_GPU:
            bg_data = to_gpu(bg_data, device=C.DEVICE)
            face_data = to_gpu(face_data, device=C.DEVICE)
            labels = to_gpu(labels_bg, device=C.DEVICE)

        with cp.cuda.Device(C.DEVICE):
            if which == 'train':
                config = True
            else:
                config = False

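            # as in Example #5: the feature streams stay in inference mode and
            # only the fusion model follows the train/test config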
            with chainer.using_config('train', False):
                prediction_bg, bg_activations = bg_model(bg_data)
                prediction_face, face_activations = face_model(face_data)

            with chainer.using_config('train', config):
                if config:
                    model.cleargrads()
                prediction = model(bg_activations, face_activations)

                loss = mean_absolute_error(prediction, labels)

                if which == 'train':
                    loss.backward()
                    optimizer.update()

        if record_loss:
            loss_tmp.append(float(loss.data))
            pd_tmp[s] = U.pred_diff_trait(to_cpu(prediction.data),
                                          to_cpu(labels))
        if record_predictions and which == 'test':
            preds[s] = to_cpu(prediction.data)

    if record_loss:
        pred_diff[0] = np.mean(pd_tmp, axis=0)
        loss_tmp_mean = np.mean(loss_tmp, axis=0)
        loss_saving.append(loss_tmp_mean)
        print('E %d. %s loss: ' % (0, which), loss_tmp_mean,
              ' pred diff %s: ' % trait, pred_diff[0], ' time: ',
              time.time() - ts)

        U.record_loss_sanity(which, loss_tmp_mean, pred_diff[0])

        if which == 'test' and save_all_results:
            U.record_loss_all_test(loss_tmp, trait=True)

    if record_predictions and which == 'test':
        U.record_all_predictions(which, preds)