# Assumed context for these excerpts (imports provided by the enclosing
# module): numpy as np, torch, torch.nn.functional as F, progressbar,
# torch.autograd.Variable, plus the repo-local helpers sum_tools, helper,
# NMS, and rep_conversions.

    def Evaluate_upperbound_rdmuser(self):
        F1s = 0
        n_notselected_seq = 0
        widgets = [' -- [ ',progressbar.Counter(), '|', str(self.dataset_size), ' ] ',
               progressbar.Bar(),  ' name:  ', progressbar.FormatLabel(''),
               ' F1s: ', progressbar.FormatLabel(''),
               ' (', progressbar.ETA(), ' ) ']
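        # widgets[-6] and widgets[-4] are the two FormatLabel placeholders
        # (video name, running F1); they are overwritten on every iteration.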

        pbar = progressbar.ProgressBar(max_value=self.dataset_size, widgets=widgets)
        pbar.start()

        #FIXME This process is problematic and needs update!
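        # Random-user upper bound: pick one annotator at random, use that
        # annotator's per-frame scores directly as the prediction, and score
        # them against all annotators' summaries; this gives a rough
        # human-agreement reference for the dataset.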
        for video_idx, (s_name, s_groundtruth) in enumerate(zip(self.videonames, self.groundtruthscores)):
            n_frames = s_groundtruth.shape[0]

            n_users = s_groundtruth.shape[1]
            select_user_id = np.random.choice(n_users)
            pred_scores = s_groundtruth[:,select_user_id]

            s_F1, _, _ = sum_tools.evaluate_summary(pred_scores, s_groundtruth.transpose(), self.eval_metrics)

            F1s += s_F1
            widgets[-6] = progressbar.FormatLabel('{:s}'.format(s_name))
            widgets[-4] = progressbar.FormatLabel('{:.4f}'.format(s_F1))
            pbar.update(video_idx)
            # print(s_F1)

        if n_notselected_seq > 0:
            print("not-selected sequences: {:d}".format(n_notselected_seq))

        return F1s / self.dataset_size

    def Evaluate(self, model, use_cuda=True):

        F1s = 0
        n_notselected_seq = 0
        widgets = [
            ' -- [ ',
            progressbar.Counter(), '|',
            str(self.dataset_size), ' ] ',
            progressbar.Bar(), ' name:  ',
            progressbar.FormatLabel(''), ' F1s: ',
            progressbar.FormatLabel(''), ' (',
            progressbar.ETA(), ' ) '
        ]

        pbar = progressbar.ProgressBar(max_value=self.dataset_size,
                                       widgets=widgets)
        pbar.start()

        #FIXME This process is problematic and needs update!
        for video_idx, (s_name, s_feature, s_groundtruth01score) in enumerate(
                zip(self.videonames, self.videofeatures,
                    self.groundtruth01scores)):
            n_frames = s_feature.shape[0]

            pred_segments = []
            pred_scores = []
            for s_sample_rate in self.sample_rate:
                sample_rate_feature = s_feature[::s_sample_rate, :]
                sample_rate_nframes = sample_rate_feature.shape[0]

                # Propose overlapping windows of length seq_len with stride
                # (1 - overlap) * seq_len; the last window is snapped back to
                # end exactly at the sequence boundary (a standalone sketch of
                # this logic follows the method). Sequences shorter than
                # seq_len are skipped.
                startingBounds = 0
                if sample_rate_nframes < self.seq_len:
                    n_notselected_seq += 1
                else:
                    isInbound = True
                    proposedSegments = []
                    while startingBounds < sample_rate_nframes and isInbound:
                        endingBounds = startingBounds + self.seq_len
                        if endingBounds >= sample_rate_nframes:
                            isInbound = False
                            endingBounds = sample_rate_nframes
                            startingBounds = endingBounds - self.seq_len
                        proposedSegments.append([startingBounds, endingBounds])
                        startingBounds += int(
                            (1 - self.overlap) * self.seq_len)

                    # TODO Here could also be of change: record the clips and dynamic programming based on non-overlap segments and scores...
                    for s_proposed_segment in proposedSegments:
                        startIdx = s_proposed_segment[0]
                        endIdx = s_proposed_segment[1]
                        assert endIdx - startIdx == self.seq_len, "distance between startIdx and endIdx should be seq_len:{:d},{:d},{:d}".format(
                            endIdx, startIdx, self.seq_len)
                        s_clip_feature = Variable(torch.FloatTensor(
                            sample_rate_feature[startIdx:endIdx, :]),
                                                  requires_grad=False)
                        if use_cuda:
                            s_clip_feature = s_clip_feature.cuda()

                        # (seq_len, feat_dim) -> (1, feat_dim, seq_len); the
                        # model presumably expects batch-first, channel-first
                        # input
                        s_clip_feature = s_clip_feature.permute(1, 0).unsqueeze(0)

                        _, head_positions, _, tail_positions, cls_scores, _ = model(
                            s_clip_feature)
                        head_positions, tail_positions = helper.switch_positions(
                            head_positions, tail_positions)
                        # map window-local positions back to original frame
                        # indices (undo the window offset and sub-sampling):
                        head_positions = (head_positions +
                                          startIdx) * s_sample_rate
                        tail_positions = (tail_positions +
                                          startIdx) * s_sample_rate

                        head_positions = head_positions.squeeze(0)
                        tail_positions = tail_positions.squeeze(0)
                        cls_scores = cls_scores.squeeze(0)

                        pred_positions = torch.stack(
                            [head_positions, tail_positions], dim=-1)
                        # cls_scores = F.softmax(cls_scores, dim=-1)[:, -1]
                        cls_scores = F.hardtanh(
                            cls_scores, min_val=0,
                            max_val=1).contiguous().view(-1)

                        pred_segments.append(pred_positions.data.cpu().numpy())
                        pred_scores.append(cls_scores.data.cpu().numpy())

            #FIXME: debug here!
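            # Post-processing: merge window proposals from all sample rates,
            # suppress overlapping segments with NMS, keep the top-scoring
            # segments (presumably under a summary-length budget), and expand
            # them into per-frame 0/1 scores for evaluation.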
            pred_segments = np.concatenate(pred_segments)
            pred_scores = np.concatenate(pred_scores)
            updated_segments, updated_scores, picks = NMS.non_maxima_supression(
                pred_segments, pred_scores)
            selected_segments = rep_conversions.selecteTopSegments(
                updated_segments, updated_scores, n_frames)
            pred_framescores = rep_conversions.keyshots2frame01scores(
                selected_segments, n_frames)

            s_F1, _, _ = sum_tools.evaluate_summary(
                pred_framescores, s_groundtruth01score.reshape([1, -1]),
                self.eval_metrics)

            F1s += s_F1
            widgets[-6] = progressbar.FormatLabel('{:s}'.format(s_name))
            widgets[-4] = progressbar.FormatLabel('{:.4f}'.format(s_F1))
            pbar.update(video_idx)

        if n_notselected_seq > 0:
            print("sequences skipped (shorter than seq_len): {:d}".format(n_notselected_seq))

        return F1s / self.dataset_size
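
# A minimal standalone sketch (not from the original repo) of the windowing
# logic used in Evaluate above: fixed-length windows with stride
# (1 - overlap) * seq_len, where the final window is snapped back so that it
# ends exactly at n. Assumes n >= seq_len, as the caller guarantees above.
def sliding_windows_sketch(n, seq_len, overlap):
    segments = []
    start, in_bounds = 0, True
    while start < n and in_bounds:
        end = start + seq_len
        if end >= n:
            in_bounds = False
            end = n
            start = end - seq_len  # snap the last window to the boundary
        segments.append([start, end])
        start += int((1 - overlap) * seq_len)
    return segments

# e.g. sliding_windows_sketch(10, seq_len=4, overlap=0.5)
#      -> [[0, 4], [2, 6], [4, 8], [6, 10]]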
Example #3
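        # (Excerpt begins mid-function: the enclosing epoch/video loops and
        # the accumulators average_score, average_loss, and train_idx are
        # presumably defined earlier in the original source.)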
        for user_idx in range(n_users):
            s_user_summary = user_summary[user_idx, :]
            input_var = Variable(video_features)
            target_var = Variable(s_user_summary)
            _, preds, _ = model(input_var, useCuda=useCuda)
            preds = preds.squeeze(1)
            loss = criterion(preds, target_var)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
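            # After the parameter update, convert the frame predictions into a
            # binary keyshot summary (softmax -> 0/1, then segment selection
            # over the predefined change points) and score it against every
            # user's summary.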
            probs = F.softmax(preds, dim=1)
            output = convertsoftmaxTo01(probs.data.cpu().numpy())
            machine_summary = vsum_tools.generate_summary(
                output, pdefinedCPS, n_frames, pdefined_NFPS, positions)
            s_F1_score, _, _ = vsum_tools.evaluate_summary(
                machine_summary, full_user_summary, eval_method)
            s_loss = loss.data.cpu().numpy()[0]
            train_idx += 1
            average_score += s_F1_score
            average_loss += s_loss
            # print "epoch: {:d}\tvideo:{:d},user:{:d}: Loss: {:.4f}({:.4f})\t Summary Performance: {:.4f}({:.4f})".format(epoch_idx, video_idx, user_idx, s_loss, average_loss/train_idx, s_F1_score, average_score/train_idx)
    print(
        "---- Train Summary: Epoch {:d}, LR: {:.6f}, Epoch Loss: {:.4f}, Epoch F1: {:.4f}"
        .format(epoch_idx, optimizer.param_groups[0]['lr'],
                average_loss / train_idx, average_score / train_idx))
    epoch_train_F1scores.append(average_score / train_idx)
    epoch_train_losses.append(average_loss / train_idx)

    # test

    average_score = 0
    # (test loop truncated in this excerpt)

Example #4
    def Evaluate(self, model, use_cuda=True):
        F1s = 0
        n_notselected_seq = 0
        widgets = [
            ' -- [ ',
            progressbar.Counter(), '|',
            str(self.dataset_size), ' ] ',
            progressbar.Bar(), ' name:  ',
            progressbar.FormatLabel(''), ' F1s: ',
            progressbar.FormatLabel(''), ' (',
            progressbar.ETA(), ' ) '
        ]

        pbar = progressbar.ProgressBar(max_value=self.dataset_size,
                                       widgets=widgets)
        pbar.start()

        for video_idx, (s_name, s_feature, s_groundtruth, s_cps, s_nfps,
                        s_n_frames) in enumerate(
                            zip(self.videonames, self.segment_features,
                                self.groundtruthscores, self.video_cps,
                                self.video_nfps, self.video_n_frames)):
            # pad s_feature to max_input_len
            [s_n_segment, fea_dim] = s_feature.shape
            s_feature_pad = np.zeros([self.max_input_len, fea_dim])
            s_feature_pad[:s_n_segment, :] = s_feature
            s_feature = s_feature_pad

            s_feature = Variable(torch.FloatTensor(s_feature),
                                 requires_grad=False)
            if use_cuda:
                s_feature = s_feature.cuda()

            # (max_input_len, fea_dim) -> (1, fea_dim, max_input_len); the
            # model presumably expects batch-first, channel-first input
            s_feature = s_feature.permute(1, 0).unsqueeze(0)

            pointer_probs, pointer_positions, cls_scores, _ = model(s_feature)

            cls_scores = cls_scores.contiguous().squeeze(2).squeeze(0)
            pred_scores = cls_scores.data.cpu().numpy()
            pointer_positions = pointer_positions.squeeze(0)
            pred_positions = pointer_positions.data.cpu().numpy()
            pred_positions = pred_positions.astype('int')

            # scatter predicted scores onto the segments chosen by the pointer
            # positions; unselected segments keep score 0
            pred_segment_scores = np.zeros([s_n_segment])
            pred_segment_scores[pred_positions] = pred_scores

            pred_framescores = sum_tools.generate_summary(pred_segment_scores,
                                                          s_cps,
                                                          s_n_frames,
                                                          list(s_nfps),
                                                          proportion=0.15)
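            # generate_summary presumably runs a knapsack-style selection over
            # the precomputed change points (s_cps), keeping segments up to
            # proportion=0.15 of the video length, and returns 0/1 frame labels.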
            s_F1, _, _ = sum_tools.evaluate_summary(pred_framescores,
                                                    s_groundtruth.transpose(),
                                                    self.eval_metrics)

            F1s += s_F1
            widgets[-6] = progressbar.FormatLabel('{:s}'.format(s_name))
            widgets[-4] = progressbar.FormatLabel('{:.4f}'.format(s_F1))
            pbar.update(video_idx)

        if n_notselected_seq > 0:
            print("not-selected sequences: {:d}".format(n_notselected_seq))

        return F1s / self.dataset_size
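
# Hedged usage sketch (illustrative names, not from the original repo): the
# evaluators above would typically be driven from a training script, e.g.
#
#   evaluator = Evaluator(...)  # hypothetical wrapper holding the dataset
#   f1_random = evaluator.Evaluate_upperbound_rdmuser()
#   f1_model = evaluator.Evaluate(model, use_cuda=torch.cuda.is_available())
#   print('random-user F1: {:.4f} | model F1: {:.4f}'.format(f1_random, f1_model))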