def _oneshot_forward_video(self, video_idx, save_logits):
    """One-shot forward pass over a single video with online adaptation.

    Finetunes on the first frame, then for every subsequent frame adapts
    the network (tracking a list of key-frame indices), forwards the
    frame with the adapted model, records its evaluation measure, stores
    the prediction as a pseudo-label, and feeds the predicted mask back
    in as the next frame's "last mask".

    Args:
      video_idx: index of the video inside the dataset.
      save_logits: forwarded to _process_forward_minibatch.
    """
    with Timer():
      # finetune on first frame
      self._finetune(video_idx, n_finetune_steps=self.n_finetune_steps)

      network = self.engine.test_network
      targets = network.raw_labels
      ys = network.y_softmax
      # resize network output so it is comparable to the targets
      ys = self._adjust_results_to_targets(ys, targets)
      data = self.val_data

      # forward frame 0; its ground-truth target seeds the adaptation mask
      n, measures, ys_argmax_val, logits_val, targets_val = self._process_forward_minibatch(
        data, network, save_logits, self.save_oneshot, targets, ys, start_frame_idx=0)
      assert n == 1

      n_frames = data.num_examples_per_epoch()
      measures_video = []

      last_mask = targets_val[0]
      kframe_lst = [0]  # key-frame indices used by the adaptation step
      for t in range(1, n_frames):
        def get_posteriors():
          # forward frame t without saving anything, just to obtain logits
          n_, _, _, logits_val_, _ = self._process_forward_minibatch(
              data, network, save_logits=False, save_results=False, targets=targets, ys=ys, start_frame_idx=t)
          assert n_ == 1
          return logits_val_[0]

        # online adaptation to the current frame; may extend the key-frame list
        negatives, kframe_lst = self._adapt(video_idx, t, last_mask, get_posteriors, kframe_lst)

        # forward current frame using adapted model
        n, measures, ys_argmax_val, posteriors_val, targets_val = self._process_forward_minibatch(
            data, network, save_logits, self.save_oneshot, targets, ys, start_frame_idx=t)

        # store the prediction as a pseudo-label for the training data
        self.train_data._videos[video_idx][t]["label"] = ys_argmax_val[0]

        assert n == 1
        assert len(measures) == 1
        measure = measures[0]
        print("frame", t, ":", measure, file=log.v5)
        measures_video.append(measure)
        last_mask = ys_argmax_val[0]

        # prune negatives from last mask
        # negatives are None if we think that the target is lost
        if negatives is not None and self.use_negatives:
          last_mask[negatives] = 0

      measures_video = average_measures(measures_video)
      print("sequence", video_idx + 1, data.video_tag(video_idx), measures_video, file=log.v1)
# ---- Example 2 ----
 def forward(self, network, data, save_results=True, save_logits=False, n_runs=2000):
     """Benchmark the input pipeline by repeatedly fetching input batches.

     Builds the input tensors for the configured batch size, starts the
     TF queue runners and times how long `n_runs` session fetches take.

     Args:
         network: unused here; kept so the signature matches other forwarders.
         data: dataset object providing create_input_tensors_dict(batch_size).
         save_results: unused; kept for interface compatibility.
         save_logits: unused; kept for interface compatibility.
         n_runs: number of batches to fetch while timing (default 2000).
     """
     batch_size = self.config.int("batch_size", -1)
     assert batch_size != -1
     values = list(data.create_input_tensors_dict(batch_size).values())
     # fill the input queues so session.run below can dequeue batches
     tf.train.start_queue_runners(self.session)
     print("batch_size", batch_size, file=log.v1)
     with Timer("%s runs tooks" % n_runs):
         for idx in range(n_runs):
             print(idx, "/", n_runs, file=log.v5)
             self.session.run(values)
# ---- Example 3 ----
  def _oneshot_forward_video(self, video_idx, save_logits):
    """One-shot forward pass over one video with online adaptation.

    Finetunes on the first frame, then for each later frame adapts the
    network to its own previous prediction, forwards the frame with the
    adapted model, records the evaluation measure and feeds the predicted
    mask forward as the next frame's "last mask".

    Args:
      video_idx: index of the video inside self.val_data.
      save_logits: forwarded to _process_forward_minibatch.
    """
    with Timer():
      # finetune on first frame
      self._finetune(video_idx, n_finetune_steps=self.n_finetune_steps)

      network = self.engine.test_network
      targets = network.raw_labels
      ys = network.y_softmax
      # resize network output so it is comparable to the targets
      ys = self._adjust_results_to_targets(ys, targets)
      data = self.val_data

      # forward frame 0; its ground-truth target seeds the adaptation mask
      n, measures, ys_argmax_val, logits_val, targets_val = self._process_forward_minibatch(
        data, network, save_logits, self.save_oneshot, targets, ys, start_frame_idx=0)
      assert n == 1
      n_frames = data.num_examples_per_epoch()

      measures_video = []

      last_mask = targets_val[0]

      for t in range(1, n_frames):
        def get_posteriors():
          # forward frame t without saving anything, just to obtain logits
          n_, _, _, logits_val_, _ = self._process_forward_minibatch(
              data, network, save_logits=False, save_results=False, targets=targets, ys=ys, start_frame_idx=t)
          assert n_ == 1
          return logits_val_[0]

        # online adaptation to current frame
        negatives = self._adapt(video_idx, t, last_mask, get_posteriors)

        # forward current frame using adapted model
        n, measures, ys_argmax_val, posteriors_val, targets_val = self._process_forward_minibatch(
            data, network, save_logits, self.save_oneshot, targets, ys, start_frame_idx=t)
        assert n == 1
        assert len(measures) == 1
        measure = measures[0]
        print("frame", t, ":", measure, file=log.v5)
        measures_video.append(measure)
        last_mask = ys_argmax_val[0]

        # prune negatives from last mask
        # negatives are None if we think that the target is lost
        if negatives is not None and self.use_negatives:
          last_mask[negatives] = 0

      # (removed a no-op self-assignment: measures_video[:-1] = measures_video[:-1])
      measures_video = average_measures(measures_video)
      print("sequence", video_idx + 1, data.video_tag(video_idx), measures_video, file=log.v1)
# ---- Example 4 ----
    def _oneshot_forward_video(self, video_idx, save_logits):
        """Forward a whole video frame by frame and report averaged measures.

        Forwards every frame of the video through the test network and
        averages the per-frame evaluation measures. No online adaptation
        is performed in this variant.

        Args:
            video_idx: index of the video inside self.val_data.
            save_logits: forwarded to _process_forward_minibatch.
        """
        with Timer():
            # Test Network Variables + Resize output to same shape of Labels
            network = self.engine.test_network
            targets = network.raw_labels
            ys = network.y_softmax
            ys = self._adjust_results_to_targets(ys, targets)
            data = self.val_data

            # Process minibatch forward for first frame
            n, measures, ys_argmax_val, logits_val, targets_val = self._process_forward_minibatch(
                data,
                network,
                save_logits,
                self.save_oneshot,
                targets,
                ys,
                start_frame_idx=0)
            # NOTE(review): last_mask is assigned but never used in this variant.
            last_mask = targets_val[0]

            assert n == 1
            n_frames = data.num_examples_per_epoch()

            measures_video = []
            measures_video.append(measures[0])
            # NOTE(review): the loop starts at t=0, so frame 0 is measured a
            # second time (its measure was already appended above) — confirm
            # this double count is intended.
            for t in xrange(0, n_frames):
                # Compute IoU measures
                n, measures, ys_argmax_val, posteriors_val, targets_val = self._process_forward_minibatch(
                    data,
                    network,
                    save_logits,
                    self.save_oneshot,
                    targets,
                    ys,
                    start_frame_idx=t)
                assert n == 1
                assert len(measures) == 1
                measure = measures[0]
                print >> log.v5, "frame", t, ":", measure

                measures_video.append(measure)

            #measures_video[:-1] = measures_video[:-1]
            measures_video = average_measures(measures_video)
            print >> log.v1, "sequence", video_idx + 1, data.video_tag(
                video_idx), measures_video
    def _oneshot_forward_video(self, video_idx, save_logits):
        """Forward one video, adapting the network on the first few frames.

        For the first self.few_shot_samples frames an external adaptation
        target is loaded from self.mot_dir (PNG via cv2 for FBMS, pickled
        arrays for FORDS/DAVIS), min-max normalised to [0, 1], and used to
        adapt the network via _adapt(). Every frame is then forwarded and
        its evaluation measure recorded and averaged over the video.

        Args:
            video_idx: index of the video inside self.val_data.
            save_logits: forwarded to _process_forward_minibatch.
        """
        with Timer():
            # Test Network Variables + Resize output to same shape of Labels
            network = self.engine.test_network
            targets = network.raw_labels
            ys = network.y_softmax
            ys = self._adjust_results_to_targets(ys, targets)
            data = self.val_data

            # Process minibatch forward for first frame
            n, measures, ys_argmax_val, logits_val, targets_val = self._process_forward_minibatch(
                data,
                network,
                save_logits,
                self.save_oneshot,
                targets,
                ys,
                start_frame_idx=0)
            last_mask = targets_val[0]

            assert n == 1
            n_frames = data.num_examples_per_epoch()

            measures_video = []
            measures_video.append(measures[0])
            # per-video annotation directories/files, sorted by name
            dirs = sorted(os.listdir(self.mot_dir))
            files_annotations = sorted(
                os.listdir(self.mot_dir + data.video_tag(video_idx)))
            #      if "FBMS" in self.dataset:
            #          files_annotations= sorted(os.listdir('/home/nray1/ms/FBMS/Annotations/480p/'+data.video_tag(video_idx) ))
            #      elif "FORDS_Rotation" in self.dataset or \
            #              "PDB" in self.mot_dir or \
            #              "FORDS_tasks" in self.dataset:
            #          files_annotations = sorted(os.listdir(self.mot_dir+data.video_tag(video_idx)))
            #      elif "FORD" in self.dataset:
            #          files_annotations = sorted(os.listdir(self.mot_dir+dirs[video_idx]))
            # NOTE(review): the loop starts at t=0 although frame 0's measure
            # was already appended above, so frame 0 is counted twice — confirm
            # this is intended.
            for t in xrange(0, n_frames):

                # Probability Map Function
                def get_posteriors():
                    # forward frame t without saving, just to obtain logits
                    n_, _, _, logits_val_, _ = self._process_forward_minibatch(
                        data,
                        network,
                        save_logits=False,
                        save_results=False,
                        targets=targets,
                        ys=ys,
                        start_frame_idx=t)
                    assert n_ == 1
                    return logits_val_[0]

                # Start Network Adaptation Only on first frame
                if t < self.few_shot_samples:
                    # Read adaptation target and postprocess it
                    # For DAVIS starts at 0, FORDS starts at 1 for frame numbers, FBMS use annotation files
                    if "FBMS" in self.mot_dir:
                        mask = cv2.imread(
                            self.mot_dir + data.video_tag(video_idx) + '/' +
                            files_annotations[t], 0)
                    else:
                        # NOTE(review): if neither "FORDS" nor "DAVIS" matches,
                        # f is undefined and pickle.load(f) below raises — the
                        # dataset set appears to be assumed closed here.
                        if "FORDS" in self.dataset:
                            f = open(
                                self.mot_dir + data.video_tag(video_idx) +
                                '/' + files_annotations[t], 'rb')
#                  elif "FORDS_Rotation" in self.dataset:
#                      f= open(self.mot_dir+data.video_tag(video_idx)+'/'+files_annotations[t], 'rb')
#                  elif "FORD" in self.dataset:
#                      f= open(self.mot_dir+dirs[video_idx]+'/'+files_annotations[t], 'rb')
                        elif "DAVIS" in self.dataset:
                            f = open(
                                self.mot_dir + dirs[video_idx] +
                                '/%05d.pickle' % (t), 'rb')


#                  else:
#                     f= open(self.mot_dir+data.video_tag(video_idx)+'/'+files_annotations[t].split('.')[0]+'.pickle', 'rb')
                        # channel 1 of the pickled array is the mask
                        mask = pickle.load(f)[:, :, 1]
                    # min-max normalise to [0, 1]
                    # NOTE(review): a constant mask (max == min) divides by zero.
                    mask = (mask - mask.min()) * 1.0 / (mask.max() -
                                                        mask.min())
                    last_mask = np.expand_dims(mask, axis=2)

                    self._adapt(video_idx, t, last_mask, get_posteriors)

                # Compute IoU measures
                n, measures, ys_argmax_val, posteriors_val, targets_val = self._process_forward_minibatch(
                    data,
                    network,
                    save_logits,
                    self.save_oneshot,
                    targets,
                    ys,
                    start_frame_idx=t)
                assert n == 1
                assert len(measures) == 1
                measure = measures[0]
                print >> log.v5, "Motion Adapted frame", t, ":", measure
                measures_video.append(measure)

            measures_video = average_measures(measures_video)
            print >> log.v1, "sequence", video_idx + 1, data.video_tag(
                video_idx), measures_video