コード例 #1
0
    def __init__(self, phase, single_object=False):
        """Load sequences and annotations for the given dataset phase.

        Args:
            phase: dataset split identifier forwarded to ``db_read_sequences``.
            single_object: stored flag; presumably restricts loading to a
                single object per sequence — confirm against callers.
        """
        super(UVOSLoader, self).__init__()

        self._phase = phase
        self._single_object = single_object

        self._db_sequences = db_read_sequences(self._phase)

        # Build one Sequence/Annotation pair per (video, object-label) entry.
        self.sequences = []
        self.annotations = []
        for s in self._db_sequences:
            frames_dict = s['frames']
            im_path = s['image_path']
            anno_path = s['anno_path']
            # BUGFIX: dict.iteritems() is Python 2 only and raises
            # AttributeError on Python 3; items() works on both.
            for label_id, frames in frames_dict.items():
                name = s['vid']
                self.sequences.append(
                    Sequence(name, im_path, frames, label_id))
                self.annotations.append(
                    Annotation(name, anno_path, frames, label_id))

        # Map sequence name -> list index for O(1) lookup.
        self._keys = {s.name: i for i, s in enumerate(self.sequences)}

        # Color palette taken from the first annotation frame; fall back to a
        # single green entry if the file is missing or carries no palette.
        try:
            self.color_palette = np.array(
                Image.open(self.annotations[0].files[0]).getpalette()).reshape(
                    -1, 3)
        except Exception:
            self.color_palette = np.array([[0, 255, 0]])
コード例 #2
0
ファイル: davis17.py プロジェクト: birdman9391/MetaMaskTrack
    def __init__(self, year, phase, single_object=False):
        """Load DAVIS sequences and annotations for a given year and phase.

        Args:
            year: DAVIS edition, either "2016" or "2017".
            phase: dataset split (enum-like with TRAIN/VAL/TRAINVAL/TESTDEV
                members).
            single_object: load a single object per sequence; only valid for
                the 2016 edition.

        Raises:
            Exception: if a split unavailable in DAVIS 2016 is requested.
        """
        super(DAVISLoader, self).__init__()

        self._year = year
        self._phase = phase
        self._single_object = single_object

        # Only the 2016 and 2017 editions are supported.
        assert year == "2017" or year == "2016"

        # DAVIS 2016 only ships the TRAIN/VAL/TRAINVAL splits.
        if year == '2016':
            if not (self._phase == phase.TRAIN or self._phase == phase.VAL or \
                self._phase == phase.TRAINVAL):
                raise Exception(
                    "Set \'{}\' not available in DAVIS 2016 ({},{},{})".format(
                        self._phase.name, phase.TRAIN.name, phase.VAL.name,
                        phase.TRAINVAL.name))

        # single_object is only meaningful for the 2016 edition.
        if self._single_object:
            assert self._year == '2016'

        self._db_sequences = db_read_sequences(year, self._phase)

        # Load sequences and their annotations.
        self.sequences = [Sequence(s.name) for s in self._db_sequences]
        self.annotations = [
            Annotation(s.name, self._single_object) for s in self._db_sequences
        ]

        # Map sequence name -> list index for O(1) lookup.
        self._keys = dict(
            zip([s.name for s in self.sequences], range(len(self.sequences))))

        # Check number of frames is correct
        for sequence, db_sequence in zip(self.sequences, self._db_sequences):
            assert len(sequence) == db_sequence.num_frames

        # Check number of annotations is correct.
        # BUGFIX: the original zipped self.sequences here, so annotation frame
        # counts were never actually validated; iterate self.annotations.
        for annotation, db_sequence in zip(self.annotations,
                                           self._db_sequences):
            if (self._phase == phase.TRAIN) or (self._phase == phase.VAL):
                assert len(annotation) == db_sequence.num_frames
            elif self._phase == phase.TESTDEV:
                # Test-dev has no ground-truth annotations to check.
                pass

        # Color palette taken from the first annotation frame; fall back to a
        # single green entry if the file is missing or carries no palette.
        try:
            self.color_palette = np.array(
                Image.open(self.annotations[0].files[0]).getpalette()).reshape(
                    -1, 3)
        except Exception:
            self.color_palette = np.array([[0, 255, 0]])
コード例 #3
0
ファイル: text.py プロジェクト: whitenick/SoccerPredictions
 def _create_sentence_objects(self):
     '''Build a list of Sentence objects from the raw text.

     The raw blob is run through a locale-aware sentence tokenizer; each
     non-empty span between consecutive boundary indices becomes a
     Sentence carrying this blob's detected languages.
     '''
     tokenizer = SentenceTokenizer(locale=self.language.code)
     tokenized = tokenizer.transform(Sequence(self.raw))
     sentences = []
     for begin, end in zip(tokenized.idx[:-1], tokenized.idx[1:]):
         # Sentences share the same models as their parent blob
         span = tokenized.text[begin:end].strip()
         if not span:
             continue
         sentence = Sentence(span, start_index=begin, end_index=end)
         sentence.detected_languages = self.detected_languages
         sentences.append(sentence)
     return sentences
コード例 #4
0
ファイル: text.py プロジェクト: whitenick/SoccerPredictions
 def tokens(self):
     """Return this blob's tokens as a WordList, using this blob's tokenizer object
 (defaults to :class:`WordTokenizer <textblob.tokenizers.WordTokenizer>`).
 """
     tokenized = self.word_tokenizer.transform(Sequence(self.raw))
     token_list = tokenized.tokens()
     return WordList(token_list, parent=self, language=self.language.code)