Example 1
import os

from flask import jsonify, make_response, redirect, render_template, request
from flask_login import current_user


def add_video():
    if not current_user.is_authenticated:
        return redirect('/non_authorization')
    if request.method == 'POST':
        file = request.files['file']
        if file and allowed_file(file.filename):
            # Reuse the password-hash helper to build a unique, safe filename.
            filename = hash_password(file.filename) + '.mp4'
            path_video = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            file.save(path_video)
            get_pic(path_video)  # presumably generates a preview image
            session = db_session.create_session()
            video = Video(description=request.form['description'],
                          filename=filename,
                          owner_id=current_user.id,
                          number_likes=0)
            user = session.query(User).filter(
                User.id == current_user.id).first()
            user.own_videos.append(video)
            user.videos += 1
            video.owner = user
            video.owner_id = user.id
            session.add(video)
            session.merge(user)
            session.commit()
            # return redirect('/my_videos/0')
        elif file and not allowed_file(file.filename):
            return make_response(jsonify({'error': 'wrong file format'}), 406)

    return render_template('upload_video.html', title='Uploading video')
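
The route leans on an allowed_file helper that the snippet does not show. A minimal sketch in the style of the Flask documentation, assuming a whitelist of extensions; ALLOWED_EXTENSIONS and its contents are assumptions, not the original code:

# Minimal sketch of the missing helper; the extension set is an assumption,
# chosen because the route saves everything as .mp4.
ALLOWED_EXTENSIONS = {'mp4'}

def allowed_file(filename):
    # Accept only filenames whose extension is on the whitelist.
    return ('.' in filename
            and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS)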
Example 2
 def __init__(self, args, current_iter=0):
     """
     Initializes a meta learning system dataloader. The data loader uses the Pytorch DataLoader class to parallelize
     batch sampling and preprocessing.
     :param args: An arguments NamedTuple containing all the required arguments.
     :param current_iter: Current iter of experiment. Is used to make sure the data loader continues where it left
     of previously.
     """
     self.num_of_gpus = args.num_gpu
     self.batch_size = {
         'train': args.batch_size,
         'val': args.val_batch_size,
         'test': args.test_batch_size
     }
     # self.samples_per_iter = args.samples_per_iter
     self.num_workers = args.num_workers
     self.total_train_iters_produced = 0
     if args.dataset == 'vimeo90k':
         self.dataset = VimeoSeptuplet(
             args=args)  #FewShotLearningDatasetParallel(args=args)
     elif args.dataset == 'test':
         self.dataset = Video(args=args)
     # self.batches_per_iter = args.samples_per_iter
     self.full_data_length = self.dataset.data_length
     self.continue_from_iter(current_iter=current_iter)
     self.args = args
Example 3
 def test_name_invalid(self):
     # Assigning a name with an unsupported extension must raise InvalidVideoError.
     video = Video()
     with self.assertRaises(InvalidVideoError):
         video.name = "myname.ext"
Example 4
 def __add_video_length_and_captured_time(video: Video):
     # Run ffprobe in a docker container against the video file; ffprobe
     # writes its metadata report to stderr.
     output = subprocess.run(
         ['docker', 'run', '--rm', '-v',
          f"{os.path.abspath(video.path)}:/files",
          'sjourdan/ffprobe', f"/files/{video.name}"],
         capture_output=True)
     try:
         lines = output.stderr.decode("utf-8").splitlines()
     except UnicodeDecodeError:
         lines = Factory.mydecode(output.stderr).splitlines()
     for line in lines:
         if line.find("Duration") != -1:
             d = Factory._duration.match(line)
             if d:
                 # Convert HH:MM:SS into total seconds.
                 video.duration = (int(d.group('hour')) * 3600 +
                                   int(d.group('min')) * 60 +
                                   int(d.group('sec')))
         if line.find("creation_time") != -1:
             # Strip the "creation_time   : " prefix and store the timestamp
             # in milliseconds; fall back to a second datetime format.
             try:
                 video.captured = datetime.strptime(
                     line[line.find(':') + 2:],
                     Constants.video_duration_format).timestamp() * 1000
             except ValueError:
                 try:
                     video.captured = datetime.strptime(
                         line[line.find(':') + 2:].rstrip(),
                         Constants.video_duration_format2).timestamp() * 1000
                 except ValueError:
                     print(line)
                     print(video)
                     print(lines)
                     raise
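
The snippet depends on a Factory._duration regex and two datetime formats defined elsewhere. A plausible reconstruction, assuming standard ffprobe output ("  Duration: HH:MM:SS.ss, ..." and ISO-style creation_time tags); the real definitions may differ:

import re

class Factory:
    # Hypothetical reconstruction; matches e.g. "  Duration: 00:01:23.45, ...".
    _duration = re.compile(
        r"\s*Duration:\s*(?P<hour>\d+):(?P<min>\d+):(?P<sec>\d+)")

class Constants:
    # e.g. "creation_time   : 2020-05-01T12:34:56.000000Z"
    video_duration_format = "%Y-%m-%dT%H:%M:%S.%fZ"
    video_duration_format2 = "%Y-%m-%d %H:%M:%S"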
Example 5
    def from_elastic_entry(e):
        if e.kind == Constants.IMAGE_KIND:
            item = Image()
        elif e.kind == Constants.VIDEO_KIND:
            item = Video()
        elif e.kind == Constants.OTHER_KIND:
            item = Other()
        else:
            raise FactoryError(
                f"Entry mismatch, wrong kind {e.kind} found for: {e.name}; id:{e.meta.id}"
            )

        item.full_path = os.path.join(e.path, e.name)
        if e.type != item.type:
            raise FactoryError(f"Type mismatch for {e.name}")
        if e.kind != item.kind:
            raise FactoryError(f"Kind mismatch for {e.name}")
        # Copy every non-routine attribute from the elastic entry onto the
        # item, skipping fields that must not be read back from elastic.
        for attr, value in inspect.getmembers(
                e, lambda a: not inspect.isroutine(a)):
            if attr not in Constants.leave_out_when_reading_from_elastic:
                setattr(item, attr, value)
        if item.hash != e.hash:
            raise FactoryError(f"Hash mismatch for {e.name}")
        if item.path_hash != e.path_hash:
            raise FactoryError(f"Path-hash mismatch for {e.name}")
        item.id = e.meta.id

        return item
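
A usage sketch, assuming from_elastic_entry is a staticmethod on Factory and e is an elasticsearch-dsl hit (the e.meta.id access above points to that library); the client and index name are placeholders:

from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search

client = Elasticsearch()
for hit in Search(using=client, index='media').query('match_all').scan():
    item = Factory.from_elastic_entry(hit)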
Example 6
 def from_dropbox(entry):
     """Create an Image, Video, or Other object from the given Dropbox path."""
     try:
         result = Image()
         result.full_path = entry['path']
     except InvalidImageError:
         try:
             result = Video()
             result.full_path = entry['path']
         except InvalidVideoError:
             try:
                 result = Other()
                 result.full_path = entry['path']
             except InvalidOtherError:
                 raise FactoryError(
                     f"Path {entry['path']} is neither image nor video nor other"
                 )
     del entry['path']
     return result.update(entry)
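
The chain of try/except blocks doubles as type dispatch: each class validates the path in its full_path setter and raises its own error when the file is not of its kind. A minimal sketch of that pattern, with a hypothetical extension check standing in for the real validation, which is not shown:

class Image:
    @property
    def full_path(self):
        return self._full_path

    @full_path.setter
    def full_path(self, value):
        # Hypothetical check; reject anything that is not an image file.
        if not value.lower().endswith(('.jpg', '.jpeg', '.png')):
            raise InvalidImageError(value)
        self._full_path = value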
Example 7
from torch.utils.data import DataLoader


class MetaLearningSystemDataLoader(object):
    def __init__(self, args, current_iter=0):
        """
        Initializes a meta learning system dataloader. The data loader uses the Pytorch DataLoader class to parallelize
        batch sampling and preprocessing.
        :param args: An arguments NamedTuple containing all the required arguments.
        :param current_iter: Current iter of experiment. Is used to make sure the data loader continues where it left
        of previously.
        """
        self.num_of_gpus = args.num_gpu
        self.batch_size = {
            'train': args.batch_size,
            'val': args.val_batch_size,
            'test': args.test_batch_size
        }
        # self.samples_per_iter = args.samples_per_iter
        self.num_workers = args.num_workers
        self.total_train_iters_produced = 0
        if args.dataset == 'vimeo90k':
            self.dataset = VimeoSeptuplet(
                args=args)  #FewShotLearningDatasetParallel(args=args)
        elif args.dataset == 'test':
            self.dataset = Video(args=args)
        # self.batches_per_iter = args.samples_per_iter
        self.full_data_length = self.dataset.data_length
        self.continue_from_iter(current_iter=current_iter)
        self.args = args

    def get_dataloader(self, mode='train'):
        """
        Returns a data loader with the correct set (train, val or test), continuing from the current iter.
        :return:
        """
        shuffle = True if mode == 'train' else False
        # return DataLoader(self.dataset, batch_size=(self.num_of_gpus * self.batch_size * self.samples_per_iter),
        #                   shuffle=False, num_workers=self.num_workers, drop_last=True)
        return DataLoader(self.dataset,
                          batch_size=self.batch_size[mode],
                          shuffle=shuffle,
                          num_workers=self.num_workers,
                          drop_last=False)

    def continue_from_iter(self, current_iter):
        """
        Makes sure the data provider is aware of where we are in terms of training iterations in the experiment.
        :param current_iter:
        """
        # self.total_train_iters_produced += (current_iter * (self.num_of_gpus * self.batch_size * self.samples_per_iter))
        self.total_train_iters_produced += (current_iter *
                                            self.batch_size["train"])

    def get_train_batches(self, total_batches=-1, augment_images=False):
        """
        Returns a training batches data_loader
        :param total_batches: The number of batches we want the data loader to sample
        :param augment_images: Whether we want the images to be augmented.
        """
        if total_batches == -1:
            self.dataset.data_length = self.full_data_length
        else:
            self.dataset.data_length[
                "train"] = total_batches * self.dataset.batch_size["train"]
        self.dataset.switch_set(set_name="train",
                                current_iter=self.total_train_iters_produced)
        # self.dataset.set_augmentation(augment_images=augment_images)
        self.total_train_iters_produced += self.batch_size["train"]
        for sample_batched in self.get_dataloader(mode="train"):
            yield sample_batched

    def get_val_batches(self, total_batches=-1, augment_images=False):
        """
        Returns a validation batches data_loader
        :param total_batches: The number of batches we want the data loader to sample
        :param augment_images: Whether we want the images to be augmented.
        """
        if total_batches == -1:
            self.dataset.data_length = self.full_data_length
        else:
            self.dataset.data_length[
                'val'] = total_batches * self.dataset.batch_size["val"]
        self.dataset.switch_set(set_name="val")
        # self.dataset.set_augmentation(augment_images=augment_images)
        for sample_batched in self.get_dataloader(mode="val"):
            yield sample_batched

    def get_test_batches(self, total_batches=-1, augment_images=False):
        """
        Returns a testing batches data_loader
        :param total_batches: The number of batches we want the data loader to sample
        :param augment_images: Whether we want the images to be augmented.
        """
        if total_batches == -1:
            self.dataset.data_length = self.full_data_length
        else:
            self.dataset.data_length[
                'test'] = total_batches * self.dataset.batch_size["test"]
        self.dataset.switch_set(set_name='test')
        # self.dataset.set_augmentation(augment_images=augment_images)
        for sample_batched in self.get_dataloader(mode="test"):
            yield sample_batched
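
A minimal usage sketch, assuming an args namespace carrying the fields read in __init__; the field names come from the snippet above, but the values are placeholders and a plain Namespace stands in for the arguments NamedTuple:

from argparse import Namespace

args = Namespace(num_gpu=1, batch_size=4, val_batch_size=4,
                 test_batch_size=4, num_workers=2, dataset='vimeo90k')

loader = MetaLearningSystemDataLoader(args, current_iter=0)
for batch in loader.get_train_batches(total_batches=10):
    pass  # one training step per batch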
Example 8
 def test_avi_video(self):
     video = Video()
     video.name = "video.AVI"
     self.assertEqual(video.type, "avi")
     self.assertEqual(video.kind, Constants.VIDEO_KIND)
     self.assertEqual(video.name, "video.AVI")
Example 9
 def __video_from_directory_item(path: str) -> Video:
     video = Video()
     Factory.__entry_from_directory_item(video, path)
     Factory.__add_video_length_and_captured_time(video)
     return video