Example #1
def get_episodes_for_project(project_id, only_assigned=False):
    """
    Retrieve all episodes related to given project.
    """
    if only_assigned:
        Sequence = aliased(Entity, name="sequence")
        Shot = aliased(Entity, name="shot")
        Asset = aliased(Entity, name="asset")
        query = (
            Entity.query.join(Sequence, Entity.id == Sequence.parent_id)
            .join(Shot, Sequence.id == Shot.parent_id)
            .join(Task, Shot.id == Task.entity_id)
            .filter(Entity.project_id == project_id)
            .filter(user_service.build_assignee_filter())
        )
        shot_episodes = fields.serialize_models(query.all())
        shot_episode_ids = {episode["id"]: True for episode in shot_episodes}
        query = (
            Entity.query.join(Asset, Entity.id == Asset.source_id)
            .join(Task, Asset.id == Task.entity_id)
            .filter(Entity.project_id == project_id)
            .filter(user_service.build_assignee_filter())
        )
        asset_episodes = fields.serialize_models(query.all())
        result = shot_episodes
        for episode in asset_episodes:
            if episode["id"] not in shot_episode_ids:
                result.append(episode)
        return result
    else:
        return entities_service.get_entities_for_project(
            project_id, get_episode_type()["id"], "Episode"
        )
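Every example on this page funnels its query results through fields.serialize_models, which turns a list of SQLAlchemy model instances into JSON-serializable dictionaries. A minimal sketch of such a helper, assuming each model exposes a serialize() method (the actual implementation in the codebase may differ):

# Hypothetical sketch of fields.serialize_models; not the codebase's actual implementation.
def serialize_models(models, relations=False):
    """Convert SQLAlchemy model instances into plain, JSON-serializable dicts."""
    return [
        model.serialize(relations=relations) if model is not None else None
        for model in models
    ]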
Example #2
def get_preview_files_for_entity(entity_id):
    """
    Get all preview files available for the given entity.
    """
    tasks = tasks_service.get_task_dicts_for_entity(entity_id)
    previews = {}

    for task in tasks:
        preview_files = (PreviewFile.query.filter_by(
            task_id=task["id"]).join(Task).join(TaskType).order_by(
                TaskType.priority.desc()).order_by(TaskType.name).order_by(
                    PreviewFile.revision.desc()).order_by(
                        PreviewFile.created_at).all())
        task_type_id = task["task_type_id"]

        if len(preview_files) > 0:
            preview_files = fields.serialize_models(preview_files)
            preview_files = mix_preview_file_revisions(preview_files)
            previews[task_type_id] = [
                {
                    "id": preview_file["id"],
                    "revision": preview_file["revision"],
                    "extension": preview_file["extension"],
                    "annotations": preview_file["annotations"],
                    "previews": preview_file["previews"],
                    "created_at": preview_file["created_at"],
                    "task_id": preview_file["task_id"]
                } for preview_file in preview_files
            ]  # Keep the field list short to avoid building overly large responses

    return previews
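The value returned by get_preview_files_for_entity is a mapping from task type id to a list of trimmed preview dictionaries, roughly of the following shape (keys taken from the code above, values are placeholders):

# Illustrative shape only; ids and values below are placeholders.
previews = {
    "<task_type_id>": [
        {
            "id": "<preview_file_id>",
            "revision": 3,
            "extension": "mp4",
            "annotations": [],
            "previews": [],
            "created_at": "<timestamp>",
            "task_id": "<task_id>",
        },
        # Older revisions follow, in the order produced by the query above.
    ]
}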
Example #3
def get_last_output_files_for_instance(
    asset_instance_id,
    temporal_entity_id,
    task_type_id=None,
    output_type_id=None,
    name=None,
    representation=None,
    file_status_id=None,
):
    """
    Get last output files for given entity grouped by output type and name.
    """
    # Query maximum revision for each possible arguments
    query = OutputFile.query.with_entities(
        OutputFile.temporal_entity_id,
        OutputFile.task_type_id,
        OutputFile.output_type_id,
        OutputFile.name,
        OutputFile.representation,
        func.max(OutputFile.revision).label("MAX"),
    ).group_by(
        OutputFile.temporal_entity_id,
        OutputFile.task_type_id,
        OutputFile.output_type_id,
        OutputFile.name,
        OutputFile.representation,
    )
    query = query.filter(OutputFile.asset_instance_id == asset_instance_id)
    query = query.filter(OutputFile.temporal_entity_id == temporal_entity_id)
    statement = query.subquery()

    # Create a join query to retrieve maximum revision
    query = OutputFile.query.join(
        statement,
        and_(
            OutputFile.temporal_entity_id == statement.c.temporal_entity_id,
            OutputFile.task_type_id == statement.c.task_type_id,
            OutputFile.output_type_id == statement.c.output_type_id,
            OutputFile.name == statement.c.name,
            OutputFile.representation == statement.c.representation,
            OutputFile.revision == statement.c.MAX,
        ),
    )

    # Filter by specified arguments
    query = query.filter(OutputFile.asset_instance_id == asset_instance_id)
    query = query.filter(OutputFile.temporal_entity_id == temporal_entity_id)
    if task_type_id:
        query = query.filter(OutputFile.task_type_id == task_type_id)
    if output_type_id:
        query = query.filter(OutputFile.output_type_id == output_type_id)
    if name:
        query = query.filter(OutputFile.name == name)
    if representation:
        query = query.filter(OutputFile.representation == representation)
    if file_status_id:
        query = query.filter(OutputFile.file_status_id == file_status_id)

    output_files = query.all()
    return fields.serialize_models(output_files)
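The grouped MAX subquery joined back onto the base table is the usual SQLAlchemy recipe for "latest revision per group". A stripped-down, hypothetical sketch of the same pattern with a generic model and session (column names borrowed from the example above):

# Hypothetical sketch of the "latest revision per group" pattern used above.
from sqlalchemy import and_, func

def latest_revision_per_group(session, Model):
    # Subquery: maximum revision for each (name, output_type_id) pair.
    latest = (
        session.query(
            Model.name,
            Model.output_type_id,
            func.max(Model.revision).label("max_revision"),
        )
        .group_by(Model.name, Model.output_type_id)
        .subquery()
    )
    # Join back to keep only the rows carrying that maximum revision.
    return (
        session.query(Model)
        .join(
            latest,
            and_(
                Model.name == latest.c.name,
                Model.output_type_id == latest.c.output_type_id,
                Model.revision == latest.c.max_revision,
            ),
        )
        .all()
    )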
Example #4
def get_last_working_files_for_task(task_id):
    """
    Get last revisions for given task grouped by file name.
    """
    query = WorkingFile.query.with_entities(
        WorkingFile.name,
        WorkingFile.task_id,
        func.max(WorkingFile.revision).label("MAX"),
    ).group_by(
        WorkingFile.name,
        WorkingFile.task_id,
    )

    query = query.filter(WorkingFile.task_id == task_id)
    statement = query.subquery()

    query = WorkingFile.query.join(
        statement,
        and_(
            WorkingFile.task_id == statement.c.task_id,
            WorkingFile.name == statement.c.name,
            WorkingFile.revision == statement.c.MAX,
        ),
    )

    # Run the query and serialize the resulting models
    working_files = fields.serialize_models(query.all())

    # group by name
    working_files_by_name = {
        k: list(v)[0]
        for k, v in itertools.groupby(working_files, key=itemgetter('name'))
    }

    return working_files_by_name
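Note that itertools.groupby only groups adjacent items, so the dictionary comprehension above relies on each file name appearing at most once in the serialized list (which the MAX-revision join guarantees here). If duplicates were possible, the list would have to be sorted by name first; a small self-contained illustration:

# Illustrative toy data: groupby needs equal keys to be adjacent.
import itertools
from operator import itemgetter

files = [
    {"name": "b", "revision": 2},
    {"name": "a", "revision": 3},
    {"name": "b", "revision": 1},
]
files.sort(key=itemgetter("name"))  # make equal names adjacent
latest_by_name = {
    name: max(group, key=itemgetter("revision"))
    for name, group in itertools.groupby(files, key=itemgetter("name"))
}
# {'a': {'name': 'a', 'revision': 3}, 'b': {'name': 'b', 'revision': 2}}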
Example #5
def get_all_attachment_files_for_task(task_id):
    """
    Return all attachment files linked to the given task.
    """
    attachment_files = (AttachmentFile.query.join(Comment).join(
        Task, Task.id == Comment.object_id).filter(Task.id == task_id))
    return fields.serialize_models(attachment_files)
Example #6
def get_output_files_for_instance(
    asset_instance_id,
    temporal_entity_id,
    task_type_id=None,
    output_type_id=None,
    name=None,
    representation=None,
    file_status_id=None,
):
    """
    Return output files for given instance ordered by revision.
    """
    query = OutputFile.query.filter_by(asset_instance_id=asset_instance_id)

    if temporal_entity_id:
        query = query.filter(
            OutputFile.temporal_entity_id == temporal_entity_id)
    if task_type_id:
        query = query.filter(OutputFile.task_type_id == task_type_id)
    if output_type_id:
        query = query.filter(OutputFile.output_type_id == output_type_id)
    if name:
        query = query.filter(OutputFile.name == name)
    if representation:
        query = query.filter(OutputFile.representation == representation)
    if file_status_id:
        query = query.filter(OutputFile.file_status_id == file_status_id)

    output_files = (query.filter(OutputFile.revision >= 0).order_by(
        desc(OutputFile.revision)).all())
    return fields.serialize_models(output_files)
Example #7
    def post(self):
        results = []
        self.sg_entries = request.json

        self.check_permissions()
        self.prepare_import()

        for sg_entry in self.filtered_entries():
            try:
                data = self.extract_data(sg_entry)
                result_entry = self.import_entry(data)
                results.append(result_entry)
            except ShotgunEntryImportFailed as exception:
                current_app.logger.warn(exception)
            except KeyError as exception:
                current_app.logger.warn(exception)
                current_app.logger.error(
                    "Your data is not properly formatted: %s" % sg_entry
                )
            except IntegrityError:
                current_app.logger.error(
                    "Data information are duplicated or wrong: %s" %
                    sg_entry
                )

            self.post_processing()

        return fields.serialize_models(results), 200
Example #8
def get_task_types_for_entity(entity_id):
    """
    Return all task types for which there is a task related to given entity.
    """
    task_types = (TaskType.query.join(
        Task, Entity).filter(Entity.id == entity_id).all())
    return fields.serialize_models(task_types)
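Passing two targets to a single join() call, as above, is an older SQLAlchemy calling style that infers the join conditions from the mapped relationships. In recent SQLAlchemy versions the same query would usually be spelled with one join() per target; a sketch, assuming Task carries task_type_id and entity_id foreign keys as in the other examples:

# Hypothetical one-join-per-target equivalent of the query above.
task_types = (
    TaskType.query
    .join(Task, Task.task_type_id == TaskType.id)
    .join(Entity, Entity.id == Task.entity_id)
    .filter(Entity.id == entity_id)
    .all()
)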
Example #9
def get_task_types_for_project(project_id):
    """
    Return all task types for which there is a task related to given project.
    """
    task_types = (TaskType.query.join(Task).filter(
        Task.project_id == project_id).distinct(TaskType.id).all())
    return fields.serialize_models(task_types)
Example #10
def get_processing_preview_files_for_project():
    """
    """
    preview_files = (PreviewFile.query.join(Task).filter(
        PreviewFile.status.in_(("Broken", "Processing"))).add_column(
            Task.task_status_id).add_column(Task.entity_id))
    return fields.serialize_models(preview_files)
Example #11
def get_output_files_for_entity(entity_id):
    """
    Return output files for given entity ordered by revision.
    """
    output_files = OutputFile.query.filter_by(entity_id=entity_id).filter(
        OutputFile.revision >= 0).order_by(desc(OutputFile.revision)).all()
    return fields.serialize_models(output_files)
Example #12
def get_preview_files_for_task(task_id):
    """
    Get all preview files for given task.
    """
    previews = PreviewFile.query.filter_by(task_id=task_id).order_by(
        PreviewFile.revision.desc())
    return fields.serialize_models(previews)
Example #13
def get_active_persons():
    """
    Return all persons whose active flag is set to True.
    """
    persons = (Person.query.filter_by(active=True).order_by(
        Person.first_name).order_by(Person.last_name).all())
    return fields.serialize_models(persons)
Example #14
def get_last_output_files_for_entity(
    entity_id,
    task_type_id=None,
    output_type_id=None,
    name=None,
    representation=None,
):
    """
    Get last output files for given parameters.

    We use a subquery to get maximum revision and then filter with given
    params.
    """
    # Query maximum revision for each possible arguments
    query = OutputFile.query.with_entities(
        OutputFile.task_type_id,
        OutputFile.output_type_id,
        OutputFile.name,
        OutputFile.representation,
        func.max(OutputFile.revision).label("MAX"),
    ).group_by(
        OutputFile.task_type_id,
        OutputFile.output_type_id,
        OutputFile.name,
        OutputFile.representation,
    )
    query = query.filter(OutputFile.entity_id == entity_id)
    query = query.filter(OutputFile.asset_instance_id == None)
    statement = query.subquery()

    # Create a join query to retrieve maximum revision and filter by
    # specified arguments
    query = OutputFile.query.join(
        statement,
        and_(
            OutputFile.task_type_id == statement.c.task_type_id,
            OutputFile.output_type_id == statement.c.output_type_id,
            OutputFile.name == statement.c.name,
            OutputFile.representation == statement.c.representation,
            OutputFile.revision == statement.c.MAX,
        ),
    )

    # Filter by specified arguments
    if task_type_id:
        query = query.filter(OutputFile.task_type_id == task_type_id)
    if output_type_id:
        query = query.filter(OutputFile.output_type_id == output_type_id)
    if name:
        query = query.filter(OutputFile.name == name)
    if representation:
        query = query.filter(OutputFile.representation == representation)

    query = query.filter(OutputFile.entity_id == entity_id)
    query = query.filter(OutputFile.asset_instance_id == None)

    # Run the final query and serialize the results
    output_files = query.all()
    return fields.serialize_models(output_files)
Example #15
    def post(self):
        kitsu_entries = request.json
        instances = []
        for entry in kitsu_entries:
            if self.pre_check_entry():
                instance = self.model.create_from_import(entry)
                instances.append(instance)
        return fields.serialize_models(instances)
Example #16
def get_preview_files_for_revision(task_id, revision):
    """
    Get all preview files for given task and revision.
    """
    preview_files = (PreviewFile.query.filter_by(task_id=task_id,
                                                 revision=revision).order_by(
                                                     PreviewFile.position))
    return fields.serialize_models(preview_files)
Example #17
def get_working_files_for_task(task_id):
    """
    Retrieve all working files for a given task, ordered by revision from
    highest to lowest.
    """
    working_files = WorkingFile.query.filter_by(task_id=task_id).filter(
        WorkingFile.revision >= 0).order_by(desc(WorkingFile.revision)).all()
    return fields.serialize_models(working_files)
Example #18
def get_metadata_descriptors(project_id):
    """
    Get all metadata descriptors for the given project, ordered by name.
    """
    descriptors = MetadataDescriptor.query \
        .filter(MetadataDescriptor.project_id == project_id) \
        .order_by(MetadataDescriptor.name) \
        .all()
    return fields.serialize_models(descriptors)
Example #19
def get_all_attachment_files_for_project(project_id):
    """
    Return all attachment files linked to the given project. It is mainly needed
    for synchronisation purposes.
    """
    attachment_files = (AttachmentFile.query.join(Comment).join(
        Task,
        Task.id == Comment.object_id).filter(Task.project_id == project_id))
    return fields.serialize_models(attachment_files)
Example #20
    def post(self):
        kitsu_entries = request.json
        if type(kitsu_entries) != list:
            raise WrongParameterException("A list of entities is expected.")
        instances = []
        for entry in kitsu_entries:
            if self.pre_check_entry():
                instance = self.model.create_from_import(entry)
                instances.append(instance)
        return fields.serialize_models(instances)
Example #21
def get_output_files_for_instance(asset_instance_id, temporal_entity_id):
    """
    Return output files for given instance ordered by revision.
    """
    output_files = OutputFile.query.filter_by(
        asset_instance_id=asset_instance_id,
        temporal_entity_id=temporal_entity_id).filter(
            OutputFile.revision >= 0).order_by(desc(
                OutputFile.revision)).all()
    return fields.serialize_models(output_files)
Example #22
def get_task_types_for_episode(episode_id):
    """
    Return all task types for which there is a task related to given episode.
    """
    Sequence = aliased(Entity, name="sequence")
    Episode = aliased(Entity, name="episode")
    task_types = (TaskType.query.join(Task, Entity).join(
        Sequence, Sequence.id == Entity.parent_id).join(
            Episode, Episode.id == Sequence.parent_id).filter(
                Episode.id == episode_id).group_by(TaskType.id).all())
    return fields.serialize_models(task_types)
Example #23
def get_metadata_descriptors(project_id, for_client=False):
    """
    Get all metadata descriptors for the given project, optionally restricted
    to those visible to clients.
    """
    query = MetadataDescriptor.query.filter(
        MetadataDescriptor.project_id == project_id).order_by(
            MetadataDescriptor.name)
    if for_client:
        query = query.filter(MetadataDescriptor.for_client == True)

    descriptors = query.all()
    return fields.serialize_models(descriptors)
Example #24
def get_sequences_for_episode(episode_id, only_assigned=False):
    """
    Retrieve all sequences related to given episode.
    """
    if only_assigned:
        Shot = aliased(Entity, name="shot")
        query = (Entity.query.join(Shot, Entity.id == Shot.parent_id).join(
            Task, Shot.id == Task.entity_id).filter(
                Entity.parent_id == episode_id).filter(
                    user_service.build_assignee_filter()))
        return fields.serialize_models(query.all())
    else:
        return get_sequences({"parent_id": episode_id})
Example #25
def get_sequences_for_project(project_id, only_assigned=False):
    """
    Retrieve all sequences related to given project.
    """
    if only_assigned:
        Shot = aliased(Entity, name="shot")
        query = (Entity.query.join(Shot, Entity.id == Shot.parent_id).join(
            Task, Shot.id == Task.entity_id).filter(
                Entity.project_id == project_id).filter(
                    user_service.build_assignee_filter()))
        return fields.serialize_models(query.all())
    else:
        return entities_service.get_entities_for_project(
            project_id,
            get_sequence_type()["id"], "Sequence")
Example #26
def get_working_files_for_entity(entity_id, task_id=None, name=None):
    """
    Retrieve all working files for a given entity and specified parameters,
    ordered by revision from highest to lowest.
    """
    query = WorkingFile.query.filter_by(entity_id=entity_id)

    if task_id:
        query = query.filter(WorkingFile.task_id == task_id)
    if name:
        query = query.filter(WorkingFile.name == name)

    query = query.filter(WorkingFile.revision >= 0).order_by(
        desc(WorkingFile.revision))

    working_files = query.all()
    return fields.serialize_models(working_files)
Example #27
def get_person_related_tasks(person_id, task_type_id):
    """
    Retrieve all tasks of the given task type that belong to entities
    having at least one task assigned to the given person.
    """
    person = Person.get(person_id)
    projects = projects_service.open_projects()
    project_ids = [project["id"] for project in projects]

    entities = (Entity.query.join(Task, Entity.id == Task.entity_id).filter(
        Task.assignees.contains(person)).filter(
            Entity.project_id.in_(project_ids))).all()

    entity_ids = [entity.id for entity in entities]
    tasks = (Task.query.filter(Task.entity_id.in_(entity_ids)).filter(
        Task.task_type_id == task_type_id)).all()

    return fields.serialize_models(tasks)
Example #28
    def post(self):
        uploaded_file = request.files["file"]
        file_name = "%s.csv" % uuid.uuid4()

        file_path = os.path.join(app.config["TMP_DIR"], file_name)
        uploaded_file.save(file_path)
        result = []

        self.prepare_import()
        try:
            with open(file_path) as csvfile:
                reader = csv.DictReader(csvfile)
                for row in reader:
                    result.append(self.import_row(row))

            return fields.serialize_models(result), 201
        except KeyError as e:
            return {"error": "A column is missing: %s" % e}, 400
Example #29
    def post(self):
        kitsu_entries = request.json
        if type(kitsu_entries) != list:
            raise WrongParameterException("A list of entities is expected.")

        instances = []
        for entry in kitsu_entries:
            if self.check_access(entry):
                try:
                    (instance,
                     is_updated) = self.model.create_from_import(entry)
                    if is_updated:
                        self.emit_event("update", entry)
                    else:
                        self.emit_event("new", entry)
                except IntegrityError as exc:
                    raise WrongParameterException(exc.orig)
                instances.append(instance)
        return fields.serialize_models(instances)
Example #30
def get_output_files_for_instance(
    asset_instance_id,
    temporal_entity_id,
    task_type_id=None,
    output_type_id=None,
    name=None,
    representation=None,
    file_status_id=None,
    created_at_since=None,
    person_id=None,
):
    """
    Return output files for given instance ordered by revision.
    """
    query = OutputFile.query.filter_by(asset_instance_id=asset_instance_id)

    if temporal_entity_id:
        query = query.filter(
            OutputFile.temporal_entity_id == temporal_entity_id)
    if task_type_id:
        query = query.filter(OutputFile.task_type_id == task_type_id)
    if output_type_id:
        query = query.filter(OutputFile.output_type_id == output_type_id)
    if name:
        query = query.filter(OutputFile.name == name)
    if representation:
        query = query.filter(OutputFile.representation == representation)
    if file_status_id:
        query = query.filter(OutputFile.file_status_id == file_status_id)
    if created_at_since:
        days = datetime.datetime.now() - datetime.timedelta(
            days=int(created_at_since))
        query = query.filter(OutputFile.created_at >= days)
    if person_id:
        query = query.filter(OutputFile.person_id == person_id)

    output_files = (query.filter(OutputFile.revision >= 0).order_by(
        desc(OutputFile.revision)).all())
    return fields.serialize_models(output_files, relations=True)
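The created_at_since branch converts a number of days into a cutoff datetime (the local name days actually holds a date, not a day count) and keeps only files created on or after it. A standalone illustration of that cutoff computation:

# Illustrative only: how the created_at_since cutoff above is computed.
import datetime

created_at_since = "7"  # e.g. output files created within the last 7 days
cutoff = datetime.datetime.now() - datetime.timedelta(days=int(created_at_since))
# The query above then applies: OutputFile.created_at >= cutoff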