def test_get_full_entity_name(self):
    (asset_name, episode_id) = \
        names_service.get_full_entity_name(self.asset.id)
    (shot_name, episode_id) = \
        names_service.get_full_entity_name(self.shot.id)
    self.assertEqual(asset_name, "Props / Tree")
    self.assertEqual(shot_name, "E01 / S01 / P01")

def get_task_descriptors(person_id, task):
    """
    Build task information needed to write notification emails: author object,
    full task name and task URL.
    """
    author = persons_service.get_person(person_id)
    project = projects_service.get_project(task["project_id"])
    task_type = tasks_service.get_task_type(task["task_type_id"])
    entity = entities_service.get_entity(task["entity_id"])
    (entity_name, episode_id) = names_service.get_full_entity_name(
        entity["id"]
    )

    episode_segment = ""
    entity_type = "assets"
    if task_type["for_shots"]:
        entity_type = "shots"
    if project["production_type"] == "tvshow":
        episode_segment = "/episodes/%s" % episode_id

    task_name = "%s / %s / %s" % (
        project["name"],
        entity_name,
        task_type["name"],
    )
    task_url = "%s://%s/productions/%s%s/%s/tasks/%s" % (
        config.DOMAIN_PROTOCOL,
        config.DOMAIN_NAME,
        task["project_id"],
        episode_segment,
        entity_type,
        task["id"],
    )
    return (author, task_name, task_url)

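# Illustrative sketch of the URL built by get_task_descriptors, assuming
# placeholder config values (DOMAIN_PROTOCOL = "https",
# DOMAIN_NAME = "kitsu.example.com") and made-up IDs:
#
#   * shot task on a TV show production:
#     https://kitsu.example.com/productions/<project-id>/episodes/<episode-id>/shots/tasks/<task-id>
#   * asset task on a non-TV-show production (episode segment stays empty):
#     https://kitsu.example.com/productions/<project-id>/assets/tasks/<task-id>
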
def get_running_preview_files():
    """
    Return preview files with a broken or processing status for all open
    productions.
    """
    entries = (
        PreviewFile.query.join(Task)
        .join(Project)
        .join(ProjectStatus)
        .filter(ProjectStatus.name.in_(("Active", "open", "Open")))
        .filter(PreviewFile.status.in_(("broken", "processing")))
        .add_columns(Task.project_id, Task.task_type_id, Task.entity_id)
        .order_by(PreviewFile.created_at.desc())
    )
    results = []
    for (preview_file, project_id, task_type_id, entity_id) in entries:
        result = preview_file.serialize()
        result["project_id"] = fields.serialize_value(project_id)
        result["task_type_id"] = fields.serialize_value(task_type_id)
        (result["full_entity_name"], _) = names_service.get_full_entity_name(
            entity_id
        )
        results.append(result)
    return results

def build_row(self, time_spent_row):
    (
        time_spent,
        project_name,
        entity_type_name,
        entity_id,
        entity_name,
        task_type_name,
        person_first_name,
        person_last_name,
    ) = time_spent_row
    if entity_type_name == "Shot":
        entity_name, _ = names_service.get_full_entity_name(entity_id)
    date = ""
    if time_spent.date is not None:
        date = time_spent.date.strftime("%Y-%m-%d")
    person_name = "%s %s" % (person_first_name, person_last_name)
    return [
        project_name,
        person_name.strip(),
        entity_type_name,
        entity_name,
        task_type_name,
        date,
        time_spent.duration,
    ]

def get_raw_quota_shots_between(
    person_id, start, end, project_id=None, task_type_id=None
):
    """
    Get all shots leading to a quota computation during the given period.
    """
    shot_type = get_shot_type()
    person = persons_service.get_person_raw(person_id)
    shots = []

    query = (
        Entity.query.filter(Entity.entity_type_id == shot_type["id"])
        .filter(Task.project_id == project_id)
        .filter(Task.task_type_id == task_type_id)
        .filter(Task.end_date.between(start, end))
        .filter(Task.assignees.contains(person))
        .join(Task, Entity.id == Task.entity_id)
        .join(Project, Project.id == Task.project_id)
    )
    query_shots = query.all()
    for entity in query_shots:
        shot = entity.serialize()
        full_name, _ = names_service.get_full_entity_name(shot["id"])
        shot["full_name"] = full_name
        shot["weight"] = 1
        shots.append(shot)

    return sorted(shots, key=itemgetter("full_name"))

def build_row(self, shot):
    name, _ = names_service.get_full_entity_name(shot["entity_id"])
    preview_file = files_service.get_preview_file(shot["preview_file_id"])
    task = tasks_service.get_task(shot["preview_file_task_id"])
    task_type = self.task_type_map[task["task_type_id"]]
    task_status = self.task_status_map[task["task_status_id"]]
    comment = self.task_comment_map.get(task["id"], {})
    author = self.get_author(comment)
    date = self.get_date(comment)
    return [
        name,
        task_type["name"],
        preview_file["revision"],
        task_status["name"],
        author,
        date,
        comment.get("text", ""),
    ]

def get_last_news_for_project(
    project_id,
    news_id=None,
    only_preview=False,
    task_type_id=None,
    task_status_id=None,
    author_id=None,
    page=1,
    page_size=50,
    before=None,
    after=None,
):
    """
    Return the latest news for the given project (50 entries per page by
    default). Add related information to make them displayable.
    """
    offset = (page - 1) * page_size
    query = (
        News.query.order_by(News.created_at.desc())
        .join(Task, News.task_id == Task.id)
        .join(Project)
        .join(Entity, Task.entity_id == Entity.id)
        .outerjoin(Comment, News.comment_id == Comment.id)
        .outerjoin(PreviewFile, News.preview_file_id == PreviewFile.id)
        .filter(Task.project_id == project_id)
    )

    if news_id is not None:
        query = query.filter(News.id == news_id)

    if task_status_id is not None:
        query = query.filter(Comment.task_status_id == task_status_id)
        query = query.filter(News.change == True)

    if task_type_id is not None:
        query = query.filter(Task.task_type_id == task_type_id)

    if author_id is not None:
        query = query.filter(News.author_id == author_id)

    if only_preview:
        query = query.filter(News.preview_file_id != None)

    if after is not None:
        query = query.filter(News.created_at > after)

    if before is not None:
        query = query.filter(News.created_at < before)

    (total, nb_pages) = _get_news_total(query, page_size)

    query = query.add_columns(
        Project.id,
        Project.name,
        Task.task_type_id,
        Comment.id,
        Comment.task_status_id,
        Task.entity_id,
        PreviewFile.extension,
        Entity.preview_file_id,
    )
    query = query.limit(page_size)
    query = query.offset(offset)
    news_list = query.all()

    result = []
    for (
        news,
        project_id,
        project_name,
        task_type_id,
        comment_id,
        task_status_id,
        task_entity_id,
        preview_file_extension,
        entity_preview_file_id,
    ) in news_list:
        (full_entity_name, episode_id) = names_service.get_full_entity_name(
            task_entity_id
        )
        result.append(
            fields.serialize_dict(
                {
                    "id": news.id,
                    "type": "News",
                    "author_id": news.author_id,
                    "comment_id": news.comment_id,
                    "task_id": news.task_id,
                    "task_type_id": task_type_id,
                    "task_status_id": task_status_id,
                    "task_entity_id": task_entity_id,
                    "preview_file_id": news.preview_file_id,
                    "preview_file_extension": preview_file_extension,
                    "project_id": project_id,
                    "project_name": project_name,
                    "created_at": news.created_at,
                    "change": news.change,
                    "full_entity_name": full_entity_name,
                    "episode_id": episode_id,
                    "entity_preview_file_id": entity_preview_file_id,
                }
            )
        )

    return {
        "data": result,
        "total": total,
        "nb_pages": nb_pages,
        "limit": page_size,
        "offset": offset,
        "page": page,
    }

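# Usage sketch for the pagination above (values are illustrative): with
# page=3 and page_size=50, offset is (3 - 1) * 50 = 100, so entries 101-150
# of the filtered news feed are returned, together with the "total" and
# "nb_pages" figures computed by _get_news_total.
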
def get_last_notifications(notification_id=None, after=None, before=None):
    """
    Return last 100 user notifications.
    """
    current_user = persons_service.get_current_user()
    Author = aliased(Person, name="author")
    is_current_user_artist = current_user["role"] == "user"
    result = []
    query = (
        Notification.query.filter_by(person_id=current_user["id"])
        .order_by(Notification.created_at.desc())
        .join(Author, Author.id == Notification.author_id)
        .join(Task, Task.id == Notification.task_id)
        .join(Project, Project.id == Task.project_id)
        .outerjoin(Comment, Comment.id == Notification.comment_id)
        .add_columns(
            Project.id,
            Project.name,
            Task.task_type_id,
            Comment.id,
            Comment.task_status_id,
            Comment.text,
            Comment.replies,
            Task.entity_id,
            Author.role,
        )
    )

    if notification_id is not None:
        query = query.filter(Notification.id == notification_id)

    if after is not None:
        query = query.filter(Notification.created_at > after)

    if before is not None:
        query = query.filter(Notification.created_at < before)

    notifications = query.limit(100).all()

    for (
        notification,
        project_id,
        project_name,
        task_type_id,
        comment_id,
        task_status_id,
        comment_text,
        comment_replies,
        task_entity_id,
        role,
    ) in notifications:
        (full_entity_name, episode_id) = names_service.get_full_entity_name(
            task_entity_id
        )

        preview_file_id = None
        mentions = []
        if comment_id is not None:
            comment = Comment.get(comment_id)
            if len(comment.previews) > 0:
                preview_file_id = comment.previews[0].id
            mentions = comment.mentions or []

        reply_text = ""
        if notification.type == "reply":
            reply = next(
                (
                    reply
                    for reply in comment_replies
                    if reply["id"] == str(notification.reply_id)
                ),
                None,
            )
            if reply is not None:
                reply_text = reply["text"]

        if role == "client" and is_current_user_artist:
            comment_text = ""
            reply_text = ""

        result.append(
            fields.serialize_dict(
                {
                    "id": notification.id,
                    "type": "Notification",
                    "notification_type": notification.type,
                    "author_id": notification.author_id,
                    "comment_id": notification.comment_id,
                    "task_id": notification.task_id,
                    "task_type_id": task_type_id,
                    "task_status_id": task_status_id,
                    "mentions": mentions,
                    "preview_file_id": preview_file_id,
                    "project_id": project_id,
                    "project_name": project_name,
                    "comment_text": comment_text,
                    "reply_text": reply_text,
                    "created_at": notification.created_at,
                    "read": notification.read,
                    "change": notification.change,
                    "full_entity_name": full_entity_name,
                    "episode_id": episode_id,
                }
            )
        )

    return result

def get_last_notifications(notification_id=None):
    """
    Return last 100 user notifications.
    """
    current_user = persons_service.get_current_user_raw()
    result = []
    query = (
        Notification.query.filter_by(person_id=current_user.id)
        .order_by(Notification.created_at.desc())
        .join(Task, Project)
        .outerjoin(Comment)
        .add_columns(
            Project.id,
            Project.name,
            Task.task_type_id,
            Comment.id,
            Comment.task_status_id,
            Comment.text,
            Task.entity_id,
        )
    )

    if notification_id is not None:
        query = query.filter(Notification.id == notification_id)

    notifications = query.limit(100).all()

    for (
        notification,
        project_id,
        project_name,
        task_type_id,
        comment_id,
        task_status_id,
        comment_text,
        task_entity_id,
    ) in notifications:
        (full_entity_name, episode_id) = names_service.get_full_entity_name(
            task_entity_id
        )

        preview_file_id = None
        mentions = []
        if comment_id is not None:
            comment = Comment.get(comment_id)
            if len(comment.previews) > 0:
                preview_file_id = comment.previews[0].id
            mentions = comment.mentions or []

        result.append(
            fields.serialize_dict(
                {
                    "id": notification.id,
                    "type": "Notification",
                    "notification_type": notification.type,
                    "author_id": notification.author_id,
                    "comment_id": notification.comment_id,
                    "task_id": notification.task_id,
                    "task_type_id": task_type_id,
                    "task_status_id": task_status_id,
                    "mentions": mentions,
                    "preview_file_id": preview_file_id,
                    "project_id": project_id,
                    "project_name": project_name,
                    "comment_text": comment_text,
                    "created_at": notification.created_at,
                    "read": notification.read,
                    "change": notification.change,
                    "full_entity_name": full_entity_name,
                    "episode_id": episode_id,
                }
            )
        )

    return result

def get_last_news_for_project(
    project_id,
    filters={},
    news_id=None,
    only_preview=False,
    task_type_id=None,
    task_status_id=None,
    page=1,
    page_size=50,
):
    """
    Return the latest news for the given project. Add related information to
    make them displayable.
    """
    offset = (page - 1) * page_size
    query = (
        News.query.order_by(News.created_at.desc())
        .join(Task, News.task_id == Task.id)
        .join(Project)
        .join(Entity, Task.entity_id == Entity.id)
        .outerjoin(Comment, News.comment_id == Comment.id)
        .outerjoin(PreviewFile, News.preview_file_id == PreviewFile.id)
        .filter(Task.project_id == project_id)
        .add_columns(
            Project.id,
            Project.name,
            Task.task_type_id,
            Comment.id,
            Comment.task_status_id,
            Task.entity_id,
            PreviewFile.extension,
            Entity.preview_file_id,
        )
    )

    if news_id is not None:
        query = query.filter(News.id == news_id)

    if task_status_id is not None:
        query = query.filter(Comment.task_status_id == task_status_id)

    if task_type_id is not None:
        query = query.filter(Task.task_type_id == task_type_id)

    if only_preview:
        query = query.filter(News.preview_file_id != None)

    query = query.limit(page_size)
    query = query.offset(offset)
    news_list = query.all()

    result = []
    for (
        news,
        project_id,
        project_name,
        task_type_id,
        comment_id,
        task_status_id,
        task_entity_id,
        preview_file_extension,
        entity_preview_file_id,
    ) in news_list:
        (full_entity_name, episode_id) = names_service.get_full_entity_name(
            task_entity_id
        )
        result.append(
            fields.serialize_dict(
                {
                    "id": news.id,
                    "author_id": news.author_id,
                    "comment_id": news.comment_id,
                    "task_id": news.task_id,
                    "task_type_id": task_type_id,
                    "task_status_id": task_status_id,
                    "task_entity_id": task_entity_id,
                    "preview_file_id": news.preview_file_id,
                    "preview_file_extension": preview_file_extension,
                    "project_id": project_id,
                    "project_name": project_name,
                    "created_at": news.created_at,
                    "change": news.change,
                    "full_entity_name": full_entity_name,
                    "episode_id": episode_id,
                    "entity_preview_file_id": entity_preview_file_id,
                }
            )
        )

    return result

def get_weighted_quota_shots_between(
    person_id, start, end, project_id=None, task_type_id=None
):
    """
    Get all shots leading to a quota computation during the given period.
    Set a weight on each one:

    * If there is time spent filled, weight it by the sum of durations divided
      by the overall task duration.
    * If there is no time spent, weight it by the share of the task's business
      days (between its WIP date and feedback date) that fall within the given
      period.
    """
    shot_type = get_shot_type()
    person = persons_service.get_person_raw(person_id)
    shots = []
    already_listed = {}

    query = (
        Entity.query.filter(Entity.entity_type_id == shot_type["id"])
        .filter(Task.project_id == project_id)
        .filter(Task.task_type_id == task_type_id)
        .filter(Task.end_date != None)
        .filter(TimeSpent.person_id == person_id)
        .filter(TimeSpent.date >= start)
        .filter(TimeSpent.date < end)
        .join(Task, Entity.id == Task.entity_id)
        .join(Project, Project.id == Task.project_id)
        .join(TimeSpent, Task.id == TimeSpent.task_id)
        .add_columns(Task.duration, TimeSpent.duration)
    )
    query_shots = query.all()
    for (entity, task_duration, duration) in query_shots:
        shot = entity.serialize()
        if shot["id"] not in already_listed:
            full_name, _ = names_service.get_full_entity_name(shot["id"])
            shot["full_name"] = full_name
            shot["weight"] = round(duration / task_duration, 2) or 0
            shots.append(shot)
            already_listed[shot["id"]] = shot
        else:
            shot = already_listed[shot["id"]]
            shot["weight"] += round(duration / task_duration, 2)

    start = date_helpers.get_datetime_from_string(start)
    end = date_helpers.get_datetime_from_string(end)
    query = (
        Entity.query.filter(Entity.entity_type_id == shot_type["id"])
        .filter(Task.project_id == project_id)
        .filter(Task.task_type_id == task_type_id)
        .filter(Task.end_date != None)
        .filter(Task.real_start_date != None)
        .filter(Task.assignees.contains(person))
        .filter((Task.real_start_date <= end) & (Task.end_date >= start))
        .filter(TimeSpent.id == None)
        .join(Task, Entity.id == Task.entity_id)
        .join(Project, Project.id == Task.project_id)
        .outerjoin(TimeSpent, TimeSpent.task_id == Task.id)
        .add_columns(Task.real_start_date, Task.end_date)
    )
    query_shots = query.all()
    for (entity, task_start, task_end) in query_shots:
        shot = entity.serialize()
        if shot["id"] not in already_listed:
            business_days = (
                date_helpers.get_business_days(task_start, task_end) + 1
            )
            full_name, _ = names_service.get_full_entity_name(shot["id"])
            shot["full_name"] = full_name
            multiplicator = 1
            if task_start >= start and task_end <= end:
                multiplicator = business_days
            elif task_start >= start:
                multiplicator = (
                    date_helpers.get_business_days(task_start, end) + 1
                )
            elif task_end <= end:
                multiplicator = (
                    date_helpers.get_business_days(start, task_end) + 1
                )
            shot["weight"] = round(multiplicator / business_days, 2)
            already_listed[shot["id"]] = True
            shots.append(shot)

    return sorted(shots, key=itemgetter("full_name"))

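# Worked example for the weighting above (numbers are illustrative, not real
# data):
# * Time-spent path: a shot whose task duration is 5 units and whose time
#   spent rows for the person sum to 2 units within [start, end) gets weight
#   round(2 / 5, 2) = 0.4; further rows for the same shot accumulate onto the
#   existing entry.
# * No-time-spent path: a task spanning 5 business days that overlaps the
#   queried interval for only 3 of them gets weight round(3 / 5, 2) = 0.6,
#   while a task lying entirely inside the interval gets weight 1.0.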