def get_groups(cls, project_id, sort_key=SORT_KEY, is_reverse=False, per_page=PER_PAGE, page=1):
    """Return every non-deleted group of a project together with its members.

    Args:
        project_id: id of the project whose groups are listed.
        sort_key: model field name to order by; falls back to SORT_KEY
            when the field does not exist.
        is_reverse: when True, reverse the ordering.
        per_page, page: validated for consistency with the other list
            APIs but NOT applied here -- all groups are returned.
            TODO(review): confirm whether pagination was intended.

    Returns:
        dict with 'count' (number of groups) and 'records', a list of
        {'id', 'name', 'members'} dicts.
    """
    validation_check(per_page, page)
    try:
        ordered = Groups.objects.order_by(sort_key)
        if is_reverse:
            ordered = ordered.reverse()
        groups = ordered.filter(project_id=project_id, delete_flag=False)
    except FieldError:
        # Unknown sort column: fall back to the default sort key.
        groups = Groups.objects.order_by(SORT_KEY).filter(
            project_id=project_id, delete_flag=False)
    # NOTE: the original `if groups is None: raise ObjectDoesNotExist()`
    # guard was removed -- filter() always returns a QuerySet (possibly
    # empty), never None, so that branch was unreachable.
    records = [
        {
            'id': group.id,
            'name': group.name,
            'members': cls.__get_group_users(group.id),
        }
        for group in groups
    ]
    return {'count': len(records), 'records': records}
def get_projects(self, user_id, sort_key=SORT_KEY, is_reverse=False, per_page=PER_PAGE, page=1, search_keyword=""):
    """Return one page of the projects visible to a user.

    Each record is enriched with the project's klassset name/id when one
    exists; lookup failures fall back to placeholder values.

    Args:
        user_id: id of the requesting user; limits results to their projects.
        sort_key: model field to order by; falls back to "id" when invalid.
        is_reverse: when True, reverse the ordering.
        per_page, page: pagination window (page is 1-based).
        search_keyword: substring matched against name OR description.

    Returns:
        dict with 'count', 'records', 'sort_key', 'per_page' and 'page'.
    """
    validation_check(per_page, page)
    begin = per_page * (page - 1)
    project_ids = self.__user_projects(user_id)
    filters = (
        Q(id__in=project_ids),
        Q(delete_flag=False),
        Q(name__contains=search_keyword) |
        Q(description__contains=search_keyword),
    )
    try:
        query = Projects.objects.order_by(sort_key)
        if is_reverse:
            query = query.reverse()
        projects = query.filter(*filters)[begin:begin + per_page]
    except FieldError:
        # Unknown sort column: fall back to ordering by id.
        projects = Projects.objects.order_by("id").filter(
            *filters)[begin:begin + per_page]
    records = []
    for project in projects:
        record = {
            'id': project.id,
            'name': project.name,
            'description': project.description,
            'label_type': project.label_type,
            'created_at': str(project.created_at),
        }
        # Best-effort klassset enrichment; errors are deliberately
        # swallowed and replaced with placeholder values.
        try:
            klassset = KlasssetManager().get_klassset(project.id)
            record['klassset_name'] = klassset.name
            record['klassset_id'] = klassset.id
        except Exception:
            record['klassset_name'] = ' '
            record['klassset_id'] = 0
        records.append(record)
    # NOTE(review): 'count' is the user's total project count and ignores
    # search_keyword, so it can exceed the number of matching records.
    return {
        'count': self.project_total_count(user_id),
        'records': records,
        'sort_key': sort_key,
        'per_page': per_page,
        'page': page,
    }
def list_jobs(cls, project_id, sort_key, is_reverse=False, per_page=PER_PAGE, page=1, search_keyword=""):
    """Return one page of a project's jobs, refreshing live job statuses.

    For jobs not yet terminal (neither succeeded nor failed) the current
    status is fetched from the cluster; jobs stuck in 'unknown' past the
    time limit are marked failed and their pod log is captured.

    Args:
        project_id: id of the project whose jobs are listed.
        sort_key: model field to order by; falls back to "id" when invalid.
        is_reverse: when True, reverse the ordering.
        per_page, page: pagination window (page is 1-based).
        search_keyword: substring matched against job_type OR job_config.

    Returns:
        dict with 'count' (total jobs of the project) and 'records'.
    """
    validation_check(per_page, page)
    begin = per_page * (page - 1)
    filters = (
        Q(project_id=project_id),
        Q(job_type__contains=search_keyword) |
        Q(job_config__contains=search_keyword),
    )
    try:
        query = Job.objects.order_by(sort_key)
        if is_reverse:
            query = query.reverse()
        jobs = query.filter(*filters)[begin:begin + per_page]
    except FieldError:
        # Unknown sort column: fall back to ordering by id.
        jobs = Job.objects.order_by("id").filter(
            *filters)[begin:begin + per_page]
    records = []
    for job in jobs:
        record = {'id': job.id, 'job_type': job.job_type}
        if job.status not in [STATUS_MAP['succeeded'],
                              STATUS_MAP['failed']]:
            status, start_time, completion_time = cls.__get_job_status(
                job.id, job.job_type)
            # Stamp the moment the job first transitions INTO 'unknown'
            # so the time limit below measures from that point.
            if (job.status != STATUS_MAP['unknown']
                    and status == STATUS_MAP['unknown']):
                job.unknown_started_at = datetime.now(timezone.utc)
            job.status = status
            job.started_at = start_time
            job.completed_at = completion_time
            # An 'unknown' job past the grace period is declared failed.
            if (job.status == STATUS_MAP['unknown']
                    and cls.__is_unknown_time_limit(job.unknown_started_at)):
                job.status = STATUS_MAP['failed']
            if job.status == STATUS_MAP['failed']:
                namespace = cls.__generate_job_namespace()
                pod_log = BaseJob().logs(
                    cls.__generate_job_name(job.id, job.job_type), namespace)
                job.pod_log = pod_log
            # Persist the refreshed status fields (only reached for
            # non-terminal jobs).  NOTE(review): save() sits at this level
            # in the reconstruction -- confirm it was not meant to be
            # inside the failed-only branch.
            job.save()
        record['status'] = job.status
        record['started_at'] = str(job.started_at) if job.started_at else ''
        record['completed_at'] = (
            str(job.completed_at) if job.completed_at else '')
        record['registered_at'] = str(job.registered_at)
        record['description'] = cls.get_job_description(
            job.job_type, job.job_config)
        record['pod_log'] = job.pod_log
        record['user_id'] = job.user_id
        records.append(record)
    # NOTE(review): 'count' ignores search_keyword, so it can exceed the
    # number of matching records.
    return {'count': cls.job_total_count(project_id), 'records': records}
def get_originals(self, project_id, sort_key=SORT_KEY, is_reverse=False, per_page=PER_PAGE, page=1, search_keyword="", status=""):
    """Return one page of a project's non-deleted original files.

    Args:
        project_id: id of the project whose originals are listed.
        sort_key: model field to order by; falls back to "id" when invalid.
        is_reverse: when True, reverse the ordering.
        per_page, page: pagination window (page is 1-based).
        search_keyword: substring matched against the original's name.
        status: substring matched against the status column ("" matches all).

    Returns:
        dict with 'count' (total originals of the project) and 'records';
        analyzed originals also carry their 'dataset_candidates'.
    """
    validation_check(per_page, page)
    begin = per_page * (page - 1)
    filters = (
        Q(project_id=project_id),
        Q(delete_flag=False),
        Q(name__contains=search_keyword),
        Q(status__contains=status),
    )
    try:
        query = Original.objects.order_by(sort_key)
        if is_reverse:
            query = query.reverse()
        originals = query.filter(*filters)[begin:begin + per_page]
    except FieldError:
        # Unknown sort column: fall back to ordering by id.
        originals = Original.objects.order_by("id").filter(
            *filters)[begin:begin + per_page]
    records = []
    for original in originals:
        record = {
            'id': original.id,
            'name': original.name,
            'file_type': original.file_type,
            'size': int(original.size),
            'status': original.status,
        }
        # Dataset candidates only exist once analysis has completed.
        if original.status == 'analyzed':
            record['dataset_candidates'] = self.get_dataset_candidates(
                project_id, original.id)['records']
        else:
            record['dataset_candidates'] = []
        # TODO: job_id
        records.append(record)
    # NOTE(review): 'count' ignores search_keyword/status, so it can
    # exceed the number of matching records.
    return {
        'count': self.original_total_count(project_id),
        'records': records,
    }
def get_datasets(self, project_id, user_id, sort_key=SORT_KEY, is_reverse=False, per_page=PER_PAGE, page=1, search_keyword=""):
    """Return one page of a project's labeling datasets.

    Args:
        project_id: id of the project whose datasets are listed.
        user_id: present for interface compatibility; not used here.
        sort_key: model field to order by; falls back to "id" when invalid.
        is_reverse: when True, reverse the ordering.
        per_page, page: pagination window (page is 1-based).
        search_keyword: substring matched against the dataset name.

    Returns:
        dict with 'count' (total datasets of the project) and 'records'.
    """
    validation_check(per_page, page)
    begin = per_page * (page - 1)
    # The original filtered on Q(name__contains=kw) | Q(name__contains=kw)
    # -- the same condition OR-ed with itself, equivalent to the single
    # condition kept here.  TODO(review): the second term was probably
    # meant to search another field (cf. get_projects' description search).
    filters = (Q(project_id=project_id), Q(name__contains=search_keyword))
    try:
        query = LabelDataset.objects.order_by(sort_key)
        if is_reverse:
            query = query.reverse()
        datasets = query.filter(*filters)[begin:begin + per_page]
    except FieldError:
        # Unknown sort column: fall back to ordering by id.
        datasets = LabelDataset.objects.order_by("id").filter(
            *filters)[begin:begin + per_page]
    records = []
    for dataset in datasets:
        records.append({
            'id': dataset.id,
            'created_at': str(dataset.created_at),
            'updated_at': str(dataset.updated_at),
            'file_path': dataset.file_path,
            'name': dataset.name,
            'frame_count': dataset.frame_count,
            # NOTE(review): this stores the related object itself, not a
            # numeric id -- confirm whether `dataset.original_id` was meant.
            'original_id': dataset.original,
        })
    # NOTE(review): 'count' ignores search_keyword, so it can exceed the
    # number of matching records.
    return {
        'count': self.dataset_total_count(project_id),
        'records': records,
    }
def list_annotations(self, project_id, sort_key=SORT_KEY, is_reverse=False, per_page=PER_PAGE, page=1, search_keyword=""):
    """Return one page of a project's annotations with progress info.

    Args:
        project_id: id of the project whose annotations are listed.
        sort_key: model field to order by; falls back to "id" when invalid.
        is_reverse: when True, reverse the ordering.
        per_page, page: pagination window (page is 1-based).
        search_keyword: substring matched against the annotation name.

    Returns:
        dict with 'count' (total annotations of the project) and 'records';
        each record carries archive url/file name plus the latest
        progress/state.
    """
    # NOTE: the original started with an unused
    # `annotations = Annotation.objects.filter(project=project_id)` that
    # was immediately overwritten below (lazy QuerySet, never evaluated);
    # it has been removed.
    validation_check(per_page, page)
    begin = per_page * (page - 1)
    filters = (Q(project_id=project_id), Q(name__contains=search_keyword))
    try:
        query = Annotation.objects.order_by(sort_key)
        if is_reverse:
            query = query.reverse()
        annotations = query.filter(*filters)[begin:begin + per_page]
    except FieldError:
        # Unknown sort column: fall back to ordering by id.
        annotations = Annotation.objects.order_by("id").filter(
            *filters)[begin:begin + per_page]
    records = []
    for annotation in annotations:
        record = {
            'id': annotation.id,
            'name': annotation.name,
            'created_at': str(annotation.created_at),
            'dataset_id': annotation.dataset_id,
        }
        record['archive_url'], record['file_name'] = self.get_archive_url(
            project_id, annotation.id)
        annotation_progress = self.get_newest_annotation(annotation.id)
        record['progress'] = annotation_progress.progress
        record['status'] = annotation_progress.state
        records.append(record)
    # NOTE(review): 'count' ignores search_keyword, so it can exceed the
    # number of matching records.
    return {
        'count': self.annotation_total_count(project_id),
        'records': records,
    }
def list_jobs(cls, project_id, sort_key, is_reverse=False, per_page=PER_PAGE, page=1, search_keyword=""):
    """Return one page of a project's jobs, refreshing non-terminal statuses.

    Jobs that are neither succeeded nor failed get their status, start and
    completion times re-read from the cluster and persisted.

    Args:
        project_id: id of the project whose jobs are listed.
        sort_key: model field to order by; falls back to "id" when invalid.
        is_reverse: when True, reverse the ordering.
        per_page, page: pagination window (page is 1-based).
        search_keyword: substring matched against job_type OR job_config.

    Returns:
        dict with 'count' (total jobs of the project) and 'records'.
    """
    validation_check(per_page, page)
    begin = per_page * (page - 1)
    filters = (
        Q(project_id=project_id),
        Q(job_type__contains=search_keyword) |
        Q(job_config__contains=search_keyword),
    )
    try:
        query = Job.objects.order_by(sort_key)
        if is_reverse:
            query = query.reverse()
        jobs = query.filter(*filters)[begin:begin + per_page]
    except FieldError:
        # Unknown sort column: fall back to ordering by id.
        jobs = Job.objects.order_by("id").filter(
            *filters)[begin:begin + per_page]
    records = []
    for job in jobs:
        record = {'id': job.id, 'job_type': job.job_type}
        # Terminal jobs are left untouched; others are refreshed from the
        # cluster and saved.
        if job.status not in [STATUS_MAP['succeeded'],
                              STATUS_MAP['failed']]:
            status, start_time, completion_time = cls.__get_job_status(
                job.id, job.job_type)
            job.status = status
            job.started_at = start_time
            job.completed_at = completion_time
            job.save()
        record['status'] = job.status
        record['started_at'] = str(job.started_at) if job.started_at else ''
        record['completed_at'] = (
            str(job.completed_at) if job.completed_at else '')
        record['registered_at'] = str(job.registered_at)
        record['job_config'] = job.job_config
        record['user_id'] = job.user_id
        records.append(record)
    # NOTE(review): 'count' ignores search_keyword, so it can exceed the
    # number of matching records.
    return {'count': cls.job_total_count(project_id), 'records': records}
def list(cls, project_id, sort_key=SORT_KEY, is_reverse=False, per_page=PER_PAGE, page=1, search_keyword=""):
    """Return one page of a project's storage configurations.

    Args:
        project_id: id of the project whose storages are listed.
        sort_key: model field to order by; falls back to "id" when invalid.
        is_reverse: when True, reverse the ordering.
        per_page, page: pagination window (page is 1-based).
        search_keyword: substring matched against storage_type OR
            storage_config.

    Returns:
        dict with 'count' (total storages of the project) and 'records'.
    """
    validation_check(per_page, page)
    begin = per_page * (page - 1)
    filters = (
        Q(project_id=project_id),
        Q(storage_type__contains=search_keyword) |
        Q(storage_config__contains=search_keyword),
    )
    try:
        query = Storage.objects.order_by(sort_key)
        if is_reverse:
            query = query.reverse()
        storages = query.filter(*filters)[begin:begin + per_page]
    except FieldError:
        # Unknown sort column: fall back to ordering by id.
        storages = Storage.objects.order_by("id").filter(
            *filters)[begin:begin + per_page]
    records = [
        {
            'id': storage.id,
            'storage_type': storage.storage_type,
            'updated_at': str(storage.updated_at),
            'storage_config': storage.storage_config,
        }
        for storage in storages
    ]
    # NOTE(review): 'count' ignores search_keyword, so it can exceed the
    # number of matching records.
    return {
        'count': cls.storage_total_count(project_id),
        'records': records,
    }
def list(cls, project_id, user_id, sort_key=SORT_KEY, is_reverse=False, per_page=PER_PAGE, page=1, search_keyword=""):
    """Return one page of a project's calibrations.

    Args:
        project_id: id of the project whose calibrations are listed.
        user_id: present for interface compatibility; not used here.
        sort_key: model field to order by; falls back to "id" when invalid.
        is_reverse: when True, reverse the ordering.
        per_page, page: pagination window (page is 1-based).
        search_keyword: substring matched against the calibration name.

    Returns:
        dict with 'count' (total calibrations of the project) and 'records'.
    """
    validation_check(per_page, page)
    begin = per_page * (page - 1)
    filters = (Q(project_id=project_id), Q(name__contains=search_keyword))
    try:
        query = Calibration.objects.order_by(sort_key)
        if is_reverse:
            query = query.reverse()
        calibrations = query.filter(*filters)[begin:begin + per_page]
    except FieldError:
        # Unknown sort column: fall back to ordering by id.
        calibrations = Calibration.objects.order_by("id").filter(
            *filters)[begin:begin + per_page]
    records = [
        {
            'id': calibration.id,
            'created_at': str(calibration.created_at),
            'name': calibration.name,
            'content': calibration.content,
        }
        for calibration in calibrations
    ]
    # NOTE(review): 'count' ignores search_keyword, so it can exceed the
    # number of matching records.
    return {
        'count': cls.calibration_total_count(project_id),
        'records': records,
    }