Example #1
def pt_base_html(request, project_id, template, params=None, obj=None):
    if request.method == 'POST':
        raise Http404

    params = params if params else {}

    dentries = request.path.split("/")
    verb = dentries[2] if len(dentries) > 3 else "comparison"

    default_params = {
        'projects': ProjectModel().pt_get_all(),
        'request': request,
        'verb': verb,
        'obj': obj,
        'pt_version': __pt_version__,
        'project': ProjectModel.pt_get_by_id(project_id),
        'api_ver': API_VER,
        'screens': [('Home', '/%d/home/' % project_id),
                    ('Regressions', '/%d/regression/' % project_id),
                    ('Comparisons', '/%d/comparison/' % project_id),
                    ('Jobs', '/%d/job/' % project_id),
                    ('Hosts', '/%d/hw_farm/' % project_id)]
    }
    params.update(default_params)
    return TemplateResponse(request, template, params)
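
The view above only assembles shared context; a concrete screen view would call it with its own template. A minimal sketch of such a caller (the template name and extra context are hypothetical, not part of the project code shown here):

def pt_home(request, project_id):
    # hypothetical screen-specific context; pt_base_html() adds the shared defaults on top
    extra = {'recent_jobs': []}
    return pt_base_html(request, project_id, 'home.html', params=extra)
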
Example #2
def pt_comparison_all_json(request, api_ver, project_id):
    class ComparisonJson(BaseDatatableView):
        # The model we're going to show
        model = ComparisonModel

        # define the columns that will be returned
        columns = [
            '', 'id', 'updated', 'project', 'env_node', 'suite_ver', 'title',
            'jobs', 'tests_total', 'tests_completed', 'tests_failed'
        ]

        # define column names that will be used in sorting
        # order is important and should be same as order of columns
        # displayed by datatables. For non sortable columns use empty
        # value like ''
        order_columns = [
            '', 'id', 'updated', 'project', 'env_node', 'suite_ver', 'title',
            'jobs', 'tests_total', 'tests_completed', 'tests_failed'
        ]

        # Cap the number of records returned to protect the site from requests
        # that try to make it return a huge amount of data
        max_display_length = 5000

        def filter_queryset(self, qs):
            # filter by project & deleted only, search filtering is performed in-mem in prepare_results
            if int(project_id) != 0:
                qs = qs.filter(Q(project_id=project_id))

            qs = qs.filter(Q(deleted=False)).prefetch_related(
                "_jobs", "project")
            return qs

        def paging(self, qs):
            # Disable default paging (its default page size is 10); client-side DataTables needs all rows
            return qs

        def prepare_results(self, qs):
            return ComparisonSimpleSerializer(qs, many=True).data

    if request.method == "POST":
        body_unicode = request.body.decode('utf-8')
        body = json.loads(body_unicode)

        try:
            ComparisonModel.pt_validate_json(body)
        except SuspiciousOperation as e:
            return HttpResponseBadRequest(e)

        cmp = ComparisonModel(project=ProjectModel.pt_get_by_id(project_id))
        try:
            cmp.pt_update(body)
        except SuspiciousOperation as e:
            return HttpResponseBadRequest(e)

        return JsonResponse({'id': cmp.id}, safe=False)

    return ComparisonJson.as_view()(request)
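
The same view handles both directions: GET is served by ComparisonJson (DataTables JSON), while POST creates a comparison and returns its id. A minimal sketch of exercising the POST branch with Django's test client; the URL route and payload keys are hypothetical, since the accepted schema is defined by ComparisonModel.pt_validate_json(), which is not shown here:

import json
from django.test import Client

client = Client()
payload = {"title": "nightly vs. weekly", "jobs": [1, 2]}  # hypothetical keys
resp = client.post(
    "/api/v1.0/1/comparison/",   # hypothetical route for api_ver=1.0, project_id=1
    data=json.dumps(payload),
    content_type="application/json",
)
# On success the view answers with JsonResponse({'id': cmp.id})
print(resp.status_code, resp.json())
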
Example #3
def pt_comparison_all_json(request, api_ver, project_id):
    class ComparisonJson(BaseDatatableView):
        # The model we're going to show
        model = ComparisonModel

        # define the columns that will be returned
        columns = [
            '', 'id', 'updated', 'env_node', 'suite_ver', 'title', 'jobs',
            'tests_total', 'tests_completed', 'tests_failed'
        ]

        # define column names that will be used in sorting
        # order is important and should be same as order of columns
        # displayed by datatables. For non sortable columns use empty
        # value like ''
        order_columns = [
            '', 'id', 'updated', 'env_node', 'suite_ver', 'title', 'jobs',
            'tests_total', 'tests_completed', 'tests_failed'
        ]

        # Cap the number of records returned to protect the site from requests
        # that try to make it return a huge amount of data
        max_display_length = 5000

        def render_column(self, row, column):
            # Render 'tests_total' as a custom column: "<total> <completed>"
            if column == 'tests_total':
                return '{0} {1}'.format(row.tests_total, row.tests_completed)
            else:
                return super(ComparisonJson, self).render_column(row, column)

        def filter_queryset(self, qs):
            # use parameters passed in GET request to filter queryset

            # simple example:
            search = self.request.GET.get(u'search[value]', None)
            if search:
                qs = qs.filter(
                    Q(title__icontains=search)
                    | Q(suite_ver__icontains=search))

            if int(project_id) != 0:
                qs = qs.filter(Q(project_id=project_id))

            qs = qs.filter(Q(deleted=False))

            return qs

        def prepare_results(self, qs):
            return ComparisonSimpleSerializer(qs, many=True).data

    if request.method == "POST":
        body_unicode = request.body.decode('utf-8')
        body = json.loads(body_unicode)

        try:
            ComparisonModel.pt_validate_json(body)
        except SuspiciousOperation as e:
            return HttpResponseBadRequest(e)

        cmp = ComparisonModel(project=ProjectModel.pt_get_by_id(project_id))
        try:
            cmp.pt_update(body)
        except SuspiciousOperation as e:
            return HttpResponseBadRequest(e)

        return JsonResponse({'id': cmp.id}, safe=False)

    return ComparisonJson.as_view()(request)
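
This variant filters server-side using the search term sent by DataTables. A minimal sketch of querying the GET branch with Django's test client, assuming a hypothetical URL route and the usual draw/start/length/search[value] request parameters:

from django.test import Client

client = Client()
resp = client.get(
    "/api/v1.0/1/comparison/",   # hypothetical route for api_ver=1.0, project_id=1
    {"draw": 1, "start": 0, "length": 25, "search[value]": "nginx"},
)
data = resp.json()
# rows come from ComparisonSimpleSerializer via prepare_results()
print(data.get("recordsTotal"), len(data.get("data", [])))
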
Example #4
    def pt_update(self, json_data):
        from perftracker.models.test import TestModel

        j = PTJson(json_data,
                   obj_name="job json",
                   exception_type=SuspiciousOperation)

        project_name = j.get_str('project_name', require=True)
        self.uuid = j.get_uuid('uuid', defval=uuid.uuid1())

        self.title = j.get_str('job_title')
        if not self.title:
            self.title = j.get_str('title', require=True)
        self.cmdline = j.get_str('cmdline')
        self.project = ProjectModel.pt_get_by_name(j.get_str('project_name'))

        append = False if self.deleted else j.get_bool('append')

        now = timezone.now()

        env_nodes_json = j.get_list('env_nodes')
        tests_json = j.get_list('tests')

        key2test = {}
        tests_to_delete = {}
        tests_to_commit = {}
        test_seq_num = 0

        # process existing tests
        if self.id:
            for t in TestModel.objects.filter(job=self):
                test_seq_num = max(t.seq_num, test_seq_num)
                u = str(t.uuid)
                if append:
                    t.pt_validate_uniqueness(key2test)
                    tests_to_commit[u] = t
                else:
                    tests_to_delete[u] = t

        for t in tests_json:
            if not len(t.keys()):
                continue
            u = TestModel.pt_get_uuid(t)
            if u in tests_to_delete:
                tests_to_commit[u] = tests_to_delete[u]
                del tests_to_delete[u]
            else:
                test_seq_num += 1
                try:
                    tests_to_commit[u] = TestModel.objects.get(uuid=u)
                except TestModel.MultipleObjectsReturned:
                    # drop the duplicate tests with this uuid before re-creating one
                    TestModel.objects.filter(uuid=u).delete()
                    tests_to_commit[u] = TestModel(uuid=u,
                                                   seq_num=test_seq_num)
                except TestModel.DoesNotExist:
                    tests_to_commit[u] = TestModel(uuid=u,
                                                   seq_num=test_seq_num)
            tests_to_commit[u].pt_update(t)
            tests_to_commit[u].pt_validate_uniqueness(key2test)

        self.suite_name = j.get_str('suite_name')
        self.suite_ver = j.get_str('suite_ver')
        self.author = j.get_str('author')
        self.product_name = j.get_str('product_name')
        self.product_ver = j.get_str('product_ver')
        regression_tag = json_data.get('regression_tag', '')

        links = json_data.get('links', None)
        if links is None or links == "":
            self.links = json.dumps({})
        elif isinstance(links, dict):
            self.links = json.dumps(links)
        elif not links.startswith("{"):
            self.links = json.dumps({links: links})
        else:
            self.links = json.dumps(j.get_dict('links'))

        self.upload = now

        begin = j.get_datetime('begin', now)
        end = j.get_datetime('end', now)

        self.tests_total = 0
        self.tests_completed = 0
        self.tests_failed = 0
        self.tests_errors = 0
        self.tests_warnings = 0

        self.testcases_total = 0
        self.testcases_errors = 0

        self.deleted = False

        if (not append or j.get_bool('is_edited')
                or not self.duration or not self.begin):
            self.duration = end - begin
            self.begin = begin
            self.end = end
        else:
            # job is being appended, do correct duration math
            if self.end < begin:  # 1st upload
                self.duration += end - begin
            else:  # subsequent upload
                self.duration += end - self.end
            self.end = end

        if self.begin and (self.begin.tzinfo is None
                           or self.begin.tzinfo.utcoffset(self.begin) is None):
            raise SuspiciousOperation(
                "'begin' datetime object must include timezone: %s" %
                str(self.begin))
        if self.end and (self.end.tzinfo is None
                         or self.end.tzinfo.utcoffset(self.end) is None):
            raise SuspiciousOperation(
                "'end' datetime object must include timezone: %s" %
                str(self.end))

        self.save()

        # process env_nodes, try not to delete and re-create all the nodes each time because normally this is static information
        env_nodes_to_update = EnvNodeModel.pt_find_env_nodes_for_update(
            self, env_nodes_json)
        if env_nodes_to_update:
            EnvNodeModel.objects.filter(job=self).delete()
            for env_node_json in env_nodes_to_update:
                serializer = EnvNodeUploadSerializer(job=self,
                                                     data=env_node_json)
                if serializer.is_valid():
                    serializer.save()
                else:
                    raise SuspiciousOperation(
                        str(serializer.errors) + ", original json: " +
                        str(env_node_json))

        testcases = {}
        #  Test Case (aka Section in comparison) is defined by 2 possible scenarios:
        #    - tests with the same tag and different categories
        #    - tests with no categories, and same group

        for t in tests_to_commit.values():
            t.job = self
            t.pt_save()

            self.tests_total += 1
            if t.pt_status_is_completed():
                self.tests_completed += 1
            test_ok = True
            if t.pt_status_is_failed():
                self.tests_failed += 1
                test_ok = False
            if t.errors:
                test_ok = False
            if t.warnings:
                self.tests_warnings += 1

            self.tests_errors += int(not test_ok)
            testcase = t.tag if t.category else t.group
            if testcase in testcases:
                testcases[testcase] = testcases[testcase] and test_ok
            else:
                testcases[testcase] = test_ok

        self.testcases_total = len(testcases)
        self.testcases_errors = len([1 for ok in testcases.values() if not ok])

        if tests_to_delete:
            TestModel.pt_delete_tests(tests_to_delete.keys())

        if regression_tag is not None:
            from perftracker.models.regression import RegressionModel
            r = RegressionModel.pt_on_job_save(self, regression_tag)
            self.regression_original = r
            self.regression_linked = r

        self.save()
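
Based only on the keys read above, a job upload payload for pt_update() looks roughly like the sketch below; all values are hypothetical, the keys inside each test dict depend on TestModel.pt_update() (not shown), and 'begin'/'end' must carry timezone information or SuspiciousOperation is raised:

import uuid

payload = {
    "project_name": "Demo project",
    "uuid": str(uuid.uuid1()),
    "job_title": "nightly run #42",
    "suite_name": "io-suite",
    "suite_ver": "1.2",
    "author": "jane@example.com",
    "product_name": "demo-product",
    "product_ver": "3.0",
    "links": {"CI build": "https://ci.example.com/build/42"},
    "regression_tag": "io-regression",
    "append": False,
    "begin": "2024-01-01T10:00:00+00:00",
    "end": "2024-01-01T10:30:00+00:00",
    "env_nodes": [],
    "tests": [{"uuid": str(uuid.uuid1())}],  # per-test keys are defined by TestModel.pt_update()
}

# job.pt_update(payload)   # 'job' is an instance of the (unshown) job model class
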
Example #5
    def ptUpdate(self, json_data):
        from perftracker.models.test import TestModel

        self.ptValidateJson(json_data)

        self.title = json_data['job_title']
        self.cmdline = json_data.get('cmdline', None)
        self.uuid = json_data['uuid']
        self.project = ProjectModel.ptGetByName(json_data['project_name'])

        now = timezone.now()

        tests_json = json_data.get('tests', [])
        env_nodes_json = json_data.get('env_nodes', [])

        append = json_data.get('append', False)

        self.suite_name = json_data.get('suite_name', '')
        self.suite_ver = json_data.get('suite_ver', '')
        self.author = json_data.get('author', None)
        self.product_name = json_data.get('product_name', None)
        self.product_ver = json_data.get('product_ver', None)
        self.links = json.dumps(json_data.get('links', None))
        self.regression_tag = json_data.get('regression_tag', '')

        self.upload = now

        begin = parse_datetime(json_data['begin']) if json_data.get(
            'begin', None) else now
        end = parse_datetime(json_data['end']) if json_data.get('end',
                                                                None) else now

        self.tests_total = 0
        self.tests_completed = 0
        self.tests_failed = 0
        self.tests_errors = 0
        self.tests_warnings = 0

        if not self.begin:
            self.begin = begin
        if not self.end:
            self.end = end
        if self.duration:
            self.duration += end - begin
        else:
            self.duration = end - begin

        if self.begin and (self.begin.tzinfo is None
                           or self.begin.tzinfo.utcoffset(self.begin) is None):
            raise SuspiciousOperation(
                "'begin' datetime object must include timezone: %s" %
                str(self.begin))
        if self.end and (self.end.tzinfo is None
                         or self.end.tzinfo.utcoffset(self.end) is None):
            raise SuspiciousOperation(
                "'end' datetime object must include timezone: %s" %
                str(self.end))

        self.save()

        # process env_nodes, try not to delete and re-create all the nodes each time because normally this is static information
        env_nodes_to_update = EnvNodeModel.ptFindEnvNodesForUpdate(
            self, env_nodes_json)
        if env_nodes_to_update:
            EnvNodeModel.objects.filter(job=self).delete()
            for env_node_json in env_nodes_to_update:
                serializer = EnvNodeUploadSerializer(job=self,
                                                     data=env_node_json)
                if serializer.is_valid():
                    serializer.save()
                else:
                    raise SuspiciousOperation(
                        str(serializer.errors) + ", original json: " +
                        str(env_node_json))

        # process tests
        tests = TestModel.objects.filter(job=self)
        test_seq_num = 0
        uuid2test = {}
        for t in tests:
            uuid2test[str(t.uuid)] = t
            if test_seq_num <= t.seq_num:
                test_seq_num = t.seq_num

        for t in tests_json:
            TestModel.ptValidateJson(t)
            test_uuid = t['uuid']

            if test_uuid not in uuid2test:
                uuid2test[test_uuid] = TestModel(job=self, uuid=test_uuid)
                test_seq_num += 1
                uuid2test[test_uuid].seq_num = test_seq_num

            test = uuid2test[test_uuid]

            test.ptUpdate(self, t)

            self.tests_total += 1
            if test.ptStatusIsCompleted():
                self.tests_completed += 1
            if test.ptStatusIsFailed():
                self.tests_failed += 1
            if test.errors:
                self.tests_errors += 1
            if test.warnings:
                self.tests_warnings += 1
            ret = uuid2test.pop(test_uuid, None)

        if json_data.get('replace', False):
            TestModel.ptDeleteTests(uuid2test.keys())

        for t in uuid2test.values():
            self.tests_total += 1
            if t.ptStatusIsCompleted():
                self.tests_completed += 1
            if t.ptStatusIsFailed():
                self.tests_failed += 1
            if t.errors:
                self.tests_errors += 1
            if t.warnings:
                self.tests_warnings += 1

        self.save()
Example #6
File: job.py  Project: kaato137/perftracker
    def pt_update(self, json_data):
        from perftracker.models.test import TestModel

        j = PTJson(json_data,
                   obj_name="job json",
                   exception_type=SuspiciousOperation)

        project_name = j.get_str('project_name', require=True)
        self.uuid = j.get_uuid('uuid', require=True)

        self.title = j.get_str('job_title')
        if not self.title:
            self.title = j.get_str('title', require=True)
        self.cmdline = j.get_str('cmdline')
        self.project = ProjectModel.pt_get_by_name(j.get_str('project_name'))

        now = timezone.now()

        env_nodes_json = j.get_list('env_nodes')
        tests_json = j.get_list('tests', require=True)

        for t in tests_json:
            if 'uuid' not in t:
                raise SuspiciousOperation("test doesn't have 'uuid' key: %s" %
                                          str(t))
            test = TestModel(job=self, uuid=t['uuid'])
            # FIXME: double pt_update() call (here and below)
            test.pt_update(self, t, validate_only=True)

        self.suite_name = j.get_str('suite_name')
        self.suite_ver = j.get_str('suite_ver')
        self.author = j.get_str('author')
        self.product_name = j.get_str('product_name')
        self.product_ver = j.get_str('product_ver')
        self.links = json.dumps(j.get_dict('links'))
        self.regression_tag = json_data.get('regression_tag', '')

        self.upload = now

        begin = j.get_datetime('begin', now)
        end = j.get_datetime('end', now)

        self.tests_total = 0
        self.tests_completed = 0
        self.tests_failed = 0
        self.tests_errors = 0
        self.tests_warnings = 0

        append = False if self.deleted else j.get_bool('append')

        self.deleted = False

        if append:
            if self.duration:
                self.duration += end - begin
            else:
                self.duration = end - begin
            if not self.begin:
                self.begin = begin
            self.end = end
        else:
            self.duration = end - begin
            self.begin = begin
            self.end = end

        if self.begin and (self.begin.tzinfo is None
                           or self.begin.tzinfo.utcoffset(self.begin) is None):
            raise SuspiciousOperation(
                "'begin' datetime object must include timezone: %s" %
                str(self.begin))
        if self.end and (self.end.tzinfo is None
                         or self.end.tzinfo.utcoffset(self.end) is None):
            raise SuspiciousOperation(
                "'end' datetime object must include timezone: %s" %
                str(self.end))

        self.save()

        # process env_nodes, try not to delete and re-create all the nodes each time because normally this is static information
        env_nodes_to_update = EnvNodeModel.pt_find_env_nodes_for_update(
            self, env_nodes_json)
        if env_nodes_to_update:
            EnvNodeModel.objects.filter(job=self).delete()
            for env_node_json in env_nodes_to_update:
                serializer = EnvNodeUploadSerializer(job=self,
                                                     data=env_node_json)
                if serializer.is_valid():
                    serializer.save()
                else:
                    raise SuspiciousOperation(
                        str(serializer.errors) + ", original json: " +
                        str(env_node_json))

        # process tests
        tests = TestModel.objects.filter(job=self)
        test_seq_num = 0
        uuid2test = {}
        for t in tests:
            uuid2test[str(t.uuid)] = t
            if test_seq_num <= t.seq_num:
                test_seq_num = t.seq_num

        for t in tests_json:
            test_uuid = t['uuid']

            if test_uuid not in uuid2test:
                uuid2test[test_uuid] = TestModel(job=self, uuid=test_uuid)
                test_seq_num += 1
                uuid2test[test_uuid].seq_num = test_seq_num

            test = uuid2test[test_uuid]

            test.pt_update(self, t)

            self.tests_total += 1
            if test.pt_status_is_completed():
                self.tests_completed += 1
            if test.pt_status_is_failed():
                self.tests_failed += 1
            if test.errors:
                self.tests_errors += 1
            if test.warnings:
                self.tests_warnings += 1
            ret = uuid2test.pop(test_uuid, None)

        if not append:
            TestModel.pt_delete_tests(uuid2test.keys())

        for t in uuid2test.values():
            self.tests_total += 1
            if t.pt_status_is_completed():
                self.tests_completed += 1
            if t.pt_status_is_failed():
                self.tests_failed += 1
            if t.errors:
                self.tests_errors += 1
            if t.warnings:
                self.tests_warnings += 1

        self.save()
Example #7
    def pt_update(self, json_data):
        from perftracker.models.test import TestModel

        j = PTJson(json_data,
                   obj_name="job json",
                   exception_type=SuspiciousOperation)

        project_name = j.get_str('project_name', require=True)
        self.uuid = j.get_uuid('uuid', defval=uuid.uuid1())

        self.title = j.get_str('job_title')
        if not self.title:
            self.title = j.get_str('title', require=True)
        self.cmdline = j.get_str('cmdline')
        self.project = ProjectModel.pt_get_by_name(j.get_str('project_name'))

        append = False if self.deleted else j.get_bool('append')

        now = timezone.now()

        env_nodes_json = j.get_list('env_nodes')
        tests_json = j.get_list('tests')

        key2test = {}
        tests_to_delete = {}
        tests_to_commit = {}
        test_seq_num = 0

        # process existing tests
        if self.id:
            for t in TestModel.objects.filter(job=self):
                test_seq_num = max(t.seq_num, test_seq_num)
                u = str(t.uuid)
                if append:
                    t.pt_validate_uniqueness(key2test)
                    tests_to_commit[u] = t
                else:
                    tests_to_delete[u] = t

        for t in tests_json:
            if not len(t.keys()):
                continue
            u = TestModel.pt_get_uuid(t)
            if u in tests_to_delete:
                tests_to_commit[u] = tests_to_delete[u]
                del tests_to_delete[u]
            else:
                test_seq_num += 1
                try:
                    tests_to_commit[u] = TestModel.objects.get(uuid=u)
                except TestModel.MultipleObjectsReturned:
                    # drop the duplicate tests with this uuid before re-creating one
                    TestModel.objects.filter(uuid=u).delete()
                    tests_to_commit[u] = TestModel(uuid=u,
                                                   seq_num=test_seq_num)
                except TestModel.DoesNotExist:
                    tests_to_commit[u] = TestModel(uuid=u,
                                                   seq_num=test_seq_num)
            tests_to_commit[u].pt_update(t)
            tests_to_commit[u].pt_validate_uniqueness(key2test)

        self.suite_name = j.get_str('suite_name')
        self.suite_ver = j.get_str('suite_ver')
        self.author = j.get_str('author')
        self.product_name = j.get_str('product_name')
        self.product_ver = j.get_str('product_ver')
        self.links = json.dumps(j.get_dict('links'))
        regression_tag = json_data.get('regression_tag', '')

        self.upload = now

        begin = j.get_datetime('begin', now)
        end = j.get_datetime('end', now)

        self.tests_total = 0
        self.tests_completed = 0
        self.tests_failed = 0
        self.tests_errors = 0
        self.tests_warnings = 0

        self.deleted = False

        if append and not j.get_bool('is_edited'):
            if self.duration:
                self.duration += end - begin
            else:
                self.duration = end - begin
            if not self.begin:
                self.begin = begin
            self.end = end
        else:
            self.duration = end - begin
            self.begin = begin
            self.end = end

        if self.begin and (self.begin.tzinfo is None
                           or self.begin.tzinfo.utcoffset(self.begin) is None):
            raise SuspiciousOperation(
                "'begin' datetime object must include timezone: %s" %
                str(self.begin))
        if self.end and (self.end.tzinfo is None
                         or self.end.tzinfo.utcoffset(self.end) is None):
            raise SuspiciousOperation(
                "'end' datetime object must include timezone: %s" %
                str(self.end))

        self.save()

        # process env_nodes, try not to delete and re-create all the nodes each time because normally this is static information
        env_nodes_to_update = EnvNodeModel.pt_find_env_nodes_for_update(
            self, env_nodes_json)
        if env_nodes_to_update:
            EnvNodeModel.objects.filter(job=self).delete()
            for env_node_json in env_nodes_to_update:
                serializer = EnvNodeUploadSerializer(job=self,
                                                     data=env_node_json)
                if serializer.is_valid():
                    serializer.save()
                else:
                    raise SuspiciousOperation(
                        str(serializer.errors) + ", original json: " +
                        str(env_node_json))

        for t in tests_to_commit.values():
            t.job = self
            t.pt_save()

            self.tests_total += 1
            if t.pt_status_is_completed():
                self.tests_completed += 1
            if t.pt_status_is_failed():
                self.tests_failed += 1
            if t.errors:
                self.tests_errors += 1
            if t.warnings:
                self.tests_warnings += 1

        if tests_to_delete:
            TestModel.pt_delete_tests(tests_to_delete.keys())

        if regression_tag is not None:
            from perftracker.models.regression import RegressionModel
            r = RegressionModel.pt_on_job_save(self, regression_tag)
            self.regression_original = r
            self.regression_linked = r

        self.save()
Example #8
    def pt_update(self, json_data):
        from perftracker.models.test import TestModel
        from perftracker.models.test_group import TestGroupModel

        j = PTJson(json_data, obj_name="job json", exception_type=SuspiciousOperation)

        project_name = j.get_str('project_name', require=True)
        self.uuid = j.get_uuid('uuid', defval=uuid.uuid1())

        self.title = j.get_str('job_title')
        if not self.title:
            self.title = j.get_str('title', require=True)
        self.cmdline = j.get_str('cmdline')
        self.project = ProjectModel.pt_get_by_name(j.get_str('project_name'))

        append = False if self.deleted else j.get_bool('append')

        now = timezone.now()

        env_nodes_json = j.get_list('env_nodes')
        tests_json = j.get_list('tests')

        key2test = {}
        tests_to_delete = {}
        tests_to_commit = {}
        test_seq_num = 0
        # evaluate the queryset once to populate the cache of this job's existing tests
        tests_from_db_by_uuid, tests_list_from_db = self._pt_db_get(TestModel.objects.filter(job=self))
        # preload test groups
        tests_groups_by_tag, tests_groups = self._pt_db_get(TestGroupModel.objects.all(), value_to_key='tag')

        # process existing tests
        if self.id:
            for t in tests_list_from_db:
                test_seq_num = max(t.seq_num, test_seq_num)
                u = str(t.uuid)
                if append:
                    t.pt_validate_uniqueness(key2test)
                    tests_to_commit[u] = t
                else:
                    tests_to_delete[u] = t

        for t in tests_json:
            if not len(t.keys()):
                continue
            u = TestModel.pt_get_uuid(t)
            if u in tests_to_delete:
                tests_to_commit[u] = tests_to_delete[u]
                del tests_to_delete[u]
            else:
                test_seq_num += 1
                if not tests_to_commit.get(u):
                    tests_to_commit[u] = TestModel(uuid=u, seq_num=test_seq_num)

            tests_to_commit[u].pt_update(t, tests_groups_by_tag)
            tests_to_commit[u].pt_validate_uniqueness(key2test)

        self.suite_name = j.get_str('suite_name')
        self.suite_ver  = j.get_str('suite_ver')
        self.author     = j.get_str('author')
        self.product_name = j.get_str('product_name')
        self.product_ver  = j.get_str('product_ver')
        self.links = json.dumps(j.get_dict('links'))
        regression_tag = json_data.get('regression_tag', '')

        self.upload = now

        begin = j.get_datetime('begin', now)
        end = j.get_datetime('end', now)

        self.tests_total = 0
        self.tests_completed = 0
        self.tests_failed = 0
        self.tests_errors = 0
        self.tests_warnings = 0

        self.deleted = False

        if append and not j.get_bool('is_edited'):
            if self.duration:
                self.duration += end - begin
            else:
                self.duration = end - begin
            if not self.begin:
                self.begin = begin
            self.end = end
        else:
            self.duration = end - begin
            self.begin = begin
            self.end = end


        if self.begin and (self.begin.tzinfo is None or self.begin.tzinfo.utcoffset(self.begin) is None):
            raise SuspiciousOperation("'begin' datetime object must include timezone: %s" % str(self.begin))
        if self.end and (self.end.tzinfo is None or self.end.tzinfo.utcoffset(self.end) is None):
            raise SuspiciousOperation("'end' datetime object must include timezone: %s" % str(self.end))

        self.save()

        # process env_nodes, try not to delete and re-create all the nodes each time because normally this is static information
        env_nodes_to_update = EnvNodeModel.pt_find_env_nodes_for_update(self, env_nodes_json)
        if env_nodes_to_update:
            EnvNodeModel.objects.filter(job=self).delete()
            for env_node_json in env_nodes_to_update:
                serializer = EnvNodeUploadSerializer(job=self, data=env_node_json)
                if serializer.is_valid():
                    serializer.save()
                else:
                    raise SuspiciousOperation(str(serializer.errors) + ", original json: " + str(env_node_json))

        if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.postgresql':
            bulk_mgr = BulkCreateManager(chunk_size=5000)
            for t in tests_to_commit.values():
                t.job = self
                self.tests_total += 1
                if t.pt_status_is_completed():
                    self.tests_completed += 1
                if t.pt_status_is_failed():
                    self.tests_failed += 1
                if t.errors:
                    self.tests_errors += 1
                if t.warnings:
                    self.tests_warnings += 1
                db_test = tests_from_db_by_uuid.get(str(t.uuid))
                if not db_test:
                    bulk_mgr.add(t)
                elif t.pt_is_equal_to(db_test):
                    continue
            bulk_mgr.done()
        else:
            for t in tests_to_commit.values():
                t.job = self
                t.pt_save()
                self.tests_total += 1
                if t.pt_status_is_completed():
                    self.tests_completed += 1
                if t.pt_status_is_failed():
                    self.tests_failed += 1
                if t.errors:
                    self.tests_errors += 1
                if t.warnings:
                    self.tests_warnings += 1

        if not append and tests_to_delete:
            TestModel.pt_delete_tests(tests_to_delete.keys())

        if regression_tag is not None:
            from perftracker.models.regression import RegressionModel
            r = RegressionModel.pt_on_job_save(self, regression_tag)
            self.regression_original = r
            self.regression_linked   = r

        self.save()
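
BulkCreateManager appears above only through its constructor, add() and done(); its implementation is not part of this listing. A minimal sketch (an assumption, not the project's actual helper) of a chunked bulk-create manager matching that usage, which batches instances per model class and flushes them with Django's bulk_create():

from collections import defaultdict

class ChunkedBulkCreator:
    """Collect model instances and bulk_create() them in fixed-size chunks."""

    def __init__(self, chunk_size=5000):
        self.chunk_size = chunk_size
        self._queues = defaultdict(list)   # model class -> pending instances

    def add(self, obj):
        model_class = type(obj)
        self._queues[model_class].append(obj)
        if len(self._queues[model_class]) >= self.chunk_size:
            self._flush(model_class)

    def _flush(self, model_class):
        model_class.objects.bulk_create(self._queues[model_class])
        self._queues[model_class] = []

    def done(self):
        # flush whatever is still queued for every model class
        for model_class in list(self._queues):
            if self._queues[model_class]:
                self._flush(model_class)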