def sync_imanage_document(task: ExtendedTask, imanage_config_id: int, imanage_doc_id: str):
        task.log_info('Synchronizing iManage document #{0} of config #{1}'.format(imanage_doc_id, imanage_config_id))
        imanage_doc = IManageDocument.objects \
            .filter(imanage_config_id=imanage_config_id, imanage_doc_id=imanage_doc_id) \
            .select_related('imanage_config').get()
        file_storage = get_file_storage()
        try:
            imanage_config = imanage_doc.imanage_config
            log = CeleryTaskLogger(task)
            project = imanage_config.resolve_dst_project(imanage_doc.imanage_doc_data, log)
            project_id = project.pk

            assignee = imanage_config.resolve_assignee(imanage_doc.imanage_doc_data, log)
            assignee_id = assignee.pk if assignee else None
            task.log_info('Assignee resolved to: {0}'.format(assignee.get_full_name() if assignee else '<no assignee>'))

            task.log_info('Downloading iManage document contents into a temp file...')
            auth_token = imanage_config.login()
            filename, response = imanage_config.load_document(auth_token, imanage_doc_id)

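            # generate a unique upload session id and build a sanitized relative path for the file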
            upload_session_id = str(uuid.uuid4())
            filename = get_valid_filename(filename)
            rel_filepath = os.path.join(upload_session_id, filename)

            # splitext needs a string; fall back to empty name/extension parts when no filename was returned
            _, ext = os.path.splitext(filename) if filename else ('', '')
            with buffer_contents_into_temp_file(response, ext) as temp_fn:

                # upload file to file storage
                with open(temp_fn, 'rb') as f:
                    file_storage.mk_doc_dir(upload_session_id)
                    file_storage.write_document(rel_filepath, f)

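                # parameters forwarded to LoadDocuments.create_document_local below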
                kwargs = {
                    'document_type_id': imanage_config.document_type_id,
                    'project_id': project_id,
                    'assignee_id': assignee_id,
                    'user_id': get_main_admin_user().pk,
                    'propagate_exception': True,
                    'run_standard_locators': True,
                    'metadata': {},
                    'do_not_check_exists': True
                }

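                # translate iManage metadata into pre-defined ContraxSuite field values
                # using the configured field binding, if any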
                pre_defined_fields = None
                if imanage_doc.imanage_doc_data and imanage_config.imanage_to_contraxsuite_field_binding:
                    pre_defined_fields = dict()
                    for imanage_field_code, contraxsuite_field_code \
                            in dict(imanage_config.imanage_to_contraxsuite_field_binding).items():
                        imanage_field_value = imanage_doc.imanage_doc_data.get(imanage_field_code)
                        if imanage_field_value:
                            pre_defined_fields[contraxsuite_field_code] = imanage_field_value
                            task.log_info('Assigning iManage field {0} to Contraxsuite field {1}: {2}'
                                          .format(imanage_field_code, contraxsuite_field_code, imanage_field_value))
                        else:
                            task.log_info('iManage field {0} has no value assigned.'
                                          .format(imanage_field_code))
                else:
                    task.log_info('No binding of iManage fields to Contraxsuite fields.')

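                # create the ContraxSuite document from the downloaded file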
                document_id = LoadDocuments \
                    .create_document_local(task, temp_fn, rel_filepath, kwargs,
                                           return_doc_id=True,
                                           pre_defined_doc_fields_code_to_python_val=pre_defined_fields)

                if document_id:
                    task.log_info('Created Contraxsuite document #{0}'.format(document_id))
                    imanage_doc.document_id = document_id
                    imanage_doc.last_sync_date = timezone.now()
                    imanage_doc.save(update_fields=['document_id', 'last_sync_date'])
                else:
                    task.log_error('Unable to create Contraxsuite document for '
                                   'iManage document #{0}'.format(imanage_doc_id))
                    raise RuntimeError('No document loaded.')
        except Exception as ex:
            msg = render_error('Unable to synchronize iManage document #{0}'.format(imanage_doc_id), ex)
            task.log_error(msg)
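            # mark the record as failed so the problem is visible outside the task log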
            imanage_doc.import_problem = True
            imanage_doc.save(update_fields=['import_problem'])
Example #2
def import_document_type(json_bytes: bytes, save: bool,
                         auto_fix_validation_errors: bool,
                         remove_missed_in_dump_objects: bool,
                         task: ExtendedTask) -> DocumentType:
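    # refuse to import while other user tasks are running: changing the document
    # type / field structure underneath them could make them crash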
    tasks = Task.objects \
        .get_active_user_tasks() \
        .exclude(pk=task.task.pk) \
        .distinct('name') \
        .order_by('name') \
        .values_list('name', flat=True)
    tasks = list(tasks)
    if tasks:
        msg = 'The following user tasks are running: {0}. This import can cause them to crash because of' \
              ' document type / field structure changes.'.format(', '.join(tasks))
        raise RuntimeError(msg)

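    # deserialize the JSON dump and group the objects by model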
    objects = serializers.deserialize("json", json_bytes.decode("utf-8"))
    document_type = None
    pk_to_field = {}
    field_detectors = []
    other_objects = []
    logger = CeleryTaskLogger(task)
    for deserialized_object in objects:
        obj = deserialized_object.object
        if isinstance(obj, DocumentType):
            if document_type is not None:
                raise RuntimeError('More than one document type was detected')
            document_type = DeserializedDocumentType(
                deserialized_object,
                auto_fix_validation_errors=auto_fix_validation_errors,
                remove_missed_in_dump_objects=remove_missed_in_dump_objects,
                logger=logger)
        elif isinstance(obj, DocumentField):
            field = DeserializedDocumentField(
                deserialized_object,
                auto_fix_validation_errors=auto_fix_validation_errors,
                remove_missed_in_dump_objects=remove_missed_in_dump_objects,
                logger=logger)
            pk_to_field[field.pk] = field
        elif isinstance(obj, DocumentFieldDetector):
            field_detector = DeserializedDocumentFieldDetector(
                deserialized_object,
                auto_fix_validation_errors=auto_fix_validation_errors,
                logger=logger)
            field_detectors.append(field_detector)
        elif isinstance(obj, DocumentFieldCategory):
            category = DeserializedDocumentFieldCategory(
                deserialized_object,
                auto_fix_validation_errors=auto_fix_validation_errors,
                logger=logger)
            other_objects.append(category)
        else:
            raise RuntimeError('Unknown model: {0}'.format(type(obj).__name__))

    if document_type is None:
        raise RuntimeError('Unable to find document type')

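    # a different document type that already uses the same code would collide on import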
    conflicting_document_type = DocumentType.objects \
        .filter(code=document_type.object.code) \
        .exclude(pk=document_type.pk) \
        .first()
    if conflicting_document_type is not None:
        err_msg = 'Unable to import document type #{0} "{1}". Database already contains a document type #{2}' \
                  ' with code "{3}"'.format(document_type.pk,
                                            document_type.object.code,
                                            conflicting_document_type.pk,
                                            conflicting_document_type.code)
        raise RuntimeError(err_msg)

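    # attach each field detector to its parent field from the dump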
    for field_detector in field_detectors:
        field = pk_to_field.get(field_detector.field_pk)
        if field is not None:
            field.add_dependent_object(field_detector)
        else:
            raise RuntimeError('Unknown field #{0}'.format(
                field_detector.field_pk))

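    # attach each field to the imported document type; a field bound to another
    # document type means the dump is inconsistent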
    for field in pk_to_field.values():
        if field.document_type_pk == document_type.pk:
            document_type.add_dependent_object(field)
        else:
            raise RuntimeError('Field #{0} references unknown document type #{1}'.format(
                field.pk, field.document_type_pk))

    for obj in other_objects:
        document_type.add_dependent_object(obj)

    logger.info('Validation of {0} ...'.format(document_type.object.code))
    validation_errors = document_type.validate()
    logger.info('Validation of {0} is finished'.format(
        document_type.object.code))
    if validation_errors:
        task.log_error(
            '{0} VALIDATION ERRORS HAVE OCCURRED DURING VALIDATION OF {1}.'.
            format(len(validation_errors), document_type.object.code))
        for index, validation_error in enumerate(validation_errors):
            # for different timestamps
            sleep(0.001)
            task.log_error('VALIDATION ERROR {0}. {1}'.format(
                index + 1, str(validation_error)))
        raise ValidationError(
            'Validation errors have occurred during import of {0}'.format(
                document_type.object.code))

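    # persist the document type together with all dependent objects atomically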
    if save:
        logger.info('Import of {0} ...'.format(document_type.object.code))
        with transaction.atomic():
            document_type.save()
        logger.info('Import of {0} is finished'.format(
            document_type.object.code))

    return document_type.object
Example #3
def import_document_type(json_bytes: bytes, save: bool,
                         auto_fix_validation_errors: bool,
                         remove_missed_in_dump_objects: bool,
                         source_version: int,
                         task: ExtendedTask) -> DocumentType:
    tasks = Task.objects \
        .get_active_user_tasks() \
        .exclude(pk=task.task.pk) \
        .exclude(name__in=[task_names.TASK_NAME_REFRESH_MATERIALIZED_VIEW,
                           task_names.TASK_NAME_CLEAN_ALL_TASKS,
                           task_names.TASK_NAME_CHECK_EMAIL_POOL]) \
        .distinct('name') \
        .order_by('name') \
        .values_list('name', flat=True)

    tasks = list(tasks)
    if tasks:
        msg = f'The following user tasks are running: {", ".join(tasks)}. ' + \
              'This import can cause them to crash because of document ' + \
              'type / field structure changes.'

        raise RuntimeError(msg)

    # check data contains version
    json_str = json_bytes.decode('utf-8')
    json_dict = json.loads(json_str)

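    # migrate the dump from its source schema version to CURRENT_VERSION before deserializing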
    sm = SchemeMigration()
    if isinstance(json_dict, dict):
        # {"version":"75","data":[{"model": ... ]}
        version = json_dict.get('version')
        records = sm.migrate_model_records(json_dict['data'], int(version),
                                           CURRENT_VERSION)
        json_str = json.dumps(records)
    elif source_version != CURRENT_VERSION:
        json_str = sm.migrate_json(json_str, source_version, CURRENT_VERSION)

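    # let each deserialization wrapper class (re)initialize its static state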
    for doc_type_subclass in DESERIALIZED_OBJECT_CLASSES:
        doc_type_subclass.init_static()

    objects = serializers.deserialize("json", json_str)
    document_type = None
    pk_to_field = {}
    field_detectors = []
    other_objects = []
    logger = CeleryTaskLogger(task)
    for deserialized_object in objects:
        obj = deserialized_object.object
        if isinstance(obj, DocumentType):
            if document_type is not None:
                raise RuntimeError('More than one document type was detected')
            document_type = DeserializedDocumentType(
                deserialized_object,
                auto_fix_validation_errors=auto_fix_validation_errors,
                remove_missed_in_dump_objects=remove_missed_in_dump_objects,
                logger=logger)
        elif isinstance(obj, DocumentField):
            field = DeserializedDocumentField(
                deserialized_object,
                auto_fix_validation_errors=auto_fix_validation_errors,
                remove_missed_in_dump_objects=remove_missed_in_dump_objects,
                logger=logger)
            pk_to_field[field.pk] = field
        elif isinstance(obj, DocumentFieldDetector):
            field_detector = DeserializedDocumentFieldDetector(
                deserialized_object,
                auto_fix_validation_errors=auto_fix_validation_errors,
                logger=logger)
            field_detectors.append(field_detector)
        elif isinstance(obj, DocumentFieldCategory):
            category = DeserializedDocumentFieldCategory(
                deserialized_object,
                auto_fix_validation_errors=auto_fix_validation_errors,
                logger=logger)
            other_objects.append(category)
        elif isinstance(obj, DocumentFieldFamily):
            family = DeserializedDocumentFieldFamily(
                deserialized_object,
                auto_fix_validation_errors=auto_fix_validation_errors,
                logger=logger)
            other_objects.append(family)
        else:
            raise RuntimeError(f'Unknown model: {type(obj).__name__}')

    if document_type is None:
        raise RuntimeError('Unable to find document type')

    conflicting_document_type = DocumentType.objects \
        .filter(code=document_type.object.code) \
        .exclude(pk=document_type.pk) \
        .first()
    if conflicting_document_type is not None:
        err_msg = f'Unable to import document type #{document_type.pk} "{document_type.object.code}". ' +\
                  f'Database already contains a document type #{conflicting_document_type.pk} ' + \
                  f'with code "{conflicting_document_type.code}"'
        raise RuntimeError(err_msg)

    for field_detector in field_detectors:
        field = pk_to_field.get(field_detector.field_pk)
        if field is not None:
            field.add_dependent_object(field_detector)
        else:
            raise RuntimeError(f'Unknown field #{field_detector.field_pk}')

    for field in pk_to_field.values():
        if field.document_type_pk == document_type.pk:
            document_type.add_dependent_object(field)
        else:
            raise RuntimeError(f'Field #{field.pk} references unknown document type #{field.document_type_pk}')

    for obj in other_objects:
        document_type.add_dependent_object(obj)

    logger.info(f'Validation of {document_type.object.code} ...')
    validation_errors = document_type.validate()
    logger.info(f'Validation of {document_type.object.code} is finished')
    if validation_errors:
        task.log_error(
            f'{len(validation_errors)} VALIDATION ERRORS HAVE OCCURRED DURING VALIDATION OF {document_type.object.code}.'
        )
        for index, validation_error in enumerate(validation_errors):
            # for different timestamps
            sleep(0.001)
            task.log_error(f'VALIDATION ERROR {index + 1}. {validation_error}')
        raise ValidationError(
            f'Validation errors have occurred during import of {document_type.object.code}'
        )

    if save:
        logger.info(f'Import of {document_type.object.code} ...')
        with transaction.atomic():
            document_type.save()
        logger.info(f'Import of {document_type.object.code} is finished')

    return document_type.object