def _export_query_results(job, class_name, unique_id, filename, query, event='queryResultExportCompleted'):

    try:
        job.status = "Exporting"
        job.save()

        # add timestamp to filename to prevent conflicts
        filename = filename + "_" + timestamp()
        result = OGRETL().export(filename, query, 'CSV', geom=None)

        job.data = '/' + filename + ".csv"
        job.save()

        send_message_to_client(job.user.id, dict(event=event,
                                                 class_name=class_name,
                                                 unique_id=unique_id,
                                                 job_id=str(job.hashid)))

        job.status = 'Complete'

    except Exception as e:
        logger.error(e)
        job.status = "Failed"

        exc_type, exc_value, exc_traceback = sys.exc_info()
        readable_exception = traceback.format_exception(exc_type, exc_value, exc_traceback)
        job.data = readable_exception
        job.save()

        send_message_to_client(job.user.id, dict(event=job.type + " failed", trace=readable_exception))
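None of the examples on this page show send_message_to_client itself. The sketch below is one plausible implementation, assuming a per-user Redis pub/sub channel that a websocket bridge relays to the browser; the backend, channel naming, and error handling are assumptions, not the project's actual code.

import json
import logging

import redis

logger = logging.getLogger(__name__)


def send_message_to_client(user_id, message_dict):
    # Publish a JSON payload on a per-user channel; a websocket layer
    # subscribed to the same channel forwards it to the browser.
    connection = redis.StrictRedis()  # assumes a local Redis instance
    channel = 'user_{0}'.format(user_id)  # hypothetical channel naming scheme
    try:
        connection.publish(channel, json.dumps(message_dict))
    except Exception:
        # A notification failure should never break the calling task
        logger.exception("Could not deliver websocket message to user %s", user_id)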
Example #2
def send_fail_message(config_entity, bundle, job, publisher_name, signal_proportion_lookup_name, user):
    job.status = "Failed"
    exc_type, exc_value, exc_traceback = sys.exc_info()
    readable_exception = traceback.format_exception(exc_type, exc_value, exc_traceback)
    job.data = readable_exception
    event = 'postSavePublisherFailed'
    logger.error("Sending Failed message %s for signal %s to user %s with %s readable_exception %s" % \
        (event, signal_proportion_lookup_name, user.username, unicode(bundle), readable_exception))

    send_message_to_client(user.id, dict(
        event=event,
        config_entity_id=config_entity and config_entity.id,
        config_entity_class_name=config_entity and config_entity.__class__.__name__,
        # Send the key, since the id of new instances might be meaningless to the client
        # if it hasn't yet updated the record's id
        class_name=bundle.class_name_for_client,
        ids=bundle.client_instance_ids,
        keys=bundle.keys,
        # Used for Features and other things that have a class scope key
        class_key=bundle.class_key,
        publisher_name=publisher_name,
        # The client can display a capitalized version of this to describe the progress
        progress_description=humanize(signal_proportion_lookup_name),
        trace=readable_exception)
    )
    return readable_exception
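send_fail_message formats its trace from sys.exc_info(), so it is only meaningful when called from inside an exception handler. A minimal sketch of the intended call pattern; run_publisher and the surrounding variables are placeholders, not names from these examples.

try:
    run_publisher(config_entity, bundle, job, user)  # hypothetical publisher call
except Exception:
    # sys.exc_info() still refers to the active exception here, so the
    # traceback sent to the client describes the real failure
    send_fail_message(config_entity, bundle, job, publisher_name,
                      signal_proportion_lookup_name, user)
    raise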
Example #3
 def send_error(self):
     """
     Send error information to frontend via websockets
     """
     kwargs = self.send_error_kwargs()
     logger.error("Error UploadHandler: Sending error to client: %s" % kwargs)
     send_message_to_client(self.user.id, kwargs)
Example #4
def _export_layer(job, layer_id):

    try:
        layer = Layer.objects.get(id=layer_id)
        job.status = "Exporting"
        job.save()

        db_entity = layer.db_entity_interest.db_entity
        geometry_type = layer.medium_subclassed.geometry_type
        export_file, filename = export_db_entity_to_file(db_entity, geometry_type)

        job.status = "Zipping"
        job.save()

        zip_file_gdb(export_file)
        shutil.rmtree(export_file)

        job.data = '/' + filename + ".zip"
        job.save()

        send_message_to_client(job.user.id, dict(event='layerExportCompleted',
                                                 layer_id=layer_id,
                                                 job_id=str(job.hashid)))

        job.status = 'Complete'

    except Exception as e:
        job.status = "Failed"

        exc_type, exc_value, exc_traceback = sys.exc_info()
        readable_exception = traceback.format_exception(exc_type, exc_value, exc_traceback)
        job.data = readable_exception
        job.save()

        send_message_to_client(job.user.id, dict(event=job.type + " failed", trace=readable_exception))
Example #5
def _export_query_results(job, class_name, unique_id, filename, query, event='queryResultExportCompleted'):

    try:
        job.status = "Exporting"
        job.save()

        # add timestamp to filename to prevent conflicts
        filename = filename + "_" + timestamp()
        result = OGRETL().export(filename, query, 'CSV', geom=None)

        job.data = '/' + filename + ".csv"
        job.save()

        send_message_to_client(job.user.id, dict(event=event,
                                                 class_name=class_name,
                                                 unique_id=unique_id,
                                                 job_id=str(job.hashid)))

        job.status = 'Complete'

    except Exception as e:
        logger.error(e)
        job.status = "Failed"

        exc_type, exc_value, exc_traceback = sys.exc_info()
        readable_exception = traceback.format_exception(exc_type, exc_value, exc_traceback)
        job.data = readable_exception
        job.save()

        send_message_to_client(job.user.id, dict(event=job.type + " failed", trace=readable_exception))
Example #6
def send_fail_message(config_entity, bundle, job, publisher_name,
                      signal_proportion_lookup_name, user):
    job.status = "Failed"
    exc_type, exc_value, exc_traceback = sys.exc_info()
    readable_exception = traceback.format_exception(exc_type, exc_value,
                                                    exc_traceback)
    job.data = readable_exception
    event = 'postSavePublisherFailed'
    logger.error("Sending Failed message %s for signal %s to user %s with %s readable_exception %s" % \
        (event, signal_proportion_lookup_name, user.username, unicode(bundle), readable_exception))

    send_message_to_client(
        user.id,
        dict(
            event=event,
            config_entity_id=config_entity and config_entity.id,
            config_entity_class_name=config_entity
            and config_entity.__class__.__name__,
            # Send the key, since the id of new instances might be meaningless to the client
            # if it hasn't yet updated the record's id
            class_name=bundle.class_name_for_client,
            ids=bundle.client_instance_ids,
            keys=bundle.keys,
            # Used for Features and other things that have a class scope key
            class_key=bundle.class_key,
            publisher_name=publisher_name,
            # The client can display a capitalized version of this to describe the progress
            progress_description=humanize(signal_proportion_lookup_name),
            trace=readable_exception))
    return readable_exception
Example #7
def _export_layer(job, layer_id):

    try:
        layer = Layer.objects.get(id=layer_id)
        job.status = "Exporting"
        job.save()

        db_entity = layer.db_entity_interest.db_entity
        geometry_type = layer.medium_subclassed.geometry_type
        export_file, filename = export_db_entity_to_file(db_entity, geometry_type)

        job.status = "Zipping"
        job.save()

        zip_file_gdb(export_file)
        shutil.rmtree(export_file)

        job.data = '/' + filename + ".zip"
        job.save()

        send_message_to_client(job.user.id, dict(event='layerExportCompleted',
                                                 layer_id=layer_id,
                                                 job_id=str(job.hashid)))

        job.status = 'Complete'

    except Exception as e:
        job.status = "Failed"

        exc_type, exc_value, exc_traceback = sys.exc_info()
        readable_exception = traceback.format_exception(exc_type, exc_value, exc_traceback)
        job.data = readable_exception
        job.save()

        send_message_to_client(job.user.id, dict(event=job.type + " failed", trace=readable_exception))
Example #8
 def send_error(self):
     """
     Send error information to frontend via websockets
     """
     kwargs = self.send_error_kwargs()
     logger.error("Error UploadHandler: Sending error to client: %s" %
                  kwargs)
     send_message_to_client(self.user.id, kwargs)
Example #9
 def progress(self, proportion, **kwargs):
     send_message_to_client(
         kwargs['user'].id,
         dict(event='postSavePublisherProportionCompleted',
              job_id=str(kwargs['job'].hashid),
              config_entity_id=self.config_entity.id,
              ids=[kwargs['analysis_module'].id],
              class_name='AnalysisModule',
              key=kwargs['analysis_module'].key,
              proportion=proportion))
Example #10
 def report_progress(self, proportion, **kwargs):
     send_message_to_client(kwargs['user'].id, dict(
         event='postSavePublisherProportionCompleted',
         job_id=str(kwargs['job'].hashid),
         config_entity_id=self.config_entity.id,
         ids=[kwargs['analysis_module'].id],
         class_name='AnalysisModule',
         key=kwargs['analysis_module'].key,
         proportion=proportion)
     )
     logger.info("Progress {0}".format(proportion))
Example #11
    def send_progress(self):
        """
        Send progress information to frontend via websockets
        """
        self.load_extra()
        if 'X-Progress-ID' not in self.extra:
            raise Exception('UploadTask must have a X-Progress-ID in'
                            ' order to send messages through websockets')
        if self.progress is None:
            raise Exception('UploadTask cannot send_progress if progress'
                            ' is None')

        kwargs = self.send_progress_kwargs()
        logger.debug("UploadHandler: Sending progress: %s" % kwargs)
        send_message_to_client(self.user.id, kwargs)
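send_progress_kwargs() is never shown in these examples. A plausible sketch, assuming it only bundles the task's progress value with the X-Progress-ID held in self.extra; the event name and field names are hypothetical.

    def send_progress_kwargs(self):
        # Assumed shape only: pair the current progress with the upload's
        # X-Progress-ID so the client can match the message to its upload widget
        return dict(event='uploadProgressUpdated',  # hypothetical event name
                    progress=self.progress,
                    x_progress_id=self.extra['X-Progress-ID'])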
Example #12
    def merge_progress(self, proportion, **kwargs):

        send_message_to_client(
            kwargs["user"].id,
            dict(
                event="postSavePublisherProportionCompleted",
                job_id=str(kwargs["job"].hashid),
                config_entity_id=self.config_entity.id,
                ids=[kwargs["analysis_module"].id],
                class_name="AnalysisModule",
                key=kwargs["analysis_module"].key,
                proportion=proportion,
            ),
        )
        logger.info("Progress {0}".format(proportion))
Example #13
    def send_progress(self):
        """
        Send progress information to frontend via websockets
        """
        self.load_extra()
        if 'X-Progress-ID' not in self.extra:
            raise Exception('UploadTask must have a X-Progress-ID in'
                            ' order to send messages through websockets')
        if self.progress is None:
            raise Exception('UploadTask cannot send_progress if progress'
                            ' is None')

        kwargs = self.send_progress_kwargs()
        logger.debug("UploadHandler: Sending progress: %s" % kwargs)
        send_message_to_client(self.user.id, kwargs)
Example #14
def create_db_entity(pg_dump_fpath, db_entity_key, db_entity_name, table_name, layer_count, user, config_entity, **kwargs):
    """
    Create a DbEntity and all associated layers, etc. The majority of the
    processing occurs in the post-save methods on DbEntity objects; this method
    simply gets objects into the necessary state to trigger it.
    """

    logger.debug("Creating DbEntity %s with pg_dump file %s", db_entity_key, pg_dump_fpath)

    if 'upload_task' in kwargs:
        upload_task = kwargs['upload_task']
    else:
        # we're calling this from the command line
        # for testing purposes
        upload_task = create_upload_task(
            user,
            pg_dump_fpath,
            config_entity,
            extra_data_dict={'X-Progress-ID': 'unused'}
        )

    # later post-save processes expect a zipped sql file
    zipped_sql_fpath = "{}.zip".format(pg_dump_fpath)
    with ZipFile(zipped_sql_fpath, 'w') as zipped_sql:
        zipped_sql.write(pg_dump_fpath)

    # the UploadDataset represents the processing of a single file
    upload_dataset_task = UploadDatasetTask.objects.create(
        upload_task=upload_task,
        dataset_id=-1,
        file_path=zipped_sql_fpath,
        filename=db_entity_key,
        progress=upload_task.progress,
        status=upload_task.status,
        extra=upload_task.extra
    )

    # the schema metadata has information necessary for Django to create
    # new data models based on the upload; the DbEntity post-save
    # logic uses it.
    schema_metadata = get_schema_metadata(pg_dump_fpath, table_name)
    upload_dataset_task.metadata = schema_metadata
    logger.debug("Saving DbEntity %s and inititialzing post-save processing.", db_entity_key)
    upload_dataset_task.save()

    db_entity = DbEntity(
        creator=user,
        updater=user,
        name=db_entity_name,
        key=db_entity_key,
        url='file://{}'.format(zipped_sql_fpath),
        setup_percent_complete=0,
        schema=config_entity.schema()
    )

    # setting `_config_entity` and then calling `save()` triggers
    # the post-save processing flow, which, among other things,
    # loads the data into the database, creates layers and runs
    # required updates to other model objects to be aware of this
    # layer.
    db_entity._config_entity = config_entity
    db_entity.save()

    db_entity.categories.add(
        Category.objects.get(
            key=DbEntityCategoryKey.KEY_CLASSIFICATION,
            value=DbEntityCategoryKey.REFERENCE
        )
    )

    upload_dataset_task.progress = 100
    upload_dataset_task.status = UploadDatasetTask.SUCCESS
    upload_dataset_task.ended_on = timezone.now()
    upload_dataset_task.save()
    upload_dataset_task.send_progress()

    finished_dataset_count = UploadDatasetTask.objects.filter(upload_task=upload_task, status=UploadDatasetTask.SUCCESS).count()
    if finished_dataset_count == layer_count:
        upload_task.progress = 100
        upload_task.status = UploadTask.SUCCESS
        upload_task.ended_on = timezone.now()
        upload_task.save()
        upload_task.send_progress()

    message_kwargs = dict(
        event="doCreateDbEntity",
        id=db_entity.id,
        name=db_entity_name,
        key=db_entity_key,
        config_entity=config_entity.id,
        file_dataset=upload_dataset_task.id
    )

    # send websockets `doCreateDbEntity` signal to the browser
    send_message_to_client(
        user.id,
        message_kwargs
    )
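The upload_task fallback above suggests create_db_entity can also be driven directly (e.g. from a Django shell) for testing. A hedged sketch of such a call; the user and config_entity lookups and every literal value are placeholders, not taken from these examples.

from django.contrib.auth.models import User  # assumes the stock Django user model

user = User.objects.get(username='admin')                  # placeholder user
config_entity = ConfigEntity.objects.get(id=1).subclassed  # placeholder scope
create_db_entity(
    '/tmp/parcels.sql',   # pg_dump_fpath: pg_dump output for a single table
    'parcels',            # db_entity_key
    'Parcels',            # db_entity_name
    'parcels',            # table_name
    1,                    # layer_count: finish the upload after one dataset
    user,
    config_entity)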
Example #15
                'setup_percent_complete') + 100 * publishing_info['proportion']
            instance.save()
            logger.info("Instance percent after %s" %
                        instance.setup_percent_complete)

    send_message_to_client(
        user.id,
        dict(
            event=event,
            job_id=str(job.hashid),
            config_entity_id=config_entity and config_entity.id,
            config_entity_class_name=config_entity
            and config_entity.__class__.__name__,
            # Send the key, since the id of new instances might be meaningless to the client
            # if it hasn't yet updated the record's id
            publisher_name=publishing_info['publisher_name'],
            class_name=bundle.class_name_for_client,
            ids=bundle.client_instance_ids,
            keys=bundle.keys,
            # Used for Features and other things that have a class scope key
            class_key=bundle.class_key,
            # Send the proportion of work that completing this signal signifies--0 to 1
            proportion=publishing_info['proportion'],
            # The client can display this to describe the progress
            progress_description=humanize(publishing_info['signal_path']),
        ))

    # Find all dependent signals of this one and run each in parallel
    for dependent_signal_path in publishing_info['dependent_signal_paths']:
        # Recurse
        post_save_publishing(dependent_signal_path, config_entity, user,
Example #16
def post_save_publishing(signal_path, config_entity, user, **kwargs):
    """
        The initial and recursive entry point for all post-save publishing methods
        :param signal_path: the full module path of the signal that called this
        :param kwargs:
            signal_proportion_lookup - A dictionary mapping signal names to the proportion of the overall
            post-save work that each signal represents. The signal matching signal_path is looked up in this dictionary
            config_entity - The scope of whatever is being post-saved, whether a config_entity or something within it
            dependent_signal_paths - Full module signal paths called sequentially by this publisher
            crud_type - CrudKey.CREATE|CLONE|UPDATE|SYNC|DELETE
            instance_class - Optional. Overrides the class of the instance for use in communicating with the client.
            This is used when the client only cares about a base class, such as Feature, or when a DbEntityInterest
            should be reported as a DbEntity
            client_instance_path - Optional. Property path that resolves the instance to another instance for the client
            (this is only used to convert DbEntityInterest to DbEntity)
    """
    api_key = ApiKey.objects.get(user=user).key

    # Gather instance ids, class, and optional instance keys
    bundle = InstanceBundle(**merge(kwargs, dict(user_id=user.id)))

    # Pass the arguments to the task and run via celery. Note that kwargs is being treated
    # as a dict here and passed along
    logger.info("Django post save: %s" % unicode(bundle))

    # Send the start event to the client if we aren't recursing.
    if not kwargs.get('recurse', False):
        event = 'postSavePublisherStarted'
        logger.info("Sending start message %s to user %s with %s" %
                    (event, user.username, unicode(bundle)))

        send_message_to_client(
            user.id,
            dict(
                event=event,
                config_entity_id=config_entity and config_entity.id,
                config_entity_class_name=config_entity
                and config_entity.__class__.__name__,
                class_name=bundle.class_name_for_client,
                # Always send 0 for initial
                proportion=0,
                ids=bundle.client_instance_ids,
                keys=bundle.keys,
                class_key=bundle.class_key))

    # Start Celery
    logger.info("Starting post save publishing with signal path %s" %
                signal_path)
    job = start_and_track_task(
        _post_save_publishing,
        api_key,
        config_entity,
        user,
        **merge(
            remove_keys(kwargs, ['instance']),
            dict(
                # If we are recursing (already in a celery worker), don't start a new celery task
                # When we get dependency order figured out, we can do this, but there's probably
                # a better way via the Task object or something
                current_job=kwargs.get('job', None),
                signal_path=signal_path,
                crud_type=kwargs.get('crud_type'),
                bundle=bundle)))

    return HttpResponse(job.hashid)
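A sketch of kwargs shaped the way the docstring above describes; every concrete value (signal names, module paths, classes) is a placeholder chosen for illustration rather than taken from the project.

publishing_kwargs = dict(
    # map each signal name to its share (0 to 1) of the overall post-save work
    signal_proportion_lookup=dict(post_save_db_entity_initial=0.6,   # hypothetical signal name
                                  post_save_db_entity_layers=0.4),   # hypothetical signal name
    dependent_signal_paths=[],          # no follow-on publishers in this sketch
    crud_type=CrudKey.UPDATE,           # one of CREATE|CLONE|UPDATE|SYNC|DELETE
    instance=db_entity_interest,        # the object that was just saved (placeholder)
    instance_class=DbEntity,            # report the save to the client as a DbEntity
    client_instance_path='db_entity')   # resolve the DbEntityInterest to its DbEntity for the client

post_save_publishing(
    'footprint.main.signals.post_save_db_entity_initial',  # hypothetical signal module path
    config_entity, user, **publishing_kwargs)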
Example #17
def analysis_module_task(job, user, config_entity_id, key, kwargs):

    config_entity = ConfigEntity.objects.get(id=config_entity_id).subclassed
    analysis_module = AnalysisModule.objects.get(config_entity=config_entity,
                                                 key=key)
    # Set again for new instance
    analysis_module._started = True

    try:
        # TODO progress calls should be moved to each module so the status bar increments on the client
        # logger.info('AnalysisModule %s Started for ConfigEntity %s with kwarg keys' % (analysis_module.name, config_entity.name, ', '.join(kwargs or dict().keys())))
        send_message_to_client(
            user.id,
            dict(event='postSavePublisherStarted'.format(capitalize(key)),
                 job_id=str(job.hashid),
                 config_entity_id=config_entity.id,
                 ids=[analysis_module.id],
                 class_name='AnalysisModule',
                 key=analysis_module.key))

        # Call each tool's update method
        for analysis_tool in analysis_module.analysis_tools.all(
        ).select_subclasses():
            updated_kwargs = deepcopy(kwargs)
            updated_kwargs.update(
                dict(analysis_module=analysis_module,
                     user=user,
                     job=job,
                     key=key))
            analysis_tool.update(**updated_kwargs)

        # Call the post save publisher
        from footprint.main.publishing.config_entity_publishing import post_save_config_entity_analysis_module
        post_save_config_entity_analysis_module.send(
            sender=config_entity.__class__,
            config_entity=config_entity,
            analysis_module=analysis_module)

        logger.info('AnalysisModule %s Completed for ConfigEntity %s' %
                    (analysis_module.name, config_entity.name))
        logger.info('Sending message to client postSavePublisherCompleted to user %s for module %s and config entity %s' % \
                     (user.username, analysis_module.name, config_entity.name))
        send_message_to_client(
            user.id,
            dict(event='postSavePublisherCompleted',
                 job_id=str(job.hashid),
                 config_entity_id=config_entity.id,
                 ids=[analysis_module.id],
                 class_name='AnalysisModule',
                 key=analysis_module.key))
        analysis_module.completed = datetime.utcnow().replace(tzinfo=utc)
        analysis_module.save()
        analysis_module._started = False

    except Exception as e:
        try:
            analysis_module.failed = datetime.utcnow().replace(tzinfo=utc)
            analysis_module.save()
        finally:
            analysis_module._started = False

        exc_type, exc_value, exc_traceback = sys.exc_info()
        readable_exception = traceback.format_exception(
            exc_type, exc_value, exc_traceback)
        logger.error(readable_exception)
        send_message_to_client(
            user.id,
            dict(event='postSavePublisherFailed',
                 job_id=str(job.hashid),
                 config_entity_id=config_entity.id,
                 ids=[analysis_module.id],
                 class_name='AnalysisModule',
                 key=analysis_module.key))
        raise Exception(readable_exception)
Example #18
def post_save_publishing(signal_path, config_entity, user, **kwargs):
    """
        The initial and recursive entry point for all post-save publishing methods
        :param signal_path: the full module path of the signal that called this
        :param kwargs:
            signal_proportion_lookup - A dictionary mapping signal names to the proportion of the overall
            post-save work that each signal represents. The signal matching signal_path is looked up in this dictionary
            config_entity - The scope of whatever is being post-saved, whether a config_entity or something within it
            dependent_signal_paths - Full module signal paths called sequentially by this publisher
            crud_type - CrudKey.CREATE|CLONE|UPDATE|SYNC|DELETE
            instance_class - Optional. Overrides the class of the instance for use in communicating with the client.
            This is used when the client only cares about a base class, such as Feature, or when a DbEntityInterest
            should be reported as a DbEntity
            client_instance_path - Optional. Property path that resolves the instance to another instance for the client
            (this is only used to convert DbEntityInterest to DbEntity)
    """
    api_key = ApiKey.objects.get(user=user).key

    # Gather instance ids, class, and optional instance keys
    bundle = InstanceBundle(**merge(kwargs, dict(user_id=user.id)))

    # Pass the arguments to the task and run via celery. Note that kwargs is being treated
    # as a dict here and passed along
    logger.info("Django post save: %s" % unicode(bundle))

    # Send the start event to the client if we aren't recursing.
    if not kwargs.get('recurse', False):
        event = 'postSavePublisherStarted'
        logger.info("Sending start message %s to user %s with %s" % (event, user.username, unicode(bundle)))

        send_message_to_client(
            user.id,
            dict(
                event=event,
                config_entity_id=config_entity and config_entity.id,
                config_entity_class_name=config_entity and config_entity.__class__.__name__,
                class_name=bundle.class_name_for_client,
                # Always send 0 for initial
                proportion=0,
                ids=bundle.client_instance_ids,
                keys=bundle.keys,
                class_key=bundle.class_key
            )
        )

    # Start Celery
    logger.info("Starting post save publishing with signal path %s" % signal_path)
    job = start_and_track_task(_post_save_publishing,
                               api_key,
                               config_entity,
                               user,
                               **merge(
                                     remove_keys(kwargs, ['instance']),
                                     dict(
                                         # If we are recursing (already in a celery worker), don't start a new celery task
                                         # When we get dependency order figured out, we can do this, but there's probably
                                         # a better way via the Task object or something
                                         current_job=kwargs.get('job', None),
                                         signal_path=signal_path,
                                         crud_type=kwargs.get('crud_type'),
                                         bundle=bundle
                               )))

    return HttpResponse(job.hashid)
Example #19
def analysis_module_task(job, user, config_entity_id, key, kwargs):

    config_entity = ConfigEntity.objects.get(id=config_entity_id).subclassed
    analysis_module = AnalysisModule.objects.get(config_entity=config_entity, key=key)
    # Set again for new instance
    analysis_module._started = True

    try:
        # TODO progress calls should be moved to each module so the status bar increments on the client
        # logger.info('AnalysisModule %s Started for ConfigEntity %s with kwarg keys' % (analysis_module.name, config_entity.name, ', '.join(kwargs or dict().keys())))
        send_message_to_client(user.id, dict(
            event='postSavePublisherStarted'.format(capitalize(key)),
            job_id=str(job.hashid),
            config_entity_id=config_entity.id,
            ids=[analysis_module.id],
            class_name='AnalysisModule',
            key=analysis_module.key))

        # Call each tool's update method
        for analysis_tool in analysis_module.analysis_tools.all().select_subclasses():
            updated_kwargs = deepcopy(kwargs)
            updated_kwargs.update(dict(analysis_module=analysis_module, user=user, job=job, key=key))
            analysis_tool.update(**updated_kwargs)

        # Call the post save publisher
        from footprint.main.publishing.config_entity_publishing import post_save_config_entity_analysis_module
        post_save_config_entity_analysis_module.send(sender=config_entity.__class__, config_entity=config_entity, analysis_module=analysis_module)

        logger.info('AnalysisModule %s Completed for ConfigEntity %s' % (analysis_module.name, config_entity.name))
        logger.info('Sending message to client postSavePublisherCompleted to user %s for module %s and config entity %s' % \
                     (user.username, analysis_module.name, config_entity.name))
        send_message_to_client(user.id,
                               dict(event='postSavePublisherCompleted',
                                    job_id=str(job.hashid),
                                    config_entity_id=config_entity.id,
                                    ids=[analysis_module.id],
                                    class_name='AnalysisModule',
                                    key=analysis_module.key)
        )
        analysis_module.completed = datetime.utcnow().replace(tzinfo=utc)
        analysis_module.save()
        analysis_module._started = False

    except Exception as e:
        try:
            analysis_module.failed = datetime.utcnow().replace(tzinfo=utc)
            analysis_module.save()
        finally:
            analysis_module._started = False

        exc_type, exc_value, exc_traceback = sys.exc_info()
        readable_exception = traceback.format_exception(exc_type, exc_value, exc_traceback)
        logger.error(readable_exception)
        send_message_to_client(user.id,
                               dict(event='postSavePublisherFailed',
                                    job_id=str(job.hashid),
                                    config_entity_id=config_entity.id,
                                    ids=[analysis_module.id],
                                    class_name='AnalysisModule',
                                    key=analysis_module.key
                               )
        )
        raise Exception(readable_exception)
Example #20
            # This creates an update statement to increment the setup_percent_complete field
            # by the given proportion
            logger.info("Instance percent before %s" % instance.setup_percent_complete)
            instance.setup_percent_complete = F('setup_percent_complete') + 100*publishing_info['proportion']
            instance.save()
            logger.info("Instance percent after %s" % instance.setup_percent_complete)

    send_message_to_client(user.id, dict(
        event=event,
        job_id=str(job.hashid),
        config_entity_id=config_entity and config_entity.id,
        config_entity_class_name=config_entity and config_entity.__class__.__name__,
        # Send the key, since the id of new instances might be meaningless to the client
        # if it hasn't yet updated the record's id
        publisher_name=publishing_info['publisher_name'],
        class_name=bundle.class_name_for_client,
        ids=bundle.client_instance_ids,
        keys=bundle.keys,
        # Used for Features and other things that have a class scope key
        class_key=bundle.class_key,
        # Send the proportion of work that completing this signal signifies--0 to 1
        proportion=publishing_info['proportion'],
        # The client can display this to describe the progress
        progress_description=humanize(publishing_info['signal_path']),
    ))

    # Find all dependent signals of this one and run each in parallel
    for dependent_signal_path in publishing_info['dependent_signal_paths']:
        # Recurse
        post_save_publishing(
            dependent_signal_path,
            config_entity,
Example #21
def create_db_entity(pg_dump_fpath, db_entity_key, db_entity_name, table_name,
                     layer_count, user, config_entity, **kwargs):
    """
    Create a DbEntity and all associated layers, etc. The majority of the
    processing occurs in the post-save methods on DbEntity objects; this method
    simply gets objects into the necessary state to trigger it.
    """

    logger.debug("Creating DbEntity %s with pg_dump file %s", db_entity_key,
                 pg_dump_fpath)

    if 'upload_task' in kwargs:
        upload_task = kwargs['upload_task']
    else:
        # we're calling this from the command line
        # for testing purposes
        upload_task = create_upload_task(
            user,
            pg_dump_fpath,
            config_entity,
            extra_data_dict={'X-Progress-ID': 'unused'})

    # later post-save processes expect a zipped sql file
    zipped_sql_fpath = "{}.zip".format(pg_dump_fpath)
    with ZipFile(zipped_sql_fpath, 'w') as zipped_sql:
        zipped_sql.write(pg_dump_fpath)

    # the UploadDataset represents the processing of a single file
    upload_dataset_task = UploadDatasetTask.objects.create(
        upload_task=upload_task,
        dataset_id=-1,
        file_path=zipped_sql_fpath,
        filename=db_entity_key,
        progress=upload_task.progress,
        status=upload_task.status,
        extra=upload_task.extra)

    # the schema metadata has information necessary for Django to create
    # new data models based on the upload; the DbEntity post-save
    # logic uses it.
    schema_metadata = get_schema_metadata(pg_dump_fpath, table_name)
    upload_dataset_task.metadata = schema_metadata
    logger.debug("Saving DbEntity %s and inititialzing post-save processing.",
                 db_entity_key)
    upload_dataset_task.save()

    db_entity = DbEntity(creator=user,
                         updater=user,
                         name=db_entity_name,
                         key=db_entity_key,
                         url='file://{}'.format(zipped_sql_fpath),
                         setup_percent_complete=0,
                         schema=config_entity.schema())

    # setting `_config_entity` and then calling `save()` triggers
    # the post-save processing flow, which, among other things,
    # loads the data into the database, creates layers and runs
    # required updates to other model objects to be aware of this
    # layer.
    db_entity._config_entity = config_entity
    db_entity.save()

    db_entity.categories.add(
        Category.objects.get(key=DbEntityCategoryKey.KEY_CLASSIFICATION,
                             value=DbEntityCategoryKey.REFERENCE))

    upload_dataset_task.progress = 100
    upload_dataset_task.status = UploadDatasetTask.SUCCESS
    upload_dataset_task.ended_on = timezone.now()
    upload_dataset_task.save()
    upload_dataset_task.send_progress()

    finished_dataset_count = UploadDatasetTask.objects.filter(
        upload_task=upload_task, status=UploadDatasetTask.SUCCESS).count()
    if finished_dataset_count == layer_count:
        upload_task.progress = 100
        upload_task.status = UploadTask.SUCCESS
        upload_task.ended_on = timezone.now()
        upload_task.save()
        upload_task.send_progress()

    message_kwargs = dict(event="doCreateDbEntity",
                          id=db_entity.id,
                          name=db_entity_name,
                          key=db_entity_key,
                          config_entity=config_entity.id,
                          file_dataset=upload_dataset_task.id)

    # send websockets `doCreateDbEntity` signal to the browser
    send_message_to_client(user.id, message_kwargs)