Example #1
def delete_archive_by_date(d):
    """
    Delete archived files for the day from storage,
    must be used with caution
    """
    if settings.LOGGEDPOINT_ARCHIVE_DELETE_DISABLED:
        raise Exception(
            "The feature to delete logged point arhive is disabled.")
    answer = user_confirm(
        "Are you sure you want to delete the loggedpoint archives for the day({})?(Y/N):"
        .format(d), ("Y", "N"))
    if answer != 'Y':
        return
    archive_group = get_archive_group(d)
    archive_id = get_archive_id(d)
    resource_id = "{}.gpkg".format(archive_id)
    vrt_id = get_vrt_id(archive_group)

    work_folder = None
    resource_repository = get_resource_repository()
    try:
        del_metadata = resource_repository.delete_resource(
            archive_group, resource_id)
        groupmetadatas = [
            m for m in resource_repository.metadata_client.resource_metadatas(
                resource_group=archive_group, throw_exception=True)
        ]

        vrt_metadata = next(m for m in groupmetadatas
                            if m["resource_id"] == vrt_id)

        vrt_metadata["features"] = 0
        for m in groupmetadatas:
            if m["resource_id"] == vrt_id:
                continue
            vrt_metadata["features"] += m["features"]

        layers = [(m["resource_id"], m["resource_file"])
                  for m in groupmetadatas if m["resource_id"] != vrt_id]
        if layers:
            work_folder = tempfile.mkdtemp(prefix="delete_archive")
            layers.sort(key=lambda o: o[0])
            layers = os.linesep.join(
                individual_layer.format(m[0], m[1]) for m in layers)
            vrt_data = vrt.format(archive_group, layers)
            vrt_filename = os.path.join(work_folder, "loggedpoint.vrt")
            with open(vrt_filename, "w") as f:
                f.write(vrt_data)

            vrt_metadata["file_md5"] = utils.file_md5(vrt_filename)
            resourcemetadata = resource_repository.push_file(
                vrt_filename,
                vrt_metadata,
                f_post_push=_set_end_datetime("updated"))
        else:
            #all archives in the group were deleted
            resource_repository.delete_resource(archive_group, vrt_id)
    finally:
        if work_folder:
            utils.remove_folder(work_folder)
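Every example in this collection calls a `utils.file_md5` helper that is not shown. A minimal sketch of such a helper, assuming it simply returns the hexadecimal MD5 digest of the file contents, read in chunks so large archives never have to fit in memory:

import hashlib

def file_md5(path, chunk_size=1024 * 1024):
    # Hash the file in fixed-size chunks; only the hex digest is returned.
    md5 = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            md5.update(chunk)
    return md5.hexdigest()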
Example #2
def init(self):
    # load record
    if not os.path.exists('.cache'):
        os.mkdir('.cache')
    if os.path.exists('.cache/record.yaml'):
        record = utils.yaml_load('.cache/record.yaml')
        logger.info('record found, load record')
    else:
        record = {'hash': []}
        utils.yaml_dump(record, '.cache/record.yaml')
        logger.info('no record found, create new record')
    cache_flag = False
    # get file list
    file_list = self._get_file_list()
    for path, type_ in file_list:
        fhash = utils.file_md5(path)
        assert len(fhash) > 0
        if fhash not in record['hash']:
            # add new data to cache
            cache_flag = True
            record['hash'].append(fhash)
            logger.info('New file {0} {1} found, add to cache'.format(
                path, type_))
            prep_sym(path, type_)
            logger.info('{0} {1} add to cache success'.format(path, type_))
            utils.yaml_dump(record, '.cache/record.yaml')
    if cache_flag:
        logger.info('Cache data update success')
        prep_wm()
        prep_vec()
    else:
        logger.info('Data up to date, use cache data')
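The `utils.yaml_load` and `utils.yaml_dump` helpers used above are not shown either; a plausible sketch built on PyYAML (the signatures are assumptions inferred from the calls above):

import yaml

def yaml_load(path):
    # Read a YAML document from disk; safe_load avoids executing arbitrary tags.
    with open(path, "r") as f:
        return yaml.safe_load(f)

def yaml_dump(data, path):
    # Write the record back so the cache survives between runs.
    with open(path, "w") as f:
        yaml.safe_dump(data, f, default_flow_style=False)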
Example #3
def download_by_date(d, folder=None, overwrite=False):
    """
    Download the loggedpoint from archived files for the day
    """
    archive_group = get_archive_group(d)
    archive_id = get_archive_id(d)
    resource_id = "{}.gpkg".format(archive_id)
    logger.info(
        "Begin to download archived loggedpoint, archive_group={},archive_id={}"
        .format(archive_group, archive_id))
    resource_repository = get_resource_repository()
    folder = folder or tempfile.mkdtemp(
        prefix="loggedpoint{}".format(d.strftime("%Y-%m-%d")))
    metadata, filename = resource_repository.download_resource(
        archive_group,
        resource_id,
        filename=os.path.join(folder, resource_id),
        overwrite=overwrite)
    file_md5 = utils.file_md5(filename)
    if metadata["file_md5"] != file_md5:
        raise Exception(
            "Download loggedpoint archive file failed.source file's md5={}, downloaded file's md5={}"
            .format(metadata["file_md5"], file_md5))

    layer_metadata = gdal.get_layers(filename)[0]
    if metadata["features"] != layer_metadata["features"]:
        raise Exception(
            "Download loggedpoint archive file failed.source file's features={}, downloaded file's features={}"
            .format(metadata["features"], layer_metadata["features"]))

    logger.info(
        "End to download archived loggedpoint, archive_group={},archive_id={},dowloaded_file={},features={}"
        .format(archive_group, archive_id, filename, metadata["features"]))
    return (metadata, filename)
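A minimal usage sketch for the function above; the date and the printed fields are illustrative only:

from datetime import date

# Download the archive for a given day into a temporary folder; the function
# raises if the md5 or the feature count of the downloaded file does not match
# the stored metadata.
metadata, gpkg_path = download_by_date(date(2021, 1, 1))
print(metadata["features"], gpkg_path)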
Example #4
def _archive_file(storage,
                  f,
                  resource_id,
                  checking_policy,
                  check_md5,
                  metadata=None):
    #push the updated or new file into storage
    metadata = metadata or {}
    file_status = os.stat(f)
    file_modify_date = file_status.st_mtime_ns
    file_size = file_status.st_size
    if check_md5:
        file_md5 = utils.file_md5(f)
    else:
        file_md5 = None

    try:
        res_metadata = storage.get_resource_metadata(resource_id)
    except ResourceNotFound as ex:
        res_metadata = None

    is_changed = False
    for policy in checking_policy:
        if policy == FILE_MD5:
            if not res_metadata or res_metadata.get("file_md5") != file_md5:
                is_changed = True
                break
        elif policy == FILE_MODIFY_DATE:
            if not res_metadata or res_metadata.get(
                    "file_modify_date") != file_modify_date:
                is_changed = True
                break
        elif policy == FILE_SIZE:
            if not res_metadata or res_metadata.get("file_msize") != file_size:
                is_changed = True
                break
        else:
            raise Exception("Checking policy({}) Not Support".format(policy))

    if not is_changed:
        logger.debug(
            "File({},{}) is not changed, no need to archive again".format(
                f, resource_id))
        return False

    metadata["archive_time"] = timezone.now()
    metadata["resource_id"] = resource_id
    metadata["file_modify_date"] = file_modify_date
    metadata["file_size"] = file_size
    if check_md5:
        metadata["file_md5"] = file_md5

    storage.push_file(f, metadata=metadata)
    logger.debug("File({},{}) was archived successfully.".format(
        f, resource_id))
    return True
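The `FILE_MD5`, `FILE_MODIFY_DATE` and `FILE_SIZE` checking policies referenced above are defined elsewhere in the module. A sketch of plausible definitions (the concrete values are assumptions; `_archive_file` only compares them for equality):

# Hypothetical policy markers; any three distinct values would work.
FILE_MD5 = "file_md5"
FILE_MODIFY_DATE = "file_modify_date"
FILE_SIZE = "file_size"

# A caller would typically pass something like
# checking_policy=[FILE_MD5, FILE_SIZE] together with check_md5=True.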
Example #5
    def update_metadata(self, force=False):
        if self.content_path:
            mimetype, encoding = mimetypes.guess_type(self.content_path)

            if force or not self.content_type:
                self.content_type = mimetype
            if force or not self.content_encoding:
                self.content_encoding = encoding

        if force or self.size is None:
            self.size = os.path.getsize(self.abspath)
        if force or not self.md5:
            self.md5 = file_md5(self.abspath)
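For reference, the standard-library calls this method relies on behave as follows; a small standalone sketch:

import mimetypes
import os

# guess_type returns the content type and any transport encoding,
# e.g. ("text/csv", "gzip") for a gzip-compressed CSV file.
mimetype, encoding = mimetypes.guess_type("report.csv.gz")
print(mimetype, encoding)

# getsize returns the size in bytes of an existing file.
print(os.path.getsize(__file__))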
Example #6
def rearchive_from_archive_table_by_date(d,
                                         check=False,
                                         backup_folder=None,
                                         max_diff=100):
    """
    Archive the resouce tracking history from archive table by start_date(inclusive), end_date(exclusive)
    check: check whether archiving is succeed or not
    """
    archive_group = get_archive_group(d)
    archive_id = get_archive_id(d)
    start_date = timezone.datetime(d.year, d.month, d.day)
    end_date = start_date + timedelta(days=1)
    backup_table = get_backup_table(d)

    db = settings.DATABASE
    resource_id = "{}.gpkg".format(archive_id)
    metadata = {
        "start_archive": timezone.now(),
        "resource_id": resource_id,
        "resource_group": archive_group,
        "start_archive_date": start_date,
        "end_archive_date": end_date
    }

    filename = None
    vrt_filename = None
    work_folder = tempfile.mkdtemp(prefix="archive_loggedpoint")
    resourcemetadata = None
    try:
        logger.info(
            "Begin to rearchive loggedpoint from archive table '{}', archive_group={},archive_id={},start_date={},end_date={}"
            .format(backup_table, archive_group, archive_id, start_date,
                    end_date))
        resource_repository = get_resource_repository()
        try:
            res_metadata = resource_repository.get_resource_metadata(
                archive_group, resource_id)
            archived_count = res_metadata["features"]
        except Exception:
            archived_count = 0

        sql = archive_from_archive_table_sql.format(
            backup_table, start_date.strftime(datetime_pattern),
            end_date.strftime(datetime_pattern))
        #export the archived data as geopackage
        export_result = db.export_spatial_data(sql,
                                               filename=os.path.join(
                                                   work_folder,
                                                   "loggedpoint.gpkg"),
                                               layer=archive_id)
        if not export_result:
            #no data to archive
            if archived_count:
                logger.info(
                    "The loggedpoint has already been archived. archive_id={0},start_archive_date={1},end_archive_date={2}"
                    .format(archive_id, start_date, end_date))
            else:
                logger.info(
                    "No loggedpoints to archive, archive_group={},archive_id={},start_date={},end_date={}"
                    .format(archive_group, archive_id, start_date, end_date))
            return

        if archived_count:
            if backup_folder:
                download_by_date(d, backup_folder)

        layer_metadata, filename = export_result
        if max_diff and abs(archived_count -
                            layer_metadata["features"]) > max_diff:
            raise Exception(
                "The difference({}) between the archived features({}) and the rearchived features({}) is greater than the max difference({})"
                .format(abs(archived_count - layer_metadata["features"]),
                        archived_count, layer_metadata["features"], max_diff))

        metadata["file_md5"] = utils.file_md5(filename)
        metadata["layer"] = layer_metadata["layer"]
        metadata["features"] = layer_metadata["features"]
        #upload archive file
        logger.debug(
            "Begin to push loggedpoint archive file to blob storage, archive_group={},archive_id={},start_date={},end_date={}"
            .format(archive_group, archive_id, start_date, end_date))
        resourcemetadata = resource_repository.push_file(
            filename, metadata, f_post_push=_set_end_datetime("end_archive"))
        if check:
            #check whether the upload succeeded or not
            logger.debug(
                "Begin to check whether loggedpoint archive file was pushed to blob storage successfully, archive_group={},archive_id={},start_date={},end_date={}"
                .format(archive_group, archive_id, start_date, end_date))
            d_metadata, d_filename = resource_repository.download_resource(
                archive_group,
                resource_id,
                filename=os.path.join(work_folder,
                                      "loggedpoint_download.gpkg"))
            d_file_md5 = utils.file_md5(d_filename)
            if metadata["file_md5"] != d_file_md5:
                raise Exception(
                    "Upload loggedpoint archive file failed.source file's md5={}, uploaded file's md5={}"
                    .format(metadata["file_md5"], d_file_md5))

            d_layer_metadata = gdal.get_layers(d_filename)[0]
            if d_layer_metadata["features"] != layer_metadata["features"]:
                raise Exception(
                    "Upload loggedpoint archive file failed.source file's features={}, uploaded file's features={}"
                    .format(layer_metadata["features"],
                            d_layer_metadata["features"]))

        #update vrt file
        logger.debug(
            "Begin to update vrt file to union all spatial files in the same group, archive_group={},archive_id={},start_date={},end_date={}"
            .format(archive_group, archive_id, start_date, end_date))
        groupmetadata = resourcemetadata[archive_group]
        vrt_id = get_vrt_id(archive_group)
        try:
            vrt_metadata = next(m for m in groupmetadata.values()
                                if m["resource_id"] == vrt_id)
        except StopIteration as ex:
            vrt_metadata = {
                "resource_id": vrt_id,
                "resource_file": vrt_id,
                "resource_group": archive_group
            }

        vrt_metadata["features"] = 0
        for m in groupmetadata.values():
            if m["resource_id"] == vrt_id:
                continue
            vrt_metadata["features"] += m["features"]

        layers = [(m["layer"], m["resource_file"])
                  for m in groupmetadata.values()
                  if m["resource_id"] != vrt_id]
        layers.sort(key=lambda o: o[0])
        layers = os.linesep.join(
            individual_layer.format(m[0], m[1]) for m in layers)
        vrt_data = vrt.format(get_vrt_layername(archive_group), layers)
        vrt_filename = os.path.join(work_folder, "loggedpoint.vrt")
        with open(vrt_filename, "w") as f:
            f.write(vrt_data)

        vrt_metadata["file_md5"] = utils.file_md5(vrt_filename)

        resourcemetadata = resource_repository.push_file(
            vrt_filename,
            vrt_metadata,
            f_post_push=_set_end_datetime("updated"))
        if check:
            #check whether the upload succeeded or not
            logger.debug(
                "Begin to check whether the group vrt file was pused to blob storage successfully, archive_group={},archive_id={},start_date={},end_date={}"
                .format(archive_group, archive_id, start_date, end_date))
            d_vrt_metadata, d_vrt_filename = resource_repository.download_resource(
                archive_group,
                vrt_id,
                filename=os.path.join(work_folder, "loggedpoint_download.vrt"))
            d_vrt_file_md5 = utils.file_md5(d_vrt_filename)
            if vrt_metadata["file_md5"] != d_vrt_file_md5:
                raise Exception(
                    "Upload vrt file failed.source file's md5={}, uploaded file's md5={}"
                    .format(vrt_metadata["file_md5"], d_vrt_file_md5))

        logger.info(
            "End to archive loggedpoint from archive table '{}', archive_group={},archive_id={},start_date={},end_date={},archived features={}"
            .format(backup_table, archive_group, archive_id, start_date,
                    end_date, layer_metadata["features"]))
        return metadata

    finally:
        utils.remove_folder(work_folder)
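The `vrt` and `individual_layer` format strings used when rebuilding the group index are defined elsewhere in the module. A plausible sketch, based on the OGR VRT union-layer format (the exact templates are an assumption; only the two positional placeholders are implied by the calls above):

# Hypothetical templates for the OGR VRT that unions all geopackages in a group
# into a single virtual layer.
individual_layer = """        <OGRVRTLayer name="{0}">
            <SrcDataSource>{1}</SrcDataSource>
        </OGRVRTLayer>"""

vrt = """<OGRVRTDataSource>
    <OGRVRTUnionLayer name="{0}">
{1}
    </OGRVRTUnionLayer>
</OGRVRTDataSource>"""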
Example #7
def archive(archive_group,
            archive_id,
            start_date,
            end_date,
            delete_after_archive=False,
            check=False,
            overwrite=False,
            backup_table=None,
            rearchive=False,
            source_table="tracking_loggedpoint"):
    """
    Archive the resouce tracking history by start_date(inclusive), end_date(exclusive)
    archive_id: a unique identity of the archive file. that means different start_date and end_date should have a different archive_id
    overwrite: False: raise exception if archive_id already exists; True: overwrite the existing archive file
    rearchive: if true, rearchive the existing archived file;if false, throw exception if already archived 
    delete_after_archive: delete the archived data from table tracking_loggedpoint
    check: check whether archiving is succeed or not
    return a tuple (archived or not, archive type(archive,overwrite,rearchive),archived_metadata)
    """
    db = settings.DATABASE
    resource_id = "{}.gpkg".format(archive_id)
    metadata = {
        "start_archive": timezone.now(),
        "resource_id": resource_id,
        "resource_group": archive_group,
        "start_archive_date": start_date,
        "end_archive_date": end_date
    }

    if rearchive:
        overwrite = True

    filename = None
    vrt_filename = None
    work_folder = tempfile.mkdtemp(prefix="archive_loggedpoint")
    resourcemetadata = None
    archive_type = "archive"
    try:
        logger.info(
            "Begin to archive loggedpoint, archive_group={},archive_id={},start_date={},end_date={}"
            .format(archive_group, archive_id, start_date, end_date))
        resource_repository = get_resource_repository()
        sql = archive_sql.format(source_table,
                                 start_date.strftime(datetime_pattern),
                                 end_date.strftime(datetime_pattern))
        if db.count(sql) == 0:
            #no data to archive
            if resource_repository.is_exist(archive_group, resource_id):
                logger.info(
                    "The loggedpoint has already been archived. archive_id={0},start_archive_date={1},end_archive_date={2}"
                    .format(archive_id, start_date, end_date))
            else:
                logger.info(
                    "No loggedpoints to archive, archive_group={},archive_id={},start_date={},end_date={}"
                    .format(archive_group, archive_id, start_date, end_date))
            return (False, None, None)

        if resource_repository.is_exist(archive_group, resource_id):
            #already archived, restore the data
            if not overwrite:
                #in normal mode
                raise ResourceAlreadyExist(
                    "The loggedpoint has already been archived. archive_id={0},start_archive_date={1},end_archive_date={2}"
                    .format(archive_id, start_date, end_date))
            elif rearchive:
                #in rearchive mode. restore the data to original table
                logger.info(
                    "In rearchive mode, The resource '{}' in blob storage will be restored and archived again"
                    .format(resource_id))
                logger.debug(
                    "Begin to restore the data({0}) from blob storage to table 'tracking_loggedpoint'"
                    .format(resource_id))
                restore_by_archive(archive_group,
                                   archive_id,
                                   restore_to_origin_table=True,
                                   preserve_id=True)
                logger.debug(
                    "End to restore the data({0}) from blob storage to table 'tracking_loggedpoint'"
                    .format(resource_id))
                if db.is_table_exist(backup_table):
                    logger.debug(
                        "Begin to delete the data from backup table '{}'".
                        format(backup_table))
                    count = db.update(
                        delete_backup_sql.format(
                            start_date.strftime(datetime_pattern),
                            end_date.strftime(datetime_pattern), backup_table))
                    logger.debug(
                        "End to delete {1} features from backup table {0}".
                        format(backup_table, count))
                archive_type = "rearchive"
            else:
                #in overwrite mode.
                logger.info(
                    "In overwrite mode, The resource '{}' in blob storage will be overwrided"
                    .format(resource_id))
                archive_type = "overwrite"

        #export the archived data as geopackage
        export_result = db.export_spatial_data(sql,
                                               filename=os.path.join(
                                                   work_folder,
                                                   "loggedpoint.gpkg"),
                                               layer=archive_id)
        if not export_result:
            logger.info(
                "No loggedpoints to archive, archive_group={},archive_id={},start_date={},end_date={}"
                .format(archive_group, archive_id, start_date, end_date))
            return (False, None, None)

        layer_metadata, filename = export_result
        metadata["file_md5"] = utils.file_md5(filename)
        metadata["layer"] = layer_metadata["layer"]
        metadata["features"] = layer_metadata["features"]
        #upload archive file
        logger.debug(
            "Begin to push loggedpoint archive file to blob storage, archive_group={},archive_id={},start_date={},end_date={}"
            .format(archive_group, archive_id, start_date, end_date))
        resourcemetadata = resource_repository.push_file(
            filename, metadata, f_post_push=_set_end_datetime("end_archive"))
        if check:
            #check whether the upload succeeded or not
            logger.debug(
                "Begin to check whether loggedpoint archive file was pushed to blob storage successfully, archive_group={},archive_id={},start_date={},end_date={}"
                .format(archive_group, archive_id, start_date, end_date))
            d_metadata, d_filename = resource_repository.download_resource(
                archive_group,
                resource_id,
                filename=os.path.join(work_folder,
                                      "loggedpoint_download.gpkg"))
            d_file_md5 = utils.file_md5(d_filename)
            if metadata["file_md5"] != d_file_md5:
                raise Exception(
                    "Upload loggedpoint archive file failed.source file's md5={}, uploaded file's md5={}"
                    .format(metadata["file_md5"], d_file_md5))

            d_layer_metadata = gdal.get_layers(d_filename)[0]
            if d_layer_metadata["features"] != layer_metadata["features"]:
                raise Exception(
                    "Upload loggedpoint archive file failed.source file's features={}, uploaded file's features={}"
                    .format(layer_metadata["features"],
                            d_layer_metadata["features"]))

        #update vrt file
        logger.debug(
            "Begin to update vrt file to union all spatial files in the same group, archive_group={},archive_id={},start_date={},end_date={}"
            .format(archive_group, archive_id, start_date, end_date))
        groupmetadata = resourcemetadata[archive_group]
        vrt_id = get_vrt_id(archive_group)
        try:
            vrt_metadata = next(m for m in groupmetadata.values()
                                if m.get("resource_id") == vrt_id)
        except StopIteration as ex:
            vrt_metadata = {
                "resource_id": vrt_id,
                "resource_file": vrt_id,
                "resource_group": archive_group
            }

        vrt_metadata["features"] = 0
        for m in groupmetadata.values():
            if not m.get("resource_id") or m.get("resource_id") == vrt_id:
                continue
            vrt_metadata["features"] += m["features"]

        layers = [(m["layer"], m["resource_file"])
                  for m in groupmetadata.values()
                  if m.get("resource_id") and m.get("resource_id") != vrt_id]
        layers.sort(key=lambda o: o[0])
        layers = os.linesep.join(
            individual_layer.format(m[0], m[1]) for m in layers)
        vrt_data = vrt.format(get_vrt_layername(archive_group), layers)
        vrt_filename = os.path.join(work_folder, "loggedpoint.vrt")
        with open(vrt_filename, "w") as f:
            f.write(vrt_data)

        vrt_metadata["file_md5"] = utils.file_md5(vrt_filename)

        resourcemetadata = resource_repository.push_file(
            vrt_filename,
            vrt_metadata,
            f_post_push=_set_end_datetime("updated"))
        if check:
            #check whether the upload succeeded or not
            logger.debug(
                "Begin to check whether the group vrt file was pused to blob storage successfully, archive_group={},archive_id={},start_date={},end_date={}"
                .format(archive_group, archive_id, start_date, end_date))
            d_vrt_metadata, d_vrt_filename = resource_repository.download_resource(
                archive_group,
                vrt_id,
                filename=os.path.join(work_folder, "loggedpoint_download.vrt"))
            d_vrt_file_md5 = utils.file_md5(d_vrt_filename)
            if vrt_metadata["file_md5"] != d_vrt_file_md5:
                raise Exception(
                    "Upload vrt file failed.source file's md5={}, uploaded file's md5={}"
                    .format(vrt_metadata["file_md5"], d_vrt_file_md5))

        if backup_table:
            if not db.is_table_exist(backup_table):
                #table doesn't exist, create the table and indexes
                sql = create_backup_table_sql.format(backup_table)
                db.executeDDL(sql)

            sql = backup_sql.format(start_date.strftime(datetime_pattern),
                                    end_date.strftime(datetime_pattern),
                                    backup_table)
            count = db.update(sql)
            if count == layer_metadata["features"]:
                logger.debug(
                    "Backup {1} features to backup table {0},sql={2}".format(
                        backup_table, count, sql))
            else:
                raise Exception(
                    "Only backup {1}/{2} features to backup table {0}".format(
                        backup_table, count, layer_metadata["features"]))

        if delete_after_archive:
            logger.debug(
                "Begin to delete archived data, archive_group={},archive_id={},start_date={},end_date={}"
                .format(archive_group, archive_id, start_date, end_date))

            delete_sql = del_sql.format(start_date.strftime(datetime_pattern),
                                        end_date.strftime(datetime_pattern))
            deleted_rows = db.update(delete_sql)
            logger.debug(
                "Delete {} rows from table tracking_loggedpoint, archive_group={},archive_id={},start_date={},end_date={};sql={}"
                .format(deleted_rows, archive_group, archive_id, start_date,
                        end_date, delete_sql))

        logger.info(
            "End to archive loggedpoint, archive_group={},archive_id={},start_date={},end_date={},archived features={}"
            .format(archive_group, archive_id, start_date, end_date,
                    layer_metadata["features"]))
        return (True, archive_type, metadata)

    finally:
        utils.remove_folder(work_folder)
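A hedged usage sketch for the function above (the group, archive id and table names are illustrative); the returned tuple lets the caller distinguish a fresh archive from an overwrite, a rearchive or a no-op:

from datetime import datetime, timedelta

start_date = datetime(2021, 1, 1)
end_date = start_date + timedelta(days=1)

# Archive one day of tracking history, verify the upload, keep a row-level
# backup and only then delete the rows from tracking_loggedpoint.
archived, archive_type, metadata = archive(
    "loggedpoint/2021", "loggedpoint_2021_01_01", start_date, end_date,
    delete_after_archive=True, check=True,
    backup_table="tracking_loggedpoint_backup")
if archived:
    print(archive_type, metadata["features"])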
Example #8
            cached_image, temp_rect = utils.load_image("{0}_processed.png".format(stage_file))
            self.blit(cached_image, (0, 0))
            # Detect limits
            self.calculate_limits()

        except pygame.error as message:
            print(message)
        except IndexError as message:
            print(message)
        except Exception as message:
            print(message)
            print("No cached image, generating it")
        except:
            print("No cached image, generating it")

        level_md5 = utils.file_md5("{0}.gif".format(stage_file))
        print("MD5 of level", level_md5)
        level_old_md5 = utils.get_option("{0}_hash".format(stage_file))
        print("Old MD5 of level", level_old_md5)
        if level_md5 != level_old_md5:
            print("Level has changed, process it again")
            cached_image = False

        if cached_image == False:
            self.overimage_lava, self.overrect_lava = utils.load_image("lava.png")
            self.overimage_rocks, self.overrect_rocks = utils.load_image("rocks.png")

            self.image_grass_bl, self.rect_grass = utils.load_image("stage_grass_bl.png")
            self.image_grass_br, self.rect_grass = utils.load_image("stage_grass_br.png")
            self.image_grass_tl, self.rect_grass = utils.load_image("stage_grass_tl.png")
            self.image_grass_tr, self.rect_grass = utils.load_image("stage_grass_tr.png")
Example #9

def archive(archive_group,archive_id,start_date,end_date,delete_after_archive=False,check=False,overwrite=False):
    """
    Archive the resouce tracking history by start_date(inclusive), end_date(exclusive)
    archive_id: a unique identity of the archive file. that means different start_date and end_date should have a different archive_id
    overwrite: False: raise exception if archive_id already exists; True: overwrite the existing archive file
    delete_after_archive: delete the archived data from table tracking_loggedpoint
    check: check whether archiving is succeed or not
    """
    db = settings.DATABASE
    archive_filename = "{}.gpkg".format(archive_id)
    metadata = {
        "start_archive":timezone.now(),
        "resource_id":archive_id,
        "resource_file":archive_filename,
        "resource_group":archive_group,
        "start_archive_date":start_date,
        "end_archive_date":end_date
    }

    filename = None
    vrt_filename = None
    work_folder = tempfile.mkdtemp(prefix="archive_loggedpoint")
    def set_end_archive(metadata):
        metadata["end_archive"] = timezone.now()
    resourcemetadata = None
    try:
        logger.debug("Begin to archive loggedpoint, archive_group={},archive_id={},start_date={},end_date={}".format(archive_group,archive_id,start_date,end_date))
        blob_resource = get_blob_resource()
        if not overwrite:
            #check whether the archive exists or not
            resourcemetadata = blob_resource.resourcemetadata
            if blob_resource.is_exist(archive_id,resource_group=archive_group):
                raise ResourceAlreadyExist("The loggedpoint has already been archived. archive_id={0},start_archive_date={1},end_archive_date={2}".format(archive_id,start_date,end_date))

        #export the archived data as geopackage
        sql = archive_sql.format(start_date.strftime(datetime_pattern),end_date.strftime(datetime_pattern))
        export_result = db.export_spatial_data(sql,filename=os.path.join(work_folder,"loggedpoint.gpkg"),layer=archive_id)
        if not export_result:
            logger.debug("No loggedpoints to archive, archive_group={},archive_id={},start_date={},end_date={}".format(archive_group,archive_id,start_date,end_date))
            return

        layer_metadata,filename = export_result
        metadata["file_md5"] = utils.file_md5(filename)
        metadata["layer"] = layer_metadata["layer"]
        metadata["features"] = layer_metadata["features"]
        #upload archive file
        logger.debug("Begin to push loggedpoint archive file to blob storage, archive_group={},archive_id={},start_date={},end_date={}".format(archive_group,archive_id,start_date,end_date))
        resourcemetadata = blob_resource.push_file(filename,metadata=metadata,f_post_push=_set_end_datetime("end_archive"))
        if check:
            #check whether the upload succeeded or not
            logger.debug("Begin to check whether loggedpoint archive file was pushed to blob storage successfully, archive_group={},archive_id={},start_date={},end_date={}".format(
                archive_group,archive_id,start_date,end_date
            ))
            d_metadata,d_filename = blob_resource.download(archive_id,resource_group=archive_group,filename=os.path.join(work_folder,"loggedpoint_download.gpkg"))
            d_file_md5 = utils.file_md5(d_filename)
            if metadata["file_md5"] != d_file_md5:
                raise Exception("Upload loggedpoint archive file failed.source file's md5={}, uploaded file's md5={}".format(metadata["file_md5"],d_file_md5))

            d_layer_metadata = gdal.get_layers(d_filename)[0]
            if d_layer_metadata["features"] != layer_metadata["features"]:
                raise Exception("Upload loggedpoint archive file failed.source file's features={}, uploaded file's features={}".format(layer_metadata["features"],d_layer_metadata["features"]))
        

        #update vrt file
        logger.debug("Begin to update vrt file to union all spatial files in the same group, archive_group={},archive_id={},start_date={},end_date={}".format(
            archive_group,archive_id,start_date,end_date
        ))
        groupmetadata = resourcemetadata[archive_group]
        vrt_id = "{}.vrt".format(archive_group)
        try:
            vrt_metadata = next(m for m in groupmetadata.values() if m["resource_id"] == vrt_id)
        except StopIteration as ex:
            vrt_metadata = {"resource_id":vrt_id,"resource_file":vrt_id,"resource_group":archive_group}

        vrt_metadata["features"] = 0
        for m in groupmetadata.values():
            if m["resource_id"] == vrt_id:
                continue
            vrt_metadata["features"] += m["features"]

        layers =  [(m["resource_id"],m["resource_file"]) for m in groupmetadata.values() if m["resource_id"] != vrt_id]
        layers.sort(key=lambda o:o[0])
        layers = os.linesep.join(individual_layer.format(m[0],m[1]) for m in layers )
        vrt_data = vrt.format(archive_group,layers)
        vrt_filename = os.path.join(work_folder,"loggedpoint.vrt")
        with open(vrt_filename,"w") as f:
            f.write(vrt_data)

        vrt_metadata["file_md5"] = utils.file_md5(vrt_filename)

        resourcemetadata = blob_resource.push_file(vrt_filename,metadata=vrt_metadata,f_post_push=_set_end_datetime("updated"))
        if check:
            #check whether the upload succeeded or not
            logger.debug("Begin to check whether the group vrt file was pused to blob storage successfully, archive_group={},archive_id={},start_date={},end_date={}".format(
                archive_group,archive_id,start_date,end_date
            ))
            d_vrt_metadata,d_vrt_filename = blob_resource.download(vrt_id,resource_group=archive_group,filename=os.path.join(work_folder,"loggedpoint_download.vrt"))
            d_vrt_file_md5 = utils.file_md5(d_vrt_filename)
            if vrt_metadata["file_md5"] != d_vrt_file_md5:
                raise Exception("Upload vrt file failed.source file's md5={}, uploaded file's md5={}".format(vrt_metadata["file_md5"],d_vrt_file_md5))

        if delete_after_archive:
            logger.debug("Begin to delete archived data, archive_group={},archive_id={},start_date={},end_date={}".format(
                archive_group,archive_id,start_date,end_date
            ))

            delete_sql = del_sql.format(start_date.strftime(datetime_pattern),end_date.strftime(datetime_pattern))
            deleted_rows = db.update(delete_sql)
            logger.debug("Delete {} rows from table tracking_loggedpoint, archive_group={},archive_id={},start_date={},end_date={}".format(
                deleted_rows,archive_group,archive_id,start_date,end_date
            ))

        logger.debug("End to archive loggedpoint, archive_group={},archive_id={},start_date={},end_date={}".format(archive_group,archive_id,start_date,end_date))


    finally:
        utils.remove_folder(work_folder)
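Most of the examples above pass `f_post_push=_set_end_datetime(...)` to `push_file`. That helper is not shown; judging from the inline `set_end_archive` callback in this last example, it presumably returns a callback that stamps the given metadata key with the completion time. A minimal sketch:

def _set_end_datetime(key):
    # Return a callback that push_file invokes after a successful upload;
    # it records the completion time under the given metadata key.
    # `timezone` is the same module the examples above already use.
    def _post_push(metadata):
        metadata[key] = timezone.now()
    return _post_push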