Example #1
    + str(sys.version.split()[0]) + "."\
    "\n\nCopyright (c) 2015 Enclustra GmbH, Switzerland." \
    "\nAll rights reserved."

args = parser.parse_args()

if args.disable_colors is True:
    utils.set_colors(False)

if args.version is True:
    print(str("\n" + tool_version + "\n"))
    sys.exit(0)

elif args.clean_all is True:
    utils.print_message(utils.logtype.INFO, "Cleaning ...")
    utils.remove_folder(root_path + "/bin")
    utils.remove_folder(root_path + "/binaries")
    utils.remove_folder(root_path + "/sources")
    # get all the output dirs
    dirs = [name for name in os.listdir(root_path) if
            os.path.isdir(os.path.join(root_path, name))]
    out_dirs = filter(lambda pref: 'out_' in pref, dirs)
    for directory in out_dirs:
        utils.remove_folder(root_path + "/" + directory)
    utils.print_message(utils.logtype.INFO, "Done.")
    sys.exit(0)

# if we're in console mode
elif args.saved_config is not None:
    if not os.path.isfile(args.saved_config):
        utils.print_message(utils.logtype.ERROR,
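
For context, a minimal sketch of the argparse setup this fragment assumes. Only the flag names are inferred from the args attributes used above (disable_colors, version, clean_all, saved_config); the description and help strings are illustrative.

import argparse

# Hypothetical reconstruction of the parser the fragment above relies on;
# argparse maps --disable-colors to args.disable_colors, and so on.
parser = argparse.ArgumentParser(description="build environment helper")
parser.add_argument("--disable-colors", action="store_true",
                    help="disable colored console output")
parser.add_argument("--version", action="store_true",
                    help="print the tool version and exit")
parser.add_argument("--clean-all", action="store_true",
                    help="remove generated and downloaded directories")
parser.add_argument("--saved-config", metavar="FILE",
                    help="run in console mode with a saved configuration file")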
Example #2
    def tearDown(self):
        remove_folder(self.dataset_name)
Example #3
    def tearDown(self):
        remove_folder(self.dataset_name)
        clean_tmp(self.tmp_path)
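
The two tearDown snippets above only show the cleanup side. A minimal sketch of the surrounding test case, assuming a hypothetical setUp (the folder names are illustrative, and remove_folder/clean_tmp come from the project's own helpers, whose import path is not shown here):

import unittest

class DatasetTestCase(unittest.TestCase):
    def setUp(self):
        # Illustrative values; the real tests create the dataset and a
        # temporary working path before each test runs.
        self.dataset_name = "demo_dataset"
        self.tmp_path = "/tmp/demo_dataset"

    def tearDown(self):
        # Clean up everything the test created, as in the snippets above.
        remove_folder(self.dataset_name)
        clean_tmp(self.tmp_path)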
Example #4
def process_cm_binary_data(name,
                           data,
                           main_jsonnet,
                           ext_libs=[],
                           user_args={}):
    """Process binary_data field from jsonnet configMap.

    Extracts folder, evaluates main_jsonnet file from folder
    and parses it to separate json objects.
    main_jsonnet should generate all jsons in one json file.

    Args:
        name (str): Config map name.
        data (dict): Binary data from configMap labeled as jsonnet code.
            It should be base64 encoded jsonnet folder (archive).
        main_jsonnet (str): Path in extracted folder to jsonnet file
            that will be evaluated.
        ext_libs (:obj:`list of str`, optional): List of paths to
            external jsonnet libs.
        user_args (:obj:`dict`, optional): Keyword arguments to jsonnet build function.

    Returns:
        list of (str, dict): Generated json data.

    Raises:
        JsonnetConfigMapError: Raised if jsonnet evaluation fails or
            wrong archive format is provided.
    """
    tmp_folder_name = f"jsonnet_archive_{name}"
    tmp_file_name = f"generated_from_archive_{name}.json"

    jsons = []
    for dataKey in data.keys():
        filename, extension = os.path.splitext(dataKey)

        if extension not in [
                ".gz",
                ".tar",
                ".zip",
                ".bz2",
                ".7z",
                ".tgz",
                ".rar",
                ".xz",
        ]:
            log.error(f"Unsupported archive format: {dataKey}")
            raise JsonnetConfigMapError

        archive_data = data[dataKey]
        utils.extract_archive_data(archive_data, dataKey, tmp_folder_name)

        jsonnet_filepath = os.path.join(tmp_folder_name, main_jsonnet)
        try:
            json_ = _jsonnet.evaluate_file(jsonnet_filepath,
                                           jpathdir=ext_libs,
                                           **user_args)
        except RuntimeError as e:
            log.error(
                f"{main_jsonnet} is not a valid jsonnet, raised error: {e}")
            utils.remove_folder(tmp_folder_name)
            raise JsonnetConfigMapError
        else:
            utils.save_text_to_file("./", tmp_file_name, json_)
            dashboards = parse_json_with_files(tmp_file_name)
            jsons.extend(dashboards)

            utils.remove_file("./", tmp_file_name)
            utils.remove_folder(tmp_folder_name)

    return jsons
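
A usage sketch for process_cm_binary_data. The in-memory archive below is illustrative; it assumes utils.extract_archive_data accepts the base64 text found in a ConfigMap binaryData field, and that the evaluated main.jsonnet maps target filenames to JSON content as the docstring describes.

import base64
import io
import tarfile

# Build a tiny tar.gz archive in memory containing a single main.jsonnet
# whose output maps a target filename to the JSON content to generate.
payload = b'{ "dashboard.json": { "title": "demo" } }'
buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode="w:gz") as tar:
    info = tarfile.TarInfo(name="main.jsonnet")
    info.size = len(payload)
    tar.addfile(info, io.BytesIO(payload))

# The key mimics a ConfigMap binaryData entry; its ".gz" suffix passes the
# extension check above.
data = {"dashboards.tar.gz": base64.b64encode(buf.getvalue()).decode()}

jsons = process_cm_binary_data(name="demo",
                               data=data,
                               main_jsonnet="main.jsonnet")
for target_file, content in jsons:
    print(target_file, content)
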
Example #5
    try:
        ebe_overlays = odir + "/overlays"
        utl.add_tool_template("ebe_overlays", ebe_overlays)
        utl.mkdir_p(ebe_overlays)
    except Exception as e:
        msg = "Unable to create a directory for EBE overlays"
        utl.print_message(utl.logtype.ERROR, msg, str(e))


if args.version is True:
    print(str("\n" + tool_version + "\n"))
    sys.exit(0)

elif args.clean_all is True:
    utils.print_message(utils.logtype.INFO, "Cleaning ...")
    utils.remove_folder(root_path + "/bin")
    utils.remove_folder(root_path + "/binaries")
    call = "git submodule deinit --force sources"
    utils.call_tool(call)
    # get all the output dirs
    dirs = [name for name in os.listdir(root_path) if
            os.path.isdir(os.path.join(root_path, name))]
    out_dirs = filter(lambda pref: 'out_' in pref, dirs)
    for directory in out_dirs:
        utils.remove_folder(root_path + "/" + directory)
    utils.print_message(utils.logtype.INFO, "Done.")
    sys.exit(0)

# if we're in console mode
elif args.saved_config is not None:
    if not os.path.isfile(args.saved_config):
Example #6
def archive(archive_group, archive_id, start_date, end_date,
            delete_after_archive=False, check=False, overwrite=False):
    """
    Archive the resouce tracking history by start_date(inclusive), end_date(exclusive)
    archive_id: a unique identity of the archive file. that means different start_date and end_date should have a different archive_id
    overwrite: False: raise exception if archive_id already exists; True: overwrite the existing archive file
    delete_after_archive: delete the archived data from table tracking_loggedpoint
    check: check whether archiving is succeed or not
    """
    db = settings.DATABASE
    archive_filename = "{}.gpkg".format(archive_id)
    metadata = {
        "start_archive":timezone.now(),
        "resource_id":archive_id,
        "resource_file":archive_filename,
        "resource_group":archive_group,
        "start_archive_date":start_date,
        "end_archive_date":end_date
    }

    filename = None
    vrt_filename = None
    work_folder = tempfile.mkdtemp(prefix="archive_loggedpoint")
    def set_end_archive(metadata):
        metadata["end_archive"] = timezone.now()
    resourcemetadata = None
    try:
        logger.debug("Begin to archive loggedpoint, archive_group={},archive_id={},start_date={},end_date={}".format(archive_group,archive_id,start_date,end_date))
        blob_resource = get_blob_resource()
        if not overwrite:
            # check whether the archive already exists
            resourcemetadata = blob_resource.resourcemetadata
            if blob_resource.is_exist(archive_id,resource_group=archive_group):
                raise ResourceAlreadyExist("The loggedpoint has already been archived. archive_id={0},start_archive_date={1},end_archive_date={2}".format(archive_id,start_date,end_date))

        #export the archived data as geopackage
        sql = archive_sql.format(start_date.strftime(datetime_pattern),end_date.strftime(datetime_pattern))
        export_result = db.export_spatial_data(sql,filename=os.path.join(work_folder,"loggedpoint.gpkg"),layer=archive_id)
        if not export_result:
            logger.debug("No loggedpoints to archive, archive_group={},archive_id={},start_date={},end_date={}".format(archive_group,archive_id,start_date,end_date))
            return

        layer_metadata,filename = export_result
        metadata["file_md5"] = utils.file_md5(filename)
        metadata["layer"] = layer_metadata["layer"]
        metadata["features"] = layer_metadata["features"]
        #upload archive file
        logger.debug("Begin to push loggedpoint archive file to blob storage, archive_group={},archive_id={},start_date={},end_date={}".format(archive_group,archive_id,start_date,end_date))
        resourcemetadata = blob_resource.push_file(filename,metadata=metadata,f_post_push=_set_end_datetime("end_archive"))
        if check:
            # check whether the upload succeeded
            logger.debug("Begin to check whether loggedpoint archive file was pushed to blob storage successfully, archive_group={},archive_id={},start_date={},end_date={}".format(
                archive_group,archive_id,start_date,end_date
            ))
            d_metadata,d_filename = blob_resource.download(archive_id,resource_group=archive_group,filename=os.path.join(work_folder,"loggedpoint_download.gpkg"))
            d_file_md5 = utils.file_md5(d_filename)
            if metadata["file_md5"] != d_file_md5:
                raise Exception("Upload loggedpoint archive file failed.source file's md5={}, uploaded file's md5={}".format(metadata["file_md5"],d_file_md5))

            d_layer_metadata = gdal.get_layers(d_filename)[0]
            if d_layer_metadata["features"] != layer_metadata["features"]:
                raise Exception("Upload loggedpoint archive file failed.source file's features={}, uploaded file's features={}".format(layer_metadata["features"],d_layer_metadata["features"]))
        

        #update vrt file
        logger.debug("Begin to update vrt file to union all spatial files in the same group, archive_group={},archive_id={},start_date={},end_date={}".format(
            archive_group,archive_id,start_date,end_date
        ))
        groupmetadata = resourcemetadata[archive_group]
        vrt_id = "{}.vrt".format(archive_group)
        try:
            vrt_metadata = next(m for m in groupmetadata.values() if m["resource_id"] == vrt_id)
        except StopIteration as ex:
            vrt_metadata = {"resource_id":vrt_id,"resource_file":vrt_id,"resource_group":archive_group}

        vrt_metadata["features"] = 0
        for m in groupmetadata.values():
            if m["resource_id"] == vrt_id:
                continue
            vrt_metadata["features"] += m["features"]

        layers =  [(m["resource_id"],m["resource_file"]) for m in groupmetadata.values() if m["resource_id"] != vrt_id]
        layers.sort(key=lambda o:o[0])
        layers = os.linesep.join(individual_layer.format(m[0],m[1]) for m in layers )
        vrt_data = vrt.format(archive_group,layers)
        vrt_filename = os.path.join(work_folder,"loggedpoint.vrt")
        with open(vrt_filename,"w") as f:
            f.write(vrt_data)

        vrt_metadata["file_md5"] = utils.file_md5(vrt_filename)

        resourcemetadata = blob_resource.push_file(vrt_filename,metadata=vrt_metadata,f_post_push=_set_end_datetime("updated"))
        if check:
            # check whether the upload succeeded
            logger.debug("Begin to check whether the group vrt file was pused to blob storage successfully, archive_group={},archive_id={},start_date={},end_date={}".format(
                archive_group,archive_id,start_date,end_date
            ))
            d_vrt_metadata,d_vrt_filename = blob_resource.download(vrt_id,resource_group=archive_group,filename=os.path.join(work_folder,"loggedpoint_download.vrt"))
            d_vrt_file_md5 = utils.file_md5(d_vrt_filename)
            if vrt_metadata["file_md5"] != d_vrt_file_md5:
                raise Exception("Upload vrt file failed.source file's md5={}, uploaded file's md5={}".format(vrt_metadata["file_md5"],d_vrt_file_md5))

        if delete_after_archive:
            logger.debug("Begin to delete archived data, archive_group={},archive_id={},start_date={},end_date={}".format(
                archive_group,archive_id,start_date,end_date
            ))

            delete_sql = del_sql.format(start_date.strftime(datetime_pattern),end_date.strftime(datetime_pattern))
            deleted_rows = db.update(delete_sql)
            logger.debug("Delete {} rows from table tracking_loggedpoint, archive_group={},archive_id={},start_date={},end_date={}".format(
                deleted_rows,archive_group,archive_id,start_date,end_date
            ))

        logger.debug("End to archive loggedpoint, archive_group={},archive_id={},start_date={},end_date={}".format(archive_group,archive_id,start_date,end_date))


    finally:
        utils.remove_folder(work_folder)
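
An invocation sketch for archive(). The group name, archive_id convention, and date range are illustrative, not taken from the source.

import datetime

# Archive one day of tracking history into its own GeoPackage, verify the
# upload against the local file (check=True), and keep the source rows.
start = datetime.datetime(2021, 1, 1)
end = start + datetime.timedelta(days=1)
archive(
    archive_group="loggedpoint",
    archive_id="loggedpoint_20210101",
    start_date=start,
    end_date=end,
    delete_after_archive=False,
    check=True,
)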