Code example #1
File: backup.py Project: tdanekkb/ibm_zos_core
def mvs_file_backup(dsn, bk_dsn=None):
    """Create a backup data set for an MVS data set

    Arguments:
        dsn {str} -- The name of the data set to backup.
                        It could be an MVS PS/PDS/PDSE/VSAM(KSDS), etc.
        bk_dsn {str} -- The name of the backup data set.

    Raises:
        BackupError: When backup data set exists.
        BackupError: When creation of backup data set fails.
    """
    dsn = _validate_data_set_name(dsn).upper()
    if not bk_dsn:
        hlq = Datasets.hlq()
        bk_dsn = Datasets.temp_name(hlq)
    bk_dsn = _validate_data_set_name(bk_dsn).upper()

    cp_rc = _copy_ds(dsn, bk_dsn)
    # The data set is probably a PDS or PDSE
    if cp_rc == 12:
        # Delete allocated backup that was created when attempting to use _copy_ds()
        # Safe to delete because _copy_ds() would have raised an exception if it did
        # not successfully create the backup data set, so no risk of it predating module invocation
        Datasets.delete(bk_dsn)
        if Datasets.move(dsn, bk_dsn) == 0:
            _allocate_model(dsn, bk_dsn)
        else:
            raise BackupError("Unable to backup data set {0} to {1}".format(
                dsn, bk_dsn))
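A minimal usage sketch (the data set names are hypothetical; BackupError is the exception raised by the function above):

# Hypothetical call; backs up a sequential or partitioned data set.
try:
    mvs_file_backup("USER.SOURCE.PDS", "USER.SOURCE.BACKUP")
except BackupError as err:
    print(err)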
Code example #2
    def create_temp_data_set(LLQ,
                             ds_type="SEQ",
                             size="5M",
                             ds_format="FB",
                             lrecl=80):
        """Creates a temporary data set with the given low level qualifier.

        Arguments:
            LLQ {str} -- Low Level Qualifier to be used for temporary data set
            ds_type {str} -- The data set type, default: Sequential
            size {str} -- The size of the data set, default: 5M
            ds_format {str} -- The record format of the data set, default: FB
            lrecl {int} -- The record length of the data set, default: 80

        Returns:
            str -- Name of the created data set

        Raises:
            OSError: When non-zero return code is received
            from Datasets.create()
        """
        # Requires: from random import choice; from string import ascii_uppercase
        chars = ascii_uppercase
        HLQ2 = ''.join(choice(chars) for i in range(5))
        HLQ3 = ''.join(choice(chars) for i in range(6))
        temp_ds_name = "{0}.{1}.{2}.{3}".format(Datasets.hlq(), HLQ2, HLQ3,
                                                LLQ)

        rc = Datasets.create(temp_ds_name, ds_type, size, ds_format, "", lrecl)
        if rc != 0:
            raise OSError("Unable to create temporary data set")

        return temp_ds_name
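A hedged usage sketch, calling the function exactly as shown (its enclosing scope is not visible in the snippet):

# Hypothetical call; yields something like HLQ.ABCDE.FGHIJK.TEMP.
temp_name = create_temp_data_set("TEMP")
Datasets.delete(temp_name)  # clean up once the data set is no longer needed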
Code example #3
    def __init__(self):
        """VIO DD type to be used in a DDStatement.
        VIO uses DASD space and system I/O more efficiently than other temporary data sets.
        A temporary data set will be created for use in cases where VIO is unavailable.
        Defaults for VIODefinition should be sufficient.
        """
        hlq = Datasets.hlq()
        name = Datasets.temp_name(hlq)
        super().__init__(name)
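A sketch of how this definition might feed a DDStatement (the class name VIODefinition comes from the docstring; pairing a DD name with a definition object follows the import in code example #6 and is an assumption, not confirmed by this snippet):

# Assumption: the __init__ above belongs to a VIODefinition class that can
# serve as the definition half of a DDStatement.
from zoautil_py.types import DDStatement

sysut2_dd = DDStatement("sysut2", VIODefinition())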
Code example #4
File: encode.py Project: tdanekkb/ibm_zos_core
    def temp_data_set(self, reclen, space_u):
        """Creates a temporary data set with the given record length and size

        Arguments:
            reclen {int} -- The record length of the data set
            space_u {int} -- The space used by the source, in KB; the data
                             set is allocated at twice this size

        Returns:
            str -- Name of the allocated data set

        Raises:
            OSError: When any exception is raised during the data set allocation
        """
        size = str(space_u * 2) + "K"
        hlq = Datasets.hlq()
        temp_ps = Datasets.temp_name(hlq)
        rc = Datasets.create(temp_ps, "SEQ", size, "VB", "", reclen)
        if rc:
            raise OSError(
                "Failed when allocating temporary sequential data set!")
        return temp_ps
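A hedged usage sketch (the enclosing class is not shown, so the receiver name is hypothetical):

# Hypothetical call on an instance of the enclosing class; allocates
# roughly 1000K as a VB sequential data set with LRECL 1028.
temp_ps = handler.temp_data_set(reclen=1028, space_u=500)
Datasets.delete(temp_ps)  # clean up when finished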
Code example #5
def mvs_file_backup(dsn, bk_dsn=None):
    """Create a backup data set for an MVS data set

    Arguments:
        dsn {str} -- The name of the data set to backup.
                        It could be an MVS PS/PDS/PDSE/VSAM(KSDS), etc.
        bk_dsn {str, optional} -- The name of the backup data set.
                        If omitted, a name is generated automatically.

    Returns:
        str -- The name of the backup data set.

    Raises:
        BackupError: When backup data set exists.
        BackupError: When creation of backup data set fails.
    """
    dsn = _validate_data_set_name(dsn).upper()
    if is_member(dsn):
        if not bk_dsn:
            bk_dsn = extract_dsname(dsn) + "({0})".format(temp_member_name())
        bk_dsn = _validate_data_set_name(bk_dsn).upper()
        if Datasets.copy(dsn, bk_dsn) != 0:
            raise BackupError("Unable to backup {0} to {1}".format(
                dsn, bk_dsn))
    else:
        if not bk_dsn:
            bk_dsn = Datasets.temp_name(Datasets.hlq())
        bk_dsn = _validate_data_set_name(bk_dsn).upper()
        cp_rc = _copy_ds(dsn, bk_dsn)
        if cp_rc == 12:  # The data set is probably a PDS or PDSE
            # Delete allocated backup that was created when attempting to use _copy_ds()
            # Safe to delete because _copy_ds() would have raised an exception if it did
            # not successfully create the backup data set, so no risk of it predating module invocation
            Datasets.delete(bk_dsn)
            _allocate_model(bk_dsn, dsn)
            rc, out, err = _copy_pds(dsn, bk_dsn)
            if rc != 0:
                raise BackupError(
                    "Unable to backup data set {0} to {1}".format(dsn, bk_dsn))
    return bk_dsn
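A short sketch of the member path this version adds (names are hypothetical; with bk_dsn omitted, the backup goes to a generated member of the same PDS):

# Hypothetical call; returns the generated backup name.
backup = mvs_file_backup("USER.SOURCE.PDS(MEMBER1)")
print(backup)  # e.g. USER.SOURCE.PDS(<generated member>)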
Code example #6
import mysql.connector
# Import the Z Open Automation Utilities libraries we need
from zoautil_py import MVSCmd, Datasets, Jobs
from zoautil_py.types import DDStatement
# Import datetime, needed so we can format the report
from datetime import datetime
# Import os, needed to get the environment variables
import os
import string
import time
# Take the contents of this data set and read it into content variables
USERID = os.getenv('USER')
hlq = Datasets.hlq()
print("running jobs for ", hlq)
damember = "%s.OUTPUT.SDSFDAS" % hlq
da_contents = Datasets.read(damember)
#ckmember="%s.OUTPUT.SDSFCKS" % hlq
#hc_contents = Datasets.read(ckmember)

now = datetime.now()
formatted_date = now.strftime('%Y-%m-%d %H:%M:%S')

# Submit the job
jclmember = "%s.JCL(SDSFJCL)" % hlq
jobid = Jobs.submit(dataset=jclmember)
while True:
    js = Jobs.list(job_id=jobid)[0]
    if js["return"] != "?":
        break
    print(js)
    time.sleep(1)  # poll every second instead of busy-waiting
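Once the loop exits, js holds the job's final record; a minimal follow-up using only the "return" field shown above:

# Report the completion code after the polling loop finishes.
print("Job", jobid, "finished with return code", js["return"])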
Code example #7
def run_module():
    # ********************************************************** #
    #                Module initialization                       #
    # ********************************************************** #
    module = AnsibleModule(argument_spec=dict(
        src=dict(required=True, type="str"),
        dest=dict(required=True, type="path"),
        fail_on_missing=dict(required=False, default=True, type="bool"),
        flat=dict(required=False, default=True, type="bool"),
        is_binary=dict(required=False, default=False, type="bool"),
        use_qualifier=dict(required=False, default=False, type="bool"),
        validate_checksum=dict(required=False, default=True, type="bool"),
        encoding=dict(required=False, type="dict"),
    ))

    src = module.params.get("src")
    if module.params.get("use_qualifier"):
        module.params["src"] = Datasets.hlq() + "." + src

    # ********************************************************** #
    #                   Verify parameter validity                #
    # ********************************************************** #

    arg_def = dict(
        src=dict(arg_type="data_set_or_path", required=True),
        dest=dict(arg_type="path", required=True),
        fail_on_missing=dict(arg_type="bool", required=False, default=True),
        is_binary=dict(arg_type="bool", required=False, default=False),
        use_qualifier=dict(arg_type="bool", required=False, default=False),
    )

    if module.params.get("encoding"):
        module.params.update(
            dict(
                from_encoding=module.params.get("encoding").get("from"),
                to_encoding=module.params.get("encoding").get("to"),
            ))
        arg_def.update(
            dict(
                from_encoding=dict(arg_type="encoding"),
                to_encoding=dict(arg_type="encoding"),
            ))

    fetch_handler = FetchHandler(module)

    try:
        parser = better_arg_parser.BetterArgParser(arg_def)
        parsed_args = parser.parse_args(module.params)
    except ValueError as err:
        module.fail_json(msg="Parameter verification failed", stderr=str(err))
    src = parsed_args.get("src")
    b_src = to_bytes(src)
    fail_on_missing = boolean(parsed_args.get("fail_on_missing"))
    use_qualifier = boolean(parsed_args.get("use_qualifier"))
    is_binary = boolean(parsed_args.get("is_binary"))
    encoding = module.params.get("encoding")

    # ********************************************************** #
    #  Check for data set existence and determine its type       #
    # ********************************************************** #

    res_args = dict()
    _fetch_member = "(" in src and src.endswith(")")
    ds_name = src if not _fetch_member else src[:src.find("(")]
    try:
        ds_utils = data_set.DataSetUtils(ds_name)
        if not ds_utils.exists():
            if fail_on_missing:
                module.fail_json(msg=("The source '{0}' does not exist or is "
                                      "uncataloged".format(ds_name)))
            module.exit_json(
                note=("Source '{0}' was not found. No data was fetched".format(
                    ds_name)))
        ds_type = ds_utils.ds_type()
        if not ds_type:
            module.fail_json(msg="Unable to determine data set type")

    except Exception as err:
        module.fail_json(msg="Error while gathering data set information",
                         stderr=str(err))

    # ********************************************************** #
    #                  Fetch a sequential data set               #
    # ********************************************************** #

    if ds_type == "PS":
        file_path = fetch_handler._fetch_mvs_data(src, is_binary, encoding)
        res_args["remote_path"] = file_path

    # ********************************************************** #
    #    Fetch a partitioned data set or one of its members      #
    # ********************************************************** #

    elif ds_type == "PO":
        if _fetch_member:
            member_name = src[src.find("(") + 1:src.find(")")]
            if not ds_utils.member_exists(member_name):
                module.fail_json(
                    msg=("The data set member '{0}' was not found inside data "
                         "set '{1}'").format(member_name, ds_name))
            file_path = fetch_handler._fetch_mvs_data(src, is_binary, encoding)
            res_args["remote_path"] = file_path
        else:
            res_args["remote_path"] = fetch_handler._fetch_pdse(
                src, is_binary, encoding)

    # ********************************************************** #
    #                  Fetch a USS file                          #
    # ********************************************************** #

    elif ds_type == "USS":
        if not os.access(b_src, os.R_OK):
            module.fail_json(
                msg="File '{0}' does not have appropriate read permission".
                format(src))
        file_path = fetch_handler._fetch_uss_file(src, is_binary, encoding)
        res_args["remote_path"] = file_path

    # ********************************************************** #
    #                  Fetch a VSAM data set                     #
    # ********************************************************** #

    elif ds_type == "VSAM":
        file_path = fetch_handler._fetch_vsam(src, is_binary, encoding)
        res_args["remote_path"] = file_path

    res_args["file"] = ds_name
    res_args["ds_type"] = ds_type
    module.exit_json(**res_args)
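The member handling above hinges on a small parsing idiom; here it is in isolation with a made-up value:

# Standalone illustration of the member-name parsing used in run_module().
src = "USER.PDS.DATA(MEMBER1)"  # hypothetical source
fetch_member = "(" in src and src.endswith(")")
ds_name = src[:src.find("(")] if fetch_member else src
member_name = src[src.find("(") + 1:src.find(")")] if fetch_member else None
print(ds_name, member_name)  # USER.PDS.DATA MEMBER1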