Example #1
def _catalog_non_vsam_data_set(name, volume):
    """Catalog a non-VSAM data set.

    Arguments:
        name {str} -- The data set to catalog.
        volume {str} -- The volume the data set resides on.

    Raises:
        DatasetCatalogError: When attempt at catalog fails.
    """
    iehprogm_input = _build_non_vsam_catalog_command(name, volume)
    temp_data_set_name = None
    try:
        temp_data_set_name = _create_temp_data_set(name.split(".")[0])
        _write_data_set(temp_data_set_name, iehprogm_input)
        rc, stdout, stderr = module.run_command(
            "mvscmdauth --pgm=iehprogm --sysprint=* --sysin={0}".format(
                temp_data_set_name
            )
        )
        if rc != 0 or "NORMAL END OF TASK RETURNED" not in stdout:
            raise DatasetCatalogError(name, volume, rc)
    finally:
        if temp_data_set_name:
            Datasets.delete(temp_data_set_name)
    return
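A hedged usage sketch of the helper above; the data set name and volume serial are placeholders.

try:
    _catalog_non_vsam_data_set("USR.TEST.SEQ", "222222")
except DatasetCatalogError as err:
    print(str(err))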
Example #2
def _uncatalog_vsam_data_set(name):
    """Uncatalog a VSAM data set.

    Arguments:
        name {str} -- The name of the data set to uncatalog.

    Raises:
        DatasetUncatalogError: When uncataloging fails.
    """
    idcams_input = VSAM_UNCATALOG_COMMAND.format(name)
    temp_data_set_name = None
    try:
        temp_data_set_name = _create_temp_data_set(name.split(".")[0])
        _write_data_set(temp_data_set_name, idcams_input)
        dd_statements = []
        dd_statements.append(
            types.DDStatement(ddName="sysin", dataset=temp_data_set_name)
        )
        dd_statements.append(types.DDStatement(ddName="sysprint", dataset="*"))
        rc = MVSCmd.execute_authorized(pgm="idcams", args="", dds=dd_statements)
        if rc != 0:
            raise DatasetUncatalogError(name, rc)
    finally:
        if temp_data_set_name:
            Datasets.delete(temp_data_set_name)
    return
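The DDStatement/MVSCmd pattern above also works standalone. A minimal sketch, assuming a pre-built SYSIN data set already holds the IDCAMS control statements (names are hypothetical):

from zoautil_py import MVSCmd
from zoautil_py.types import DDStatement

dd_statements = [
    DDStatement(ddName="sysin", dataset="USR.IDCAMS.SYSIN"),  # hypothetical
    DDStatement(ddName="sysprint", dataset="*"),
]
rc = MVSCmd.execute_authorized(pgm="idcams", args="", dds=dd_statements)
print("IDCAMS ended with RC={0}".format(rc))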
Example #3
def _uncatalog_non_vsam_data_set(name):
    """Uncatalog a non-VSAM data set.

    Arguments:
        name {str} -- The name of the data set to uncatalog.

    Raises:
        DatasetUncatalogError: When uncataloging fails.
    """
    iehprogm_input = NON_VSAM_UNCATALOG_COMMAND.format(name)
    temp_data_set_name = None
    try:
        temp_data_set_name = _create_temp_data_set(name.split(".")[0])
        _write_data_set(temp_data_set_name, iehprogm_input)
        rc, stdout, stderr = module.run_command(
            "mvscmdauth --pgm=iehprogm --sysprint=* --sysin={0}".format(
                temp_data_set_name
            )
        )
        if rc != 0 or "NORMAL END OF TASK RETURNED" not in stdout:
            raise DatasetUncatalogError(name, rc)
    finally:
        if temp_data_set_name:
            Datasets.delete(temp_data_set_name)
    return
Example #4
def mvs_file_backup(dsn, bk_dsn):
    """Create a backup data set for an MVS data set

    Arguments:
        dsn {str} -- The name of the data set to backup.
                        It could be an MVS PS/PDS/PDSE/VSAM(KSDS), etc.
        bk_dsn {str} -- The name of the backup data set.

    Raises:
        BackupError: When backup data set exists.
        BackupError: When creation of backup data set fails.
    """
    dsn = _validate_data_set_name(dsn).upper()
    bk_dsn = _validate_data_set_name(bk_dsn).upper()
    if not bk_dsn:
        hlq = Datasets.hlq()
        bk_dsn = Datasets.temp_name(hlq)

    cp_rc = _copy_ds(dsn, bk_dsn)
    # The data set is probably a PDS or PDSE
    if cp_rc == 12:
        # Delete allocated backup that was created when attempting to use _copy_ds()
        # Safe to delete because _copy_ds() would have raised an exception if it did
        # not successfully create the backup data set, so no risk of it predating module invocation
        Datasets.delete(bk_dsn)
        if Datasets.move(dsn, bk_dsn) == 0:
            _allocate_model(dsn, bk_dsn)
        else:
            raise BackupError("Unable to backup data set {0} to {1}".format(
                dsn, bk_dsn))
Example #5
def _copy_ds(ds, bk_ds):
    """Copy the contents of a data set to another

    Arguments:
        ds {str} -- The source data set to be copied from. Should be SEQ or VSAM
        bk_ds {str} -- The destination data set to copy to.

    Raises:
        BackupError: When copying data fails
    """
    module = AnsibleModule(argument_spec={}, check_invalid_arguments=False)
    _allocate_model(bk_ds, ds)
    repro_cmd = """  REPRO -
    INDATASET({0}) -
    OUTDATASET({1})""".format(ds, bk_ds)
    rc, out, err = module.run_command(
        "mvscmdauth --pgm=idcams --sysprint=* --sysin=stdin", data=repro_cmd)
    if rc != 0 and rc != 12:
        Datasets.delete(bk_ds)
        raise BackupError(
            "Unable to backup data set {0}; stdout: {1}; stderr: {2}".format(
                ds, out, err))
    if rc != 0 and _vsam_empty(ds):
        rc = 0
    return rc
Example #6
    def create_temp_data_set(LLQ,
                             ds_type="SEQ",
                             size="5M",
                             ds_format="FB",
                             lrecl=80):
        """Creates a temporary data set with the given low level qualifier.

        Arguments:
            LLQ {str} -- Low Level Qualifier to be used for temporary data set
            ds_type {str} -- The data set type, default: Sequential
            size {str} -- The size of the data set, default: 5M
            ds_format {str} -- The record format of the data set, default: FB
            lrecl {int} -- The record length of the data set, default: 80

        Returns:
            str -- Name of the created data set

        Raises:
            OSError: When non-zero return code is received
            from Datasets.create()
        """
        chars = ascii_uppercase
        HLQ2 = ''.join(choice(chars) for i in range(5))
        HLQ3 = ''.join(choice(chars) for i in range(6))
        temp_ds_name = "{0}.{1}.{2}.{3}".format(Datasets.hlq(), HLQ2, HLQ3,
                                                LLQ)

        rc = Datasets.create(temp_ds_name, ds_type, size, ds_format, "", lrecl)
        if rc != 0:
            raise OSError("Unable to create temporary data set")

        return temp_ds_name
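A hedged usage sketch, assuming create_temp_data_set is reachable as a plain function (it is shown indented, so it may belong to a class):

temp_name = create_temp_data_set("TESTLLQ", ds_type="SEQ", size="1M")
Datasets.write(temp_name, "A record written to the temporary data set")
print(Datasets.read(temp_name))
Datasets.delete(temp_name)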
Example #7
 def __init__(self):
     """VIO DD type to be used in a DDStatement.
     VIO uses DASD space and system I/O more efficiently than other temporary data sets.
     A temporary data set will be created for use in cases where VIO is unavailable.
     Defaults for VIODefinition should be sufficient.
     """
     hlq = Datasets.hlq()
     name = Datasets.temp_name(hlq)
     super().__init__(name)
Example #8
    def _copy_vsam_to_temp_data_set(self, ds_name):
        """ Copy VSAM data set to a temporary sequential data set """
        mvs_rc = 0
        vsam_size = self._get_vsam_size(ds_name)
        sysprint = sysin = out_ds_name = None
        try:
            sysin = data_set.DataSet.create_temp("MVSTMP")
            sysprint = data_set.DataSet.create_temp("MVSTMP")
            out_ds_name = data_set.DataSet.create_temp("MSVTMP",
                                                       space_primary=vsam_size,
                                                       space_type="K")
            repro_sysin = " REPRO INFILE(INPUT)  OUTFILE(OUTPUT) "
            Datasets.write(sysin, repro_sysin)

            dd_statements = []
            dd_statements.append(
                types.DDStatement(ddName="sysin", dataset=sysin))
            dd_statements.append(
                types.DDStatement(ddName="input", dataset=ds_name))
            dd_statements.append(
                types.DDStatement(ddName="output", dataset=out_ds_name))
            dd_statements.append(
                types.DDStatement(ddName="sysprint", dataset=sysprint))

            mvs_rc = MVSCmd.execute_authorized(pgm="idcams",
                                               args="",
                                               dds=dd_statements)

        except OSError as err:
            self._fail_json(msg=str(err))

        except Exception as err:
            if Datasets.exists(out_ds_name):
                Datasets.delete(out_ds_name)

            if mvs_rc != 0:
                self._fail_json(
                    msg=(
                        "Non-zero return code received while executing MVSCmd "
                        "to copy VSAM data set {0}".format(ds_name)),
                    rc=mvs_rc,
                )
            self._fail_json(
                msg=("Failed to call IDCAMS to copy VSAM data set {0} to a "
                     "temporary sequential data set".format(ds_name)),
                stderr=str(err),
                rc=mvs_rc,
            )

        finally:
            if sysprint:
                Datasets.delete(sysprint)
            if sysin:
                Datasets.delete(sysin)

        return out_ds_name
Example #9
def check_pds_member(ds, mem):
    """Check that member mem exists in PDS ds; raise EncodeError if it does not."""
    check_rc = False
    if mem in Datasets.list_members(ds):
        check_rc = True
    else:
        raise EncodeError("Cannot find member {0} in {1}".format(mem, ds))
    return check_rc
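A hedged usage sketch of check_pds_member (data set and member names are placeholders):

try:
    if check_pds_member("USR.TEST.PDS", "MEMBER1"):
        print("MEMBER1 is present")
except EncodeError as err:
    print(str(err))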
Example #10
def _catalog_vsam_data_set(name, volume):
    """Catalog a VSAM data set.

    Arguments:
        name {str} -- The data set to catalog.
        volume {str} -- The volume the data set resides on.

    Raises:
        DatasetCatalogError: When attempt at catalog fails.
    """
    data_set_name = name.upper()
    data_set_volume = volume.upper()
    success = False
    temp_data_set_name = None
    try:
        temp_data_set_name = _create_temp_data_set(name.split(".")[0])
        command_rc = 0
        for data_set_type in ["", "LINEAR", "INDEXED", "NONINDEXED", "NUMBERED"]:
            if data_set_type != "INDEXED":
                command = VSAM_CATALOG_COMMAND_NOT_INDEXED.format(
                    data_set_name, data_set_volume, data_set_type
                )
            else:
                command = VSAM_CATALOG_COMMAND_INDEXED.format(
                    data_set_name, data_set_volume, data_set_type
                )

            _write_data_set(temp_data_set_name, command)
            dd_statements = []
            dd_statements.append(
                types.DDStatement(ddName="sysin", dataset=temp_data_set_name)
            )
            dd_statements.append(types.DDStatement(ddName="sysprint", dataset="*"))
            command_rc = MVSCmd.execute_authorized(
                pgm="idcams", args="", dds=dd_statements
            )
            if command_rc == 0:
                success = True
                break
        if not success:
            raise DatasetCatalogError(
                name, volume, command_rc, "Attempt to catalog VSAM data set failed."
            )
    finally:
        if temp_data_set_name:
            Datasets.delete(temp_data_set_name)
    return
Example #11
    def temp_data_set(self, reclen, space_u):
        """Creates a temporary data set with the given record length and size

        Arguments:
            reclen {int} -- The record length of the data set
            space_u {int} -- The space estimate for the data set, in kilobytes

        Returns:
            str -- Name of the allocated data set

        Raises:
            OSError: When any exception is raised during the data set allocation
        """
        size = str(space_u * 2) + "K"
        hlq = Datasets.hlq()
        temp_ps = Datasets.temp_name(hlq)
        rc = Datasets.create(temp_ps, "SEQ", size, "VB", "", reclen)
        if rc:
            raise OSError(
                "Failed when allocating temporary sequential data set!")
        return temp_ps
Example #12
def _delete_data_set(name):
    """A wrapper around zoautil_py
    Dataset.delete() to raise exceptions on failure.
    Arguments:
        name {str} -- The name of the data set to delete.
    Raises:
        DatasetDeleteError: When data set deletion fails.
    """
    rc = Datasets.delete(name)
    if rc > 0:
        raise DatasetDeleteError(name, rc)
    return
Example #13
def _create_temp_data_set(hlq):
    """Create a temporary data set.
    Arguments:
        hlq {str} -- The HLQ to use for the temporary data set's name.
    Returns:
        str -- The name of the temporary data set.
    """
    temp_data_set_name = Datasets.temp_name(hlq)
    _create_data_set(
        temp_data_set_name, {"type": "SEQ", "size": "5M", "format": "FB", "length": 80},
    )
    return temp_data_set_name
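A hedged sketch combining this helper with the wrappers from Examples #1 and #12 (the SYSIN content is a placeholder):

temp_name = _create_temp_data_set(Datasets.hlq())
try:
    _write_data_set(temp_name, "  LISTCAT ENTRIES(USR.TEST.SEQ)")
finally:
    _delete_data_set(temp_name)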
Example #14
    def _fetch_vsam(self, src, is_binary, encoding=None):
        """ Copy the contents of a VSAM to a sequential data set.
            Afterwards, copy that data set to a USS file.
        """
        temp_ds = self._copy_vsam_to_temp_data_set(src)
        file_path = self._fetch_mvs_data(temp_ds, is_binary, encoding)
        rc = Datasets.delete(temp_ds)
        if rc != 0:
            os.remove(file_path)
            self._fail_json(
                msg="Unable to delete temporary data set {0}".format(temp_ds),
                rc=rc)

        return file_path
Example #15
def _create_data_set(name, extra_args=None):
    """A wrapper around zoautil_py
    Dataset.create() to raise exceptions on failure.
    Arguments:
        name {str} -- The name of the data set to create.
        extra_args {dict} -- Optional keyword arguments passed through to Datasets.create().
    Raises:
        DatasetCreateError: When data set creation fails.
    """
    if extra_args is None:
        extra_args = {}
    rc = Datasets.create(name, **extra_args)
    if rc > 0:
        raise DatasetCreateError(name, rc)
    return
Example #16
def _delete_data_set_member(name):
    """A wrapper around zoautil_py
    Dataset.delete_members() to raise exceptions on failure.

    Arguments:
        name {str} -- The name of the data set, including member name, to delete.

    Raises:
        DatasetMemberDeleteError: When data set member deletion fails.
    """
    rc = Datasets.delete_members(name)
    if rc > 0:
        raise DatasetMemberDeleteError(name, rc)
    return
Example #17
def mvs_file_backup(dsn, bk_dsn=None):
    """Create a backup data set for an MVS data set

    Arguments:
        dsn {str} -- The name of the data set to backup.
                        It could be an MVS PS/PDS/PDSE/VSAM(KSDS), etc.
        bk_dsn {str} -- The name of the backup data set.

    Raises:
        BackupError: When backup data set exists.
        BackupError: When creation of backup data set fails.
    """
    dsn = _validate_data_set_name(dsn).upper()
    if is_member(dsn):
        if not bk_dsn:
            bk_dsn = extract_dsname(dsn) + "({0})".format(temp_member_name())
        bk_dsn = _validate_data_set_name(bk_dsn).upper()
        if Datasets.copy(dsn, bk_dsn) != 0:
            raise BackupError("Unable to backup {0} to {1}".format(
                dsn, bk_dsn))
    else:
        if not bk_dsn:
            bk_dsn = Datasets.temp_name(Datasets.hlq())
        bk_dsn = _validate_data_set_name(bk_dsn).upper()
        cp_rc = _copy_ds(dsn, bk_dsn)
        if cp_rc == 12:  # The data set is probably a PDS or PDSE
            # Delete allocated backup that was created when attempting to use _copy_ds()
            # Safe to delete because _copy_ds() would have raised an exception if it did
            # not successfully create the backup data set, so no risk of it predating module invocation
            Datasets.delete(bk_dsn)
            _allocate_model(bk_dsn, dsn)
            rc, out, err = _copy_pds(dsn, bk_dsn)
            if rc != 0:
                raise BackupError(
                    "Unable to backup data set {0} to {1}".format(dsn, bk_dsn))
    return bk_dsn
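A hedged usage sketch: with bk_dsn omitted, the function picks a backup name itself and returns it (the member path below is a placeholder):

backup_name = mvs_file_backup("USR.TEST.PDS(MEMBER1)")
print("backed up to {0}".format(backup_name))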
Example #18
def absent(src, line, regexp, encoding):
    """Delete lines with matching regex pattern

    Arguments:
        src: {str} -- The z/OS USS file or data set to modify.
        line: {str} -- The line to be deleted in the src. If line matches,
            regexp will be ignored.
        regexp: {str} -- The regular expression to look for in every line of the src.
        encoding: {str} -- Encoding of the src.

    Returns:
        str -- Information in JSON format. keys:
            cmd: {str} -- dsed shell command
            found: {int} -- Number of matches found for the regex pattern
            changed: {bool} -- Indicates if the source was modified.
    """
    return Datasets.lineinfile(src,
                               line,
                               regexp,
                               encoding=encoding,
                               state=False)
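A hedged usage sketch; whether unused selectors may be passed as None is an assumption about Datasets.lineinfile:

result = absent("USR.TEST.SEQ", None, "^DEBUG=", "IBM-1047")
print(result)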
Example #19
def present(src, line, regexp, ins_aft, ins_bef, encoding, first_match,
            backrefs):
    """Replace a line with the matching regex pattern
    Insert a line before/after the matching pattern
    Insert a line at BOF/EOF

    Arguments:
        src: {str} -- The z/OS USS file or data set to modify.
        line: {str} -- The line to insert/replace into the src.
        regexp: {str} -- The regular expression to look for in every line of the src.
            If regexp matches, ins_aft/ins_bef will be ignored.
        ins_aft: {str} -- Insert the line after matching '*regex*' pattern or EOF.
            choices:
                - EOF
                - '*regex*'
        ins_bef: {str} -- Insert the line before matching '*regex*' pattern or BOF.
            choices:
                - BOF
                - '*regex*'
        encoding: {str} -- Encoding of the src.
        first_match: {bool} -- Take the first matching regex pattern.
        backrefs: {bool} -- Expand back references in line when regexp matches.

    Returns:
        str -- Information in JSON format. keys:
            cmd: {str} -- dsed shell command
            found: {int} -- Number of matches found for the regex pattern
            changed: {bool} -- Indicates if the source was modified.
    """
    return Datasets.lineinfile(src,
                               line,
                               regexp,
                               ins_aft,
                               ins_bef,
                               encoding,
                               first_match,
                               backrefs,
                               state=True)
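A matching hedged sketch for present(), appending a line at end-of-file (again assuming None is acceptable for unused selectors):

result = present("USR.TEST.SEQ", "NEWOPTION=YES", None, "EOF", None,
                 "IBM-1047", False, False)
print(result)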
Example #20
def delete_data_set_member(name):
    """ A wrapper around zoautil_py data set delete_members to raise exceptions on failure. """
    rc = Datasets.delete_members(name)
    if rc > 0:
        raise DatasetMemberDeleteError(name, rc)
    return
Example #21
def create_data_set(name, extra_args=None):
    """ A wrapper around zoautil_py data set create to raise exceptions on failure. """
    if extra_args is None:
        extra_args = {}
    rc = Datasets.create(name, **extra_args)
    if rc > 0:
        raise DatasetCreateError(name, rc)
    return
Example #22
    def mvs_convert_encoding(self,
                             src,
                             dest,
                             from_code,
                             to_code,
                             src_type=None,
                             dest_type=None):
        """Convert the encoding of the data from
           1) USS to MVS (PS, PDS/E, VSAM)
           2) MVS to USS
           3) MVS to MVS

        Arguments:
            src: {str} -- The input MVS data set or USS path to be converted
            dest: {str} -- The output MVS data set or USS path to be converted
            from_code: {str} -- The source code set of the input MVS data set
            to_code: {str} -- The destination code set of the output MVS data set

        Keyword Arguments:
            src_type {str} -- The type of the input MVS data set: PS, PDS, PDSE, VSAM(KSDS) (default: {None})
            dest_type {str} -- The type of the output MVS data set (default: {None})

        Returns:
            bool -- True if the conversion succeeded, False otherwise
        """
        src = self._validate_data_set_or_path(src)
        dest = self._validate_data_set_or_path(dest)
        from_code = self._validate_encoding(from_code)
        to_code = self._validate_encoding(to_code)
        convert_rc = False
        temp_ps = None
        temp_src = src
        temp_dest = dest
        try:
            if src_type == "PS":
                temp_src_fo = NamedTemporaryFile()
                temp_src = temp_src_fo.name
                rc, out, err = copy.copy_ps2uss(src, temp_src)
            if src_type == "PO":
                temp_src_fo = TemporaryDirectory()
                temp_src = temp_src_fo.name
                rc, out, err = copy.copy_pds2uss(src, temp_src)
            if src_type == "VSAM":
                reclen, space_u = self.listdsi_data_set(src.upper())
                temp_ps = self.temp_data_set(reclen, space_u)
                rc, out, err = copy.copy_vsam_ps(src.upper(), temp_ps)
                temp_src_fo = NamedTemporaryFile()
                temp_src = temp_src_fo.name
                rc, out, err = copy.copy_ps2uss(temp_ps, temp_src)
            if dest_type == "PS" or dest_type == "VSAM":
                temp_dest_fo = NamedTemporaryFile()
                temp_dest = temp_dest_fo.name
            if dest_type == "PO":
                temp_dest_fo = TemporaryDirectory()
                temp_dest = temp_dest_fo.name
            rc = self.uss_convert_encoding_prev(temp_src, temp_dest, from_code,
                                                to_code)
            if rc:
                if not dest_type:
                    convert_rc = True
                else:
                    if dest_type == "VSAM":
                        reclen, space_u = self.listdsi_data_set(dest.upper())
                        temp_ps = self.temp_data_set(reclen, space_u)
                        rc, out, err = copy.copy_uss2mvs(
                            temp_dest, temp_ps, "PS")
                        rc, out, err = copy.copy_vsam_ps(temp_ps, dest.upper())
                        convert_rc = True
                    elif dest_type == "PO":
                        for (dirpath, subdirs, files) in walk(temp_dest):
                            for file in files:
                                temp_file = path.join(dirpath, file)
                                rc, out, err = copy.copy_uss2mvs(
                                    temp_file, dest, "PO")
                                convert_rc = True
                    else:
                        rc, out, err = copy.copy_uss2mvs(
                            temp_dest, dest, dest_type)
                        convert_rc = True
        except Exception:
            raise
        finally:
            if temp_ps:
                Datasets.delete(temp_ps)
        return convert_rc
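A hedged usage sketch; the owning class name (EncodeUtils, from the collection this method appears to come from) and the code pages are assumptions:

eu = EncodeUtils()  # assumed class name
converted = eu.mvs_convert_encoding("USR.TEST.PS", "/tmp/test.txt",
                                    "IBM-1047", "ISO8859-1", src_type="PS")
print("conversion succeeded" if converted else "conversion failed")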
Example #23
def run_module():
    # ********************************************************** #
    #                Module initialization                       #
    # ********************************************************** #
    module = AnsibleModule(argument_spec=dict(
        src=dict(required=True, type="str"),
        dest=dict(required=True, type="path"),
        fail_on_missing=dict(required=False, default=True, type="bool"),
        flat=dict(required=False, default=True, type="bool"),
        is_binary=dict(required=False, default=False, type="bool"),
        use_qualifier=dict(required=False, default=False, type="bool"),
        validate_checksum=dict(required=False, default=True, type="bool"),
        encoding=dict(required=False, type="dict"),
    ))

    src = module.params.get("src")
    if module.params.get("use_qualifier"):
        module.params["src"] = Datasets.hlq() + "." + src

    # ********************************************************** #
    #                   Verify parameter validity                #
    # ********************************************************** #

    arg_def = dict(
        src=dict(arg_type="data_set_or_path", required=True),
        dest=dict(arg_type="path", required=True),
        fail_on_missing=dict(arg_type="bool", required=False, default=True),
        is_binary=dict(arg_type="bool", required=False, default=False),
        use_qualifier=dict(arg_type="bool", required=False, default=False),
    )

    if module.params.get("encoding"):
        module.params.update(
            dict(
                from_encoding=module.params.get("encoding").get("from"),
                to_encoding=module.params.get("encoding").get("to"),
            ))
        arg_def.update(
            dict(
                from_encoding=dict(arg_type="encoding"),
                to_encoding=dict(arg_type="encoding"),
            ))

    fetch_handler = FetchHandler(module)

    try:
        parser = better_arg_parser.BetterArgParser(arg_def)
        parsed_args = parser.parse_args(module.params)
    except ValueError as err:
        module.fail_json(msg="Parameter verification failed", stderr=str(err))
    src = parsed_args.get("src")
    b_src = to_bytes(src)
    fail_on_missing = boolean(parsed_args.get("fail_on_missing"))
    use_qualifier = boolean(parsed_args.get("use_qualifier"))
    is_binary = boolean(parsed_args.get("is_binary"))
    encoding = module.params.get("encoding")

    # ********************************************************** #
    #  Check for data set existence and determine its type       #
    # ********************************************************** #

    res_args = dict()
    _fetch_member = "(" in src and src.endswith(")")
    ds_name = src if not _fetch_member else src[:src.find("(")]
    try:
        ds_utils = data_set.DataSetUtils(ds_name)
        if not ds_utils.exists():
            if fail_on_missing:
                module.fail_json(msg=("The source '{0}' does not exist or is "
                                      "uncataloged".format(ds_name)))
            module.exit_json(
                note=("Source '{0}' was not found. No data was fetched".format(
                    ds_name)))
        ds_type = ds_utils.ds_type()
        if not ds_type:
            module.fail_json(msg="Unable to determine data set type")

    except Exception as err:
        module.fail_json(msg="Error while gathering data set information",
                         stderr=str(err))

    # ********************************************************** #
    #                  Fetch a sequential data set               #
    # ********************************************************** #

    if ds_type == "PS":
        file_path = fetch_handler._fetch_mvs_data(src, is_binary, encoding)
        res_args["remote_path"] = file_path

    # ********************************************************** #
    #    Fetch a partitioned data set or one of its members      #
    # ********************************************************** #

    elif ds_type == "PO":
        if _fetch_member:
            member_name = src[src.find("(") + 1:src.find(")")]
            if not ds_utils.member_exists(member_name):
                module.fail_json(
                    msg=("The data set member '{0}' was not found inside data "
                         "set '{1}'").format(member_name, ds_name))
            file_path = fetch_handler._fetch_mvs_data(src, is_binary, encoding)
            res_args["remote_path"] = file_path
        else:
            res_args["remote_path"] = fetch_handler._fetch_pdse(
                src, is_binary, encoding)

    # ********************************************************** #
    #                  Fetch a USS file                          #
    # ********************************************************** #

    elif ds_type == "USS":
        if not os.access(b_src, os.R_OK):
            module.fail_json(
                msg="File '{0}' does not have appropriate read permission".
                format(src))
        file_path = fetch_handler._fetch_uss_file(src, is_binary, encoding)
        res_args["remote_path"] = file_path

    # ********************************************************** #
    #                  Fetch a VSAM data set                     #
    # ********************************************************** #

    elif ds_type == "VSAM":
        file_path = fetch_handler._fetch_vsam(src, is_binary, encoding)
        res_args["remote_path"] = file_path

    res_args["file"] = ds_name
    res_args["ds_type"] = ds_type
    module.exit_json(**res_args)
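For completeness, the conventional Ansible entry point that would accompany run_module() (standard boilerplate, not part of the original snippet):

def main():
    run_module()


if __name__ == "__main__":
    main()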
Example #24
#Import the Z Open Automation Utilities libraries we need
from zoautil_py import MVSCmd, Datasets
from zoautil_py.types import DDStatement
import os

# Grab the environment variable for USER, which should be equal to your Zxxxxx userid
USERID = os.getenv('USER')
dataset_to_list = "WORK"

target_dataset = USERID + "." + dataset_to_list
ds_members = Datasets.list_members(target_dataset)
print(ds_members)
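A hedged follow-on: on the ZOAU level these examples target, Datasets.list_members appears to return a newline-separated string, so per-member processing looks like this (verify the return type on your ZOAU version):

# Print one member per line, assuming a newline-separated string result.
if ds_members:
    for member in ds_members.split("\n"):
        print("member:", member)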
Example #25
import logging.config

from zoautil_py import MVSCmd, Datasets
from zoautil_py.types import DDStatement

logging.config.fileConfig('logging.conf')

# Create list of DD statements for MVSCmd
dd_statements = []
dd_statements.append(
    DDStatement(ddName="sortin01", dataset="USR.MVSCMD.DFSORT.MASTER"))
dd_statements.append(
    DDStatement(ddName="sortin02", dataset="USR.MVSCMD.DFSORT.NEW"))
dd_statements.append(
    DDStatement(ddName="sysin", dataset="USR.MVSCMD.DFSORT.CMD"))
dd_statements.append(
    DDStatement(ddName="sortout", dataset="USR.MVSCMD.DFSORT.MERGE"))
dd_statements.append(DDStatement(ddName="sysout", dataset="*"))

# Delete the data sets if they already exist
Datasets.delete("USR.MVSCMD.DFSORT.*")

# Create datasets
Datasets.create("USR.MVSCMD.DFSORT.MASTER", type="SEQ")
Datasets.create("USR.MVSCMD.DFSORT.NEW", type="SEQ")
Datasets.create("USR.MVSCMD.DFSORT.CMD", type="SEQ")
Datasets.create("USR.MVSCMD.DFSORT.MERGE", type="SEQ")

# Write command to USR.MVSCMD.DFSORT.CMD
Datasets.write("USR.MVSCMD.DFSORT.CMD", " MERGE FORMAT=CH,FIELDS=(1,9,A)")

# Write example text to USR.MVSCMD.DFSORT.MASTER
Datasets.write("USR.MVSCMD.DFSORT.MASTER", "Chang Joe 278 232 6043")
Datasets.write("USR.MVSCMD.DFSORT.MASTER",
               "DeBeer Jo 348 132 6023",
               append=True)
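The example sets up the DD list but stops before running the merge; a hedged sketch of the execution step, with the program name and arguments taken from IBM's published ZOAU DFSORT sample (verify for your system):

rc = MVSCmd.execute(pgm="sort", args="MSGPRT=CRITICAL,LIST",
                    dds=dd_statements)
print("DFSORT merge ended with RC={0}".format(rc))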
Example #26
#Import the Z Open Automation Utilities libraries we need
from zoautil_py import MVSCmd, Datasets
from zoautil_py.types import DDStatement
# Import datetime, needed so we can format the report
from datetime import datetime
# Import os, needed to get the environment variables
import os

#Take the contents of this data set and read it into cc_contents
cc_contents = Datasets.read("MTM2020.PUBLIC.CUST16")

USERID = os.getenv("USER")
output_dataset = USERID + ".OUTPUT.CCINVALD"
#Delete the output dataset if it already exists
if Datasets.exists(output_dataset):
    Datasets.delete(output_dataset)

Datasets.create(output_dataset, "SEQ")

# Create a new SEQUENTIAL DATA SET with the name of output_dataset


# A function that checks to see if the number passed to it is even. Returns True or False (Boolean).
def is_even(num_to_check):       # num_to_check is what gets sent to it
    if (num_to_check % 2) == 0:  # a simple check to see if num_to_check is even.
        result = True            # We set result to True
        return result            # and then return it.
    else:                        # if it isn't even,
        result = False           # set result to False
        return result            # and return that instead.
Example #27
#Import the Z Open Automation Utilities libraries we need
from zoautil_py import MVSCmd, Datasets
from zoautil_py.types import DDStatement
# Import datetime, needed so we can format the report
from datetime import datetime
# Import os, needed to get the environment variables
import os

#Take the contents of this data set and read it into cc_contents
cc_contents = Datasets.read("MTM2020.PUBLIC.CUST16")

USERID = os.getenv('USER')
output_dataset=USERID+".OUTPUT.CCINVALD"
#Delete the output dataset if it already exists
if Datasets.exists(output_dataset):
    Datasets.delete(output_dataset)
# Use this line to create a new SEQUENTIAL DATA SET with the name of output_dataset
# (hint: https://www.ibm.com/support/knowledgecenter/SSKFYE_1.0.1/python_doc_zoautil/api/datasets.html?view=embed)


#A function that checks to see if the number passed to it is even. Returns True or False (Boolean)
def is_even(num_to_check):              # this is a function. num_to_check is what gets sent to it
    if ((num_to_check % 2) == 0):       # a simple check to see if num_to_check is even.
        result = True                   # We set result to True
        return result                   # and then return it.
    else:                               # if it isn't even,
        result = False                  # set result to False
        return result                   # and return that instead.
Example #28
#Import the Z Open Automation Utilities libraries we need
from zoautil_py import MVSCmd, Datasets
from zoautil_py.types import DDStatement
# Import datetime, needed so we can format the report
from datetime import datetime
# Import os, needed to get the environment variables
import os

#Take the contents of this data set and read it into cc_contents
cc_contents = Datasets.read("MTM2020.PUBLIC.CUST16")

USERID = os.getenv('USER')
output_dataset = USERID + ".OUTPUT(CCINVALD)"
#Delete the output dataset if it already exists
#if Datasets.exists(output_dataset):
#    Datasets.delete(output_dataset)
# Use this line to create a new SEQUENTIAL DATA SET with the name of output_dataset
# (hint: https://www.ibm.com/support/knowledgecenter/SSKFYE_1.0.1/python_doc_zoautil/api/datasets.html?view=embed)
#Datasets.create(output_dataset, type ="SEQ")


def luhn(card_number):
    def digits_of(n):
        return [int(d) for d in str(n)]

    digits = digits_of(card_number)
    odd_digits = digits[-1::-2]
    even_digits = digits[-2::-2]
    checksum = 0
    checksum += sum(odd_digits)
    for d in even_digits:
        checksum += sum(digits_of(d * 2))
    return checksum % 10
Example #29
import mysql.connector
#Import the Z Open Automation Utilities libraries we need
from zoautil_py import MVSCmd, Datasets, Jobs
from zoautil_py.types import DDStatement
# Import datetime, needed so we can format the report
from datetime import datetime
# Import os, needed to get the environment variables
import os
import string
import time
#Take the contents of this data set and read it into content variables
USERID = os.getenv('USER')
hlq = Datasets.hlq()
print("running jobs for ", hlq)
damember = "%s.OUTPUT.SDSFDAS" % hlq
da_contents = Datasets.read(damember)
#ckmember="%s.OUTPUT.SDSFCKS" % hlq
#hc_contents = Datasets.read(ckmember)

now = datetime.now()
formatted_date = now.strftime('%Y-%m-%d %H:%M:%S')

#Submit the job
jclmember = "%s.JCL(SDSFJCL)" % hlq
jobid = Jobs.submit(dataset=jclmember)
while True:
    js = Jobs.list(job_id=jobid)[0]
    if js["return"] != "?":
        break
    print(js)
    time.sleep(1)
#Import the Z Open Automation Utilities libraries we need
from zoautil_py import MVSCmd, Datasets
from zoautil_py.types import DDStatement
# Import datetime, needed so we can format the report
from datetime import datetime
# Import os, needed to get the environment variables
import os

#Take the contents of this data set and read it into cc_contents
cc_contents = Datasets.read("MTM2020.PUBLIC.CUST16")

USERID = os.getenv('USER')
output_dataset=USERID+".OUTPUT.CCINVALD"
#Delete the output dataset if it already exists
if Datasets.exists(output_dataset):
    Datasets.delete(output_dataset)
# Use this line to create a new SEQUENTIAL DATA SET with the name of output_dataset
# (hint: https://www.ibm.com/support/knowledgecenter/SSKFYE_1.0.1/python_doc_zoautil/api/datasets.html?view=embed)


def luhn(card_number):
    def digits_of(n):
        return [int(d) for d in str(n)]
    digits = digits_of(card_number)
    odd_digits = digits[-1::-2]
    even_digits = digits[-2::-2]
    checksum = 0
    checksum += sum(odd_digits)
    for d in even_digits:
        checksum += sum(digits_of(d*2))
    return (checksum % 10)
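To close the loop on these exercise snippets: a number passes the Luhn check when the function returns 0. A hedged sketch that writes failing records to output_dataset; the record layout of MTM2020.PUBLIC.CUST16 and the prior creation of output_dataset are assumptions:

first_write = True
for record in cc_contents.split("\n"):
    fields = record.split()
    if not fields:
        continue
    card_number = fields[-1]    # assumed: card number is the last field
    if luhn(card_number) != 0:  # nonzero checksum means the check failed
        # append=False on the first write replaces any old contents
        Datasets.write(output_dataset, record, append=not first_write)
        first_write = False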