Example #1
def node_run(volume, host, path, start, outfile, args, fallback=False):
    """
    If host is local node, execute the command locally. If not local
    execute the CHANGE_DETECTOR command via ssh and copy the output file from
    remote node using scp.
    """
    localdir = is_host_local(host)
    pem_key_path = get_pem_key_path(args.session, args.volume)

    # If Full backup is requested or start time is zero, use brickfind
    change_detector = conf.get_change_detector(args.change_detector)
    if ((start == 0 or args.full) and args.change_detector == "changelog") or \
       fallback:
        change_detector = conf.get_change_detector("brickfind")

    # CHANGE_DETECTOR <SESSION> <VOLUME> <BRICK> <OUTFILE> <START> --debug
    # --gfidpath <TYPE>
    cmd = [change_detector,
           args.session,
           volume,
           path,
           outfile,
           str(start),
           "--output-prefix",
           args.output_prefix] + \
        (["--debug"] if args.debug else []) + \
        (["--full"] if args.full else [])

    if not localdir:
        # prefix with ssh command if not local node
        cmd = ["ssh",
               "-i", pem_key_path,
               "root@%s" % host] + cmd

    rc, out, err = execute(cmd, logger=logger)
    if rc == 2:
        # Partial History Fallback
        logger.info("%s %s Fallback to brickfind" % (host, err.strip()))
        # Exit only from process, handled in main.
        sys.exit(rc)
    elif rc != 0:
        fail("%s - Change detection failed" % host, logger=logger)

    if not localdir:
        cmd_copy = ["scp",
                    "-i", pem_key_path,
                    "root@%s:/%s" % (host, outfile),
                    os.path.dirname(outfile)]
        execute(cmd_copy, exit_msg="%s - Copy command failed" % host,
                logger=logger)
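
The comment above sys.exit(rc) says the rc == 2 case is handled in main; a minimal sketch of such a caller, assuming node_run is executed in a child process (the driver name is hypothetical):

from multiprocessing import Process

def node_run_with_fallback(volume, host, path, start, outfile, args):
    # Hypothetical driver: run node_run in a child process and, if it exits
    # with code 2 (partial changelog history), retry once with the brickfind
    # fallback, mirroring the "handled in main" comment above.
    p = Process(target=node_run,
                args=(volume, host, path, start, outfile, args))
    p.start()
    p.join()
    if p.exitcode != 2:
        return p.exitcode
    retry = Process(target=node_run,
                    args=(volume, host, path, start, outfile, args),
                    kwargs={"fallback": True})
    retry.start()
    retry.join()
    return retry.exitcode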
Example #2
def node_run(volume, host, path, start, outfile, args, fallback=False):
    """
    If host is local node, execute the command locally. If not local
    execute the CHANGE_DETECTOR command via ssh and copy the output file from
    remote node using scp.
    """
    localdir = is_host_local(host)

    # If Full backup is requested or start time is zero, use brickfind
    change_detector = conf.get_change_detector(args.change_detector)
    if ((start == 0 or args.full) and args.change_detector == "changelog") or \
       fallback:
        change_detector = conf.get_change_detector("brickfind")

    # CHANGE_DETECTOR <SESSION> <VOLUME> <BRICK> <OUTFILE> <START> --debug
    # --gfidpath <TYPE>
    cmd = [change_detector,
           args.session,
           volume,
           path,
           outfile,
           str(start),
           "--output-prefix",
           args.output_prefix] + \
        (["--debug"] if args.debug else []) + \
        (["--full"] if args.full else [])

    if not localdir:
        # prefix with ssh command if not local node
        cmd = ["ssh", "-i", conf.get_opt("secret_pem"), "root@%s" % host] + cmd

    rc, out, err = execute(cmd, logger=logger)
    if rc == 2:
        # Partial History Fallback
        logger.info("%s %s Fallback to brickfind" % (host, err.strip()))
        # Exit only from process, handled in main.
        sys.exit(rc)
    elif rc != 0:
        fail("%s - Change detection failed" % host, logger=logger)

    if not localdir:
        cmd_copy = [
            "scp", "-i",
            conf.get_opt("secret_pem"),
            "root@%s:/%s" % (host, outfile),
            os.path.dirname(outfile)
        ]
        execute(cmd_copy,
                exit_msg="%s - Copy command failed" % host,
                logger=logger)
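
A hypothetical way to fan node_run out over all bricks of a volume in parallel; run_all_bricks and its arguments are illustrative assumptions, not part of the tool:

import os
from multiprocessing import Process

def run_all_bricks(volume, bricks, start, args, outdir):
    # bricks: list of (host, brick_path) tuples for the volume.
    workers = []
    for num, (host, path) in enumerate(bricks):
        outfile = os.path.join(outdir, "tmp_output_%s" % num)
        p = Process(target=node_run,
                    args=(volume, host, path, start, outfile, args))
        p.start()
        workers.append(p)

    for p in workers:
        p.join()

    # A non-zero exit code other than 2 means change detection failed on
    # that node; 2 means the caller should retry with fallback=True.
    return [p.exitcode for p in workers]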
Example #3
def run_cmd_nodes(task, args, **kwargs):
    global node_outfiles
    nodes = get_nodes(args.volume)
    pool = []
    for num, node in enumerate(nodes):
        host, brick = node[1].split(":")
        host_uuid = node[0]
        cmd = []
        opts = {}

        # tmpfilename is valid only for tasks: pre, query and cleanup
        tmpfilename = kwargs.get("tmpfilename", "BADNAME")

        node_outfile = os.path.join(conf.get_opt("working_dir"), args.session,
                                    args.volume, tmpfilename,
                                    "tmp_output_%s" % num)

        if task == "pre":
            if vol_statusStr != "Started":
                fail("Volume %s is not online" % args.volume, logger=logger)

            # If Full backup is requested or start time is zero, use brickfind
            change_detector = conf.get_change_detector("changelog")
            tag = None
            if args.full:
                change_detector = conf.get_change_detector("brickfind")
                tag = args.tag_for_full_find.strip()
                if tag == "":
                    tag = '""' if not is_host_local(host_uuid) else ""

            node_outfiles.append(node_outfile)
            # remote file will be copied into this directory
            mkdirp(os.path.dirname(node_outfile),
                   exit_on_err=True,
                   logger=logger)

            FS = args.field_separator
            if not is_host_local(host_uuid):
                FS = "'" + FS + "'"

            cmd = [change_detector,
                   args.session,
                   args.volume,
                   host,
                   brick,
                   node_outfile] + \
                ([str(kwargs.get("start")), str(kwargs.get("end"))]
                    if not args.full else []) + \
                ([tag] if tag is not None else []) + \
                ["--output-prefix", args.output_prefix] + \
                (["--debug"] if args.debug else []) + \
                (["--no-encode"] if args.no_encode else []) + \
                (["--only-namespace-changes"] if args.only_namespace_changes
                 else []) + \
                (["--field-separator", FS] if args.full else [])

            opts["node_outfile"] = node_outfile
            opts["copy_outfile"] = True
        elif task == "query":
            # If Full backup is requested or start time is zero, use brickfind
            tag = None
            change_detector = conf.get_change_detector("changelog")
            if args.full:
                change_detector = conf.get_change_detector("brickfind")
                tag = args.tag_for_full_find.strip()
                if tag == "":
                    tag = '""' if not is_host_local(host_uuid) else ""

            node_outfiles.append(node_outfile)
            # remote file will be copied into this directory
            mkdirp(os.path.dirname(node_outfile),
                   exit_on_err=True,
                   logger=logger)

            FS = args.field_separator
            if not is_host_local(host_uuid):
                FS = "'" + FS + "'"

            cmd = [change_detector,
                   args.session,
                   args.volume,
                   host,
                   brick,
                   node_outfile] + \
                ([str(kwargs.get("start")), str(kwargs.get("end"))]
                    if not args.full else []) + \
                ([tag] if tag is not None else []) + \
                ["--only-query"] + \
                ["--output-prefix", args.output_prefix] + \
                (["--debug"] if args.debug else []) + \
                (["--no-encode"] if args.no_encode else []) + \
                (["--only-namespace-changes"]
                    if args.only_namespace_changes else []) + \
                (["--field-separator", FS] if args.full else [])

            opts["node_outfile"] = node_outfile
            opts["copy_outfile"] = True
        elif task == "cleanup":
            # After pre/query run, cleanup the working directory and other
            # temp files. Remove the directory to which node_outfile has
            # been copied in main node
            try:
                os.remove(node_outfile)
            except (OSError, IOError):
                logger.warn("Failed to cleanup temporary file %s" %
                            node_outfile)
                pass

            cmd = [conf.get_opt("nodeagent"),
                   "cleanup",
                   args.session,
                   args.volume,
                   os.path.dirname(node_outfile)] + \
                (["--debug"] if args.debug else [])
        elif task == "create":
            if vol_statusStr != "Started":
                fail("Volume %s is not online" % args.volume, logger=logger)

            # When glusterfind create, create session directory in
            # each brick nodes
            cmd = [conf.get_opt("nodeagent"),
                   "create",
                   args.session,
                   args.volume,
                   brick,
                   kwargs.get("time_to_update")] + \
                (["--debug"] if args.debug else []) + \
                (["--reset-session-time"] if args.reset_session_time
                 else [])
        elif task == "post":
            # Rename pre status file to actual status file in each node
            cmd = [conf.get_opt("nodeagent"),
                   "post",
                   args.session,
                   args.volume,
                   brick] + \
                (["--debug"] if args.debug else [])
        elif task == "delete":
            # When glusterfind delete, cleanup all the session files/dirs
            # from each node.
            cmd = [conf.get_opt("nodeagent"),
                   "delete",
                   args.session,
                   args.volume] + \
                (["--debug"] if args.debug else [])

        if cmd:
            p = Process(target=node_cmd,
                        args=(host, host_uuid, task, cmd, args, opts))
            p.start()
            pool.append(p)

    for num, p in enumerate(pool):
        p.join()
        if p.exitcode != 0:
            logger.warn("Command %s failed in %s" % (task, nodes[num][1]))
            if task in ["create", "delete"]:
                fail("Command %s failed in %s" % (task, nodes[num][1]))
            elif task == "pre" and args.disable_partial:
                sys.exit(1)
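
The kwargs consumed by run_cmd_nodes depend on the task; a hedged sketch of a full pre cycle (the timestamps, tmpfilename value, and call order are illustrative assumptions):

import time
from uuid import uuid4

tmpfilename = uuid4().hex                          # shared by pre and cleanup
session_start, session_end = 0, int(time.time())   # illustrative time window

# Create the session directory on every brick node.
run_cmd_nodes("create", args, time_to_update=str(session_end))

# Incremental pre run over the [start, end] window; per-node outputs are
# written under working_dir/<session>/<volume>/<tmpfilename>/.
run_cmd_nodes("pre", args, start=session_start, end=session_end,
              tmpfilename=tmpfilename)

# Once the per-node outputs have been merged, promote the pre status file
# and remove the temporary output directories.
run_cmd_nodes("post", args)
run_cmd_nodes("cleanup", args, tmpfilename=tmpfilename)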
Example #4
def run_cmd_nodes(task, args, **kwargs):
    global node_outfiles
    nodes = get_nodes(args.volume)
    pool = []
    for num, node in enumerate(nodes):
        host, brick = node[1].split(":")
        host_uuid = node[0]
        cmd = []
        opts = {}
        node_outfile = os.path.join(conf.get_opt("working_dir"),
                                    args.session, args.volume,
                                    "tmp_output_%s" % num)

        if task == "pre":
            if vol_statusStr == "Stopped":
                fail("Volume %s is in stopped state" % args.volume,
                    logger=logger)

            # If Full backup is requested or start time is zero, use brickfind
            change_detector = conf.get_change_detector("changelog")
            if args.full:
                change_detector = conf.get_change_detector("brickfind")

            node_outfiles.append(node_outfile)

            cmd = [change_detector,
                   args.session,
                   args.volume,
                   brick,
                   node_outfile,
                   str(kwargs.get("start")),
                   "--output-prefix",
                   args.output_prefix] + \
                (["--debug"] if args.debug else []) + \
                (["--only-namespace-changes"] if args.only_namespace_changes
                 else [])

            opts["node_outfile"] = node_outfile
            opts["copy_outfile"] = True
        elif task == "cleanup":
            # After pre run, cleanup the working directory and other temp files
            # Remove the copied node_outfile in main node
            try:
                os.remove(node_outfile)
            except (OSError, IOError):
                logger.warn("Failed to cleanup temporary file %s" %
                            node_outfile)
                pass

            cmd = [conf.get_opt("nodeagent"),
                   "cleanup",
                   args.session,
                   args.volume] + (["--debug"] if args.debug else [])
        elif task == "create":
            if vol_statusStr == "Stopped":
                fail("Volume %s is in stopped state" % args.volume,
                    logger=logger)

            # When glusterfind create, create session directory in
            # each brick nodes
            cmd = [conf.get_opt("nodeagent"),
                   "create",
                   args.session,
                   args.volume,
                   brick,
                   kwargs.get("time_to_update")] + \
                (["--debug"] if args.debug else []) + \
                (["--reset-session-time"] if args.reset_session_time
                 else [])
        elif task == "post":
            # Rename pre status file to actual status file in each node
            cmd = [conf.get_opt("nodeagent"),
                   "post",
                   args.session,
                   args.volume,
                   brick] + \
                (["--debug"] if args.debug else [])
        elif task == "delete":
            # When glusterfind delete, cleanup all the session files/dirs
            # from each node.
            cmd = [conf.get_opt("nodeagent"),
                   "delete",
                   args.session,
                   args.volume] + \
                (["--debug"] if args.debug else [])

        if cmd:
            p = Process(target=node_cmd,
                        args=(host, host_uuid, task, cmd, args, opts))
            p.start()
            pool.append(p)

    for num, p in enumerate(pool):
        p.join()
        if p.exitcode != 0:
            logger.warn("Command %s failed in %s" % (task, nodes[num][1]))
            if task in ["create", "delete"]:
                fail("Command %s failed in %s" % (task, nodes[num][1]))
            elif task == "pre" and args.disable_partial:
                sys.exit(1)
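
opts["copy_outfile"] tells the worker to fetch the per-brick output back to the initiating node; a minimal sketch of that step, modelled on the scp handling in Examples #1 and #2 rather than the actual node_cmd implementation (the helper name is hypothetical):

import os

def copy_outfile_from_node(host, node_outfile, pem_key_path):
    # Hypothetical helper: after the remote command succeeds, pull the output
    # file into the matching directory on the initiating node.
    if is_host_local(host):
        return  # output was already written locally
    cmd_copy = ["scp",
                "-i", pem_key_path,
                "root@%s:/%s" % (host, node_outfile),
                os.path.dirname(node_outfile)]
    execute(cmd_copy,
            exit_msg="%s - Copy command failed" % host,
            logger=logger)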
Example #5
def run_cmd_nodes(task, args, **kwargs):
    global node_outfiles
    nodes = get_nodes(args.volume)
    pool = []
    for num, node in enumerate(nodes):
        host, brick = node[1].split(":")
        host_uuid = node[0]
        cmd = []
        opts = {}

        # tmpfilename is valid only for tasks: pre, query and cleanup
        tmpfilename = kwargs.get("tmpfilename", "BADNAME")

        node_outfile = os.path.join(conf.get_opt("working_dir"),
                                    args.session, args.volume,
                                    tmpfilename,
                                    "tmp_output_%s" % num)

        if task == "pre":
            if vol_statusStr != "Started":
                fail("Volume %s is not online" % args.volume,
                     logger=logger)

            # If Full backup is requested or start time is zero, use brickfind
            change_detector = conf.get_change_detector("changelog")
            tag = None
            if args.full:
                change_detector = conf.get_change_detector("brickfind")
                tag = args.tag_for_full_find.strip()
                if tag == "":
                    tag = '""' if not is_host_local(host_uuid) else ""

            node_outfiles.append(node_outfile)
            # remote file will be copied into this directory
            mkdirp(os.path.dirname(node_outfile),
                   exit_on_err=True, logger=logger)

            FS = args.field_separator
            if not is_host_local(host_uuid):
                FS = "'" + FS + "'"

            cmd = [change_detector,
                   args.session,
                   args.volume,
                   host,
                   brick,
                   node_outfile] + \
                ([str(kwargs.get("start")), str(kwargs.get("end"))]
                    if not args.full else []) + \
                ([tag] if tag is not None else []) + \
                ["--output-prefix", args.output_prefix] + \
                (["--debug"] if args.debug else []) + \
                (["--no-encode"] if args.no_encode else []) + \
                (["--only-namespace-changes"] if args.only_namespace_changes
                 else []) + \
                (["--type", args.type]) + \
                (["--field-separator", FS] if args.full else [])

            opts["node_outfile"] = node_outfile
            opts["copy_outfile"] = True
        elif task == "query":
            # If Full backup is requested or start time is zero, use brickfind
            tag = None
            change_detector = conf.get_change_detector("changelog")
            if args.full:
                change_detector = conf.get_change_detector("brickfind")
                tag = args.tag_for_full_find.strip()
                if tag == "":
                    tag = '""' if not is_host_local(host_uuid) else ""

            node_outfiles.append(node_outfile)
            # remote file will be copied into this directory
            mkdirp(os.path.dirname(node_outfile),
                   exit_on_err=True, logger=logger)

            FS = args.field_separator
            if not is_host_local(host_uuid):
                FS = "'" + FS + "'"

            cmd = [change_detector,
                   args.session,
                   args.volume,
                   host,
                   brick,
                   node_outfile] + \
                ([str(kwargs.get("start")), str(kwargs.get("end"))]
                    if not args.full else []) + \
                ([tag] if tag is not None else []) + \
                ["--only-query"] + \
                ["--output-prefix", args.output_prefix] + \
                (["--debug"] if args.debug else []) + \
                (["--no-encode"] if args.no_encode else []) + \
                (["--only-namespace-changes"]
                    if args.only_namespace_changes else []) + \
                (["--type", args.type]) + \
                (["--field-separator", FS] if args.full else [])

            opts["node_outfile"] = node_outfile
            opts["copy_outfile"] = True
        elif task == "cleanup":
            # After pre/query run, cleanup the working directory and other
            # temp files. Remove the directory to which node_outfile has
            # been copied in main node
            try:
                os.remove(node_outfile)
            except (OSError, IOError):
                logger.warn("Failed to cleanup temporary file %s" %
                            node_outfile)
                pass

            cmd = [conf.get_opt("nodeagent"),
                   "cleanup",
                   args.session,
                   args.volume,
                   os.path.dirname(node_outfile)] + \
                (["--debug"] if args.debug else [])
        elif task == "create":
            if vol_statusStr != "Started":
                fail("Volume %s is not online" % args.volume,
                     logger=logger)

            # When glusterfind create, create session directory in
            # each brick nodes
            cmd = [conf.get_opt("nodeagent"),
                   "create",
                   args.session,
                   args.volume,
                   brick,
                   kwargs.get("time_to_update")] + \
                (["--debug"] if args.debug else []) + \
                (["--reset-session-time"] if args.reset_session_time
                 else [])
        elif task == "post":
            # Rename pre status file to actual status file in each node
            cmd = [conf.get_opt("nodeagent"),
                   "post",
                   args.session,
                   args.volume,
                   brick] + \
                (["--debug"] if args.debug else [])
        elif task == "delete":
            # When glusterfind delete, cleanup all the session files/dirs
            # from each node.
            cmd = [conf.get_opt("nodeagent"),
                   "delete",
                   args.session,
                   args.volume] + \
                (["--debug"] if args.debug else [])

        if cmd:
            p = Process(target=node_cmd,
                        args=(host, host_uuid, task, cmd, args, opts))
            p.start()
            pool.append(p)

    for num, p in enumerate(pool):
        p.join()
        if p.exitcode != 0:
            logger.warn("Command %s failed in %s" % (task, nodes[num][1]))
            if task in ["create", "delete"]:
                fail("Command %s failed in %s" % (task, nodes[num][1]))
            elif task == "pre" and args.disable_partial:
                sys.exit(1)
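
The single-quote wrapping of FS above protects the field separator from the remote shell when the command line is sent through ssh; an assumed, more general alternative (not what the tool does) is shlex quoting:

from shlex import quote   # shlex.quote on Python 3; pipes.quote on Python 2

FS = args.field_separator
if not is_host_local(host_uuid):
    # Quote only for the remote case, where the argument is parsed a second
    # time by the remote shell.
    FS = quote(FS)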
Example #6
def run_cmd_nodes(task, args, **kwargs):
    global node_outfiles
    nodes = get_nodes(args.volume)
    pool = []
    for num, node in enumerate(nodes):
        host, brick = node[1].split(":")
        host_uuid = node[0]
        cmd = []
        opts = {}
        node_outfile = os.path.join(conf.get_opt("working_dir"), args.session,
                                    args.volume, "tmp_output_%s" % num)

        if task == "pre":
            if vol_statusStr != "Started":
                fail("Volume %s is not online" % args.volume, logger=logger)

            # If Full backup is requested or start time is zero, use brickfind
            change_detector = conf.get_change_detector("changelog")
            if args.full:
                change_detector = conf.get_change_detector("brickfind")

            node_outfiles.append(node_outfile)

            cmd = [change_detector,
                   args.session,
                   args.volume,
                   brick,
                   node_outfile,
                   str(kwargs.get("start")),
                   "--output-prefix",
                   args.output_prefix] + \
                (["--debug"] if args.debug else []) + \
                (["--only-namespace-changes"] if args.only_namespace_changes
                 else [])

            opts["node_outfile"] = node_outfile
            opts["copy_outfile"] = True
        elif task == "query":
            # If Full backup is requested or start time is zero, use brickfind
            change_detector = conf.get_change_detector("changelog")
            node_outfiles.append(node_outfile)

            cmd = [change_detector,
                   args.session,
                   args.volume,
                   brick,
                   node_outfile,
                   str(kwargs.get("start"))] + \
                ["--only-query"] + \
                ["--output-prefix", args.output_prefix] + \
                (["--debug"] if args.debug else []) + \
                (["--only-namespace-changes"]
                    if args.only_namespace_changes else [])

            opts["node_outfile"] = node_outfile
            opts["copy_outfile"] = True
        elif task == "cleanup":
            # After pre run, cleanup the working directory and other temp files
            # Remove the copied node_outfile in main node
            try:
                os.remove(node_outfile)
            except (OSError, IOError):
                logger.warn("Failed to cleanup temporary file %s" %
                            node_outfile)
                pass

            cmd = [
                conf.get_opt("nodeagent"), "cleanup", args.session, args.volume
            ] + (["--debug"] if args.debug else [])
        elif task == "create":
            if vol_statusStr != "Started":
                fail("Volume %s is not online" % args.volume, logger=logger)

            # When glusterfind create, create session directory in
            # each brick nodes
            cmd = [conf.get_opt("nodeagent"),
                   "create",
                   args.session,
                   args.volume,
                   brick,
                   kwargs.get("time_to_update")] + \
                (["--debug"] if args.debug else []) + \
                (["--reset-session-time"] if args.reset_session_time
                 else [])
        elif task == "post":
            # Rename pre status file to actual status file in each node
            cmd = [conf.get_opt("nodeagent"),
                   "post",
                   args.session,
                   args.volume,
                   brick] + \
                (["--debug"] if args.debug else [])
        elif task == "delete":
            # When glusterfind delete, cleanup all the session files/dirs
            # from each node.
            cmd = [conf.get_opt("nodeagent"),
                   "delete",
                   args.session,
                   args.volume] + \
                (["--debug"] if args.debug else [])

        if cmd:
            p = Process(target=node_cmd,
                        args=(host, host_uuid, task, cmd, args, opts))
            p.start()
            pool.append(p)

    for num, p in enumerate(pool):
        p.join()
        if p.exitcode != 0:
            logger.warn("Command %s failed in %s" % (task, nodes[num][1]))
            if task in ["create", "delete"]:
                fail("Command %s failed in %s" % (task, nodes[num][1]))
            elif task == "pre" and args.disable_partial:
                sys.exit(1)
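
After a pre or query run, node_outfiles holds one temporary output file per brick; a hedged sketch of merging them into the final output file (the merge logic is an assumption, not the tool's actual implementation):

def merge_node_outfiles(outfile):
    # Hypothetical merge step: concatenate the per-node outputs collected in
    # the global node_outfiles list into a single file.
    with open(outfile, "w") as merged:
        for node_outfile in node_outfiles:
            try:
                with open(node_outfile) as partial:
                    merged.writelines(partial)
            except (OSError, IOError):
                # A node that was skipped or failed leaves no output behind.
                logger.warn("Missing temporary output %s" % node_outfile)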