Code Example #1
def mode_pre(session_dir, args):
    """
    Read the start time from the session status file and write the
    new end time to the status.pre file.
    """
    global gtmpfilename
    endtime_to_update = int(time.time()) - get_changelog_rollover_time(
        args.volume)
    status_file = os.path.join(session_dir, args.volume, "status")
    status_file_pre = status_file + ".pre"

    mkdirp(os.path.dirname(args.outfile), exit_on_err=True, logger=logger)

    # If the pre status file already exists, the previous pre was not followed by a post
    if os.path.exists(status_file_pre) and not args.regenerate_outfile:
        fail("Post command is not run after last pre, "
             "use --regenerate-outfile")

    start = 0
    try:
        with open(status_file) as f:
            start = int(f.read().strip())
    except ValueError:
        pass
    except (OSError, IOError) as e:
        fail("Error Opening Session file %s: %s" % (status_file, e),
             logger=logger)

    logger.debug("Pre is called - Session: %s, Volume: %s, "
                 "Start time: %s, End time: %s" %
                 (args.session, args.volume, start, endtime_to_update))

    # note: tempfile._get_candidate_names() is a private stdlib API
    prefix = datetime.now().strftime("%Y%m%d-%H%M%S-%f-")
    gtmpfilename = prefix + next(tempfile._get_candidate_names())

    run_cmd_nodes("pre", args, start=start, end=-1, tmpfilename=gtmpfilename)

    # Merger
    if args.full:
        cmd = ["sort", "-u"] + node_outfiles + ["-o", args.outfile]
        execute(cmd,
                exit_msg="Failed to merge output files "
                "collected from nodes",
                logger=logger)
    else:
        # Read each node's changelog db and generate the final db
        create_file(args.outfile, exit_on_err=True, logger=logger)
        outfilemerger = OutputMerger(args.outfile + ".db", node_outfiles)
        write_output(args.outfile, outfilemerger, args.field_separator)

    try:
        os.remove(args.outfile + ".db")
    except (IOError, OSError):
        pass

    run_cmd_nodes("cleanup", args, tmpfilename=gtmpfilename)

    # buffering=0 requires binary mode on Python 3
    with open(status_file_pre, "wb", buffering=0) as f:
        f.write(str(endtime_to_update).encode())

    sys.stdout.write("Generated output file %s\n" % args.outfile)
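
These snippets call several helpers (execute, fail, mkdirp, create_file) that glusterfind defines in its utils module but that are not shown here. For reading the examples in isolation, a minimal sketch of what those helpers might look like follows; the signatures are inferred from the call sites above and are not the actual glusterfs implementations:

import errno
import os
import subprocess
import sys


def fail(msg, logger=None):
    # Log the error if a logger was given, then abort the process.
    if logger is not None:
        logger.error(msg)
    sys.stderr.write("%s\n" % msg)
    sys.exit(1)


def execute(cmd, exit_msg=None, logger=None):
    # Run cmd and return (returncode, stdout, stderr); when exit_msg is
    # set, a non-zero exit status is treated as fatal.
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    out, err = p.communicate()
    if p.returncode != 0 and exit_msg is not None:
        fail("%s: %s" % (exit_msg, err), logger=logger)
    return p.returncode, out, err


def mkdirp(path, exit_on_err=False, logger=None):
    # mkdir -p: create the directory tree, tolerating "already exists".
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno == errno.EEXIST and os.path.isdir(path):
            pass
        elif exit_on_err:
            fail("Failed to create directory %s: %s" % (path, e),
                 logger=logger)


def create_file(path, exit_on_err=False, logger=None):
    # Create (or truncate) an empty output file.
    try:
        open(path, "w").close()
    except (OSError, IOError) as e:
        if exit_on_err:
            fail("Failed to create file %s: %s" % (path, e), logger=logger)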
Code Example #2
def mode_query(session_dir, args):
    # Verify volume status
    cmd = ["gluster", 'volume', 'info', args.volume, "--xml"]
    _, data, _ = execute(cmd,
                         exit_msg="Failed to Run Gluster Volume Info",
                         logger=logger)
    try:
        tree = etree.fromstring(data)
        statusStr = tree.find('volInfo/volumes/volume/statusStr').text
    except (ParseError, AttributeError) as e:
        fail("Invalid Volume: %s" % e, logger=logger)

    if statusStr != "Started":
        fail("Volume %s is not online" % args.volume, logger=logger)

    mkdirp(session_dir, exit_on_err=True, logger=logger)
    mkdirp(os.path.join(session_dir, args.volume),
           exit_on_err=True,
           logger=logger)
    mkdirp(os.path.dirname(args.outfile), exit_on_err=True, logger=logger)

    # Configure cluster for password-less SSH
    ssh_setup(args)

    # Enable volume options for changelog capture
    enable_volume_options(args)

    # Start query command processing
    if args.since_time:
        start = args.since_time
        logger.debug("Query is called - Session: %s, Volume: %s, "
                     "Start time: %s" % ("default", args.volume, start))

        run_cmd_nodes("query", args, start=start)

        # Merger
        # Read each node's changelog db and generate the final db
        create_file(args.outfile, exit_on_err=True, logger=logger)
        outfilemerger = OutputMerger(args.outfile + ".db", node_outfiles)
        write_output(args, outfilemerger)

        try:
            os.remove(args.outfile + ".db")
        except (IOError, OSError):
            pass

        run_cmd_nodes("cleanup", args)

        sys.stdout.write("Generated output file %s\n" % args.outfile)
    else:
        fail("Please specify --since-time option")
Code Example #3
def mode_query(session_dir, args):
    global gtmpfilename

    # Verify volume status
    cmd = ["gluster", 'volume', 'info', args.volume, "--xml"]
    _, data, _ = execute(cmd,
                         exit_msg="Failed to Run Gluster Volume Info",
                         logger=logger)
    try:
        tree = etree.fromstring(data)
        statusStr = tree.find('volInfo/volumes/volume/statusStr').text
    except (ParseError, AttributeError) as e:
        fail("Invalid Volume: %s" % e, logger=logger)

    if statusStr != "Started":
        fail("Volume %s is not online" % args.volume, logger=logger)

    mkdirp(session_dir, exit_on_err=True, logger=logger)
    mkdirp(os.path.join(session_dir, args.volume),
           exit_on_err=True,
           logger=logger)
    mkdirp(os.path.dirname(args.outfile), exit_on_err=True, logger=logger)

    # Configure cluster for password-less SSH
    ssh_setup(args)

    # Enable volume options for changelog capture
    enable_volume_options(args)

    # Test options
    if not args.since_time and not args.end_time and not args.full:
        fail(
            "Please specify either {--since-time and optionally --end-time} "
            "or --full",
            logger=logger)

    if args.since_time and args.end_time and args.full:
        fail(
            "Please specify either {--since-time and optionally --end-time} "
            "or --full, but not both",
            logger=logger)

    if args.end_time and not args.since_time:
        fail("Please specify --since-time as well", logger=logger)

    # Start query command processing
    start = -1
    end = -1
    if args.since_time:
        start = args.since_time
        if args.end_time:
            end = args.end_time
    else:
        start = 0  # --full option is handled separately

    logger.debug("Query is called - Session: %s, Volume: %s, "
                 "Start time: %s, End time: %s" %
                 ("default", args.volume, start, end))

    # note: tempfile._get_candidate_names() is a private stdlib API
    prefix = datetime.now().strftime("%Y%m%d-%H%M%S-%f-")
    gtmpfilename = prefix + next(tempfile._get_candidate_names())

    run_cmd_nodes("query",
                  args,
                  start=start,
                  end=end,
                  tmpfilename=gtmpfilename)

    # Merger
    if args.full:
        cmd = ["sort", "-u"] + node_outfiles + ["-o", args.outfile]
        execute(cmd,
                exit_msg="Failed to merge output files "
                "collected from nodes",
                logger=logger)
    else:
        # Read each node's changelog db and generate the final db
        create_file(args.outfile, exit_on_err=True, logger=logger)
        outfilemerger = OutputMerger(args.outfile + ".db", node_outfiles)
        write_output(args.outfile, outfilemerger, args.field_separator)

    try:
        os.remove(args.outfile + ".db")
    except (IOError, OSError):
        pass

    run_cmd_nodes("cleanup", args, tmpfilename=gtmpfilename)

    sys.stdout.write("Generated output file %s\n" % args.outfile)
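
Taken together, the three guards in Example #3 enforce: at least one of --since-time or --full, no --end-time without --since-time, and not all three options at once. Note that they still accept --since-time combined with --full when --end-time is absent. Below is a compact sketch that closes that gap by requiring exactly one of --full or --since-time (validate_query_args is a hypothetical helper, not part of glusterfind):

def validate_query_args(args, logger=None):
    # Require exactly one of --full / --since-time; --end-time is only
    # valid alongside --since-time.
    if args.full and (args.since_time or args.end_time):
        fail("Please specify either {--since-time and optionally "
             "--end-time} or --full, but not both", logger=logger)
    if not args.full and not args.since_time:
        # Catches both "no selection at all" and "--end-time alone".
        fail("Please specify either {--since-time and optionally "
             "--end-time} or --full", logger=logger)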
Code Example #4
File: main.py  Project: bcicen/glusterfs
def mode_pre(session_dir, args):
    """
    Read from Session file and write to session.pre file
    """
    endtime_to_update = int(time.time()) - get_changelog_rollover_time(
        args.volume)
    status_file = os.path.join(session_dir, args.volume, "status")
    status_file_pre = status_file + ".pre"

    mkdirp(os.path.dirname(args.outfile), exit_on_err=True, logger=logger)

    # If the pre status file already exists, the previous pre was not followed by a post
    if os.path.exists(status_file_pre) and not args.regenerate_outfile:
        fail("Post command is not run after last pre, "
             "use --regenerate-outfile")

    start = 0
    try:
        with open(status_file) as f:
            start = int(f.read().strip())
    except ValueError:
        pass
    except (OSError, IOError) as e:
        fail("Error Opening Session file %s: %s"
             % (status_file, e), logger=logger)

    logger.debug("Pre is called - Session: %s, Volume: %s, "
                 "Start time: %s, End time: %s"
                 % (args.session, args.volume, start, endtime_to_update))

    run_cmd_nodes("pre", args, start=start)

    # Merger
    if args.full:
        cmd = ["sort", "-u"] + node_outfiles + ["-o", args.outfile]
        execute(cmd,
                exit_msg="Failed to merge output files "
                "collected from nodes", logger=logger)
    else:
        # Read each node's changelog db and generate the final db
        create_file(args.outfile, exit_on_err=True, logger=logger)
        outfilemerger = OutputMerger(args.outfile + ".db", node_outfiles)

        with open(args.outfile, "a") as f:
            for row in outfilemerger.get():
                # Multiple paths in case of hardlinks
                paths = row[1].split(",")
                for p in paths:
                    if p == "":
                        continue
                    f.write("%s %s %s\n" % (row[0], p, row[2]))

    try:
        os.remove(args.outfile + ".db")
    except (IOError, OSError):
        pass

    run_cmd_nodes("cleanup", args)

    # buffering=0 requires binary mode on Python 3
    with open(status_file_pre, "wb", buffering=0) as f:
        f.write(str(endtime_to_update).encode())

    sys.stdout.write("Generated output file %s\n" % args.outfile)
Code Example #5
def mode_pre(session_dir, args):
    """
    Read from Session file and write to session.pre file
    """
    endtime_to_update = int(time.time()) - get_changelog_rollover_time(
        args.volume)
    status_file = os.path.join(session_dir, args.volume, "status")
    status_file_pre = status_file + ".pre"

    mkdirp(os.path.dirname(args.outfile), exit_on_err=True, logger=logger)

    # If the pre status file already exists, the previous pre was not followed by a post
    if os.path.exists(status_file_pre) and not args.regenerate_outfile:
        fail("Post command is not run after last pre, "
             "use --regenerate-outfile")

    start = 0
    try:
        with open(status_file) as f:
            start = int(f.read().strip())
    except ValueError:
        pass
    except (OSError, IOError) as e:
        fail("Error Opening Session file %s: %s" % (status_file, e),
             logger=logger)

    logger.debug("Pre is called - Session: %s, Volume: %s, "
                 "Start time: %s, End time: %s" %
                 (args.session, args.volume, start, endtime_to_update))

    run_cmd_nodes("pre", args, start=start)

    # Merger
    if args.full:
        cmd = ["sort", "-u"] + node_outfiles + ["-o", args.outfile]
        execute(cmd,
                exit_msg="Failed to merge output files "
                "collected from nodes",
                logger=logger)
    else:
        # Read each node's changelog db and generate the final db
        create_file(args.outfile, exit_on_err=True, logger=logger)
        outfilemerger = OutputMerger(args.outfile + ".db", node_outfiles)

        with open(args.outfile, "a") as f:
            for row in outfilemerger.get():
                # Multiple paths in case of hardlinks
                paths = row[1].split(",")
                for p in paths:
                    if p == "":
                        continue
                    f.write("%s %s %s\n" % (row[0], p, row[2]))

    try:
        os.remove(args.outfile + ".db")
    except (IOError, OSError):
        pass

    run_cmd_nodes("cleanup", args)

    # buffering=0 requires binary mode on Python 3
    with open(status_file_pre, "wb", buffering=0) as f:
        f.write(str(endtime_to_update).encode())

    sys.stdout.write("Generated output file %s\n" % args.outfile)
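
Examples #4 and #5 inline the merge loop that Examples #1 and #3 delegate to write_output(args.outfile, outfilemerger, args.field_separator). A plausible write_output reconstructed from that inline loop (a sketch, not the upstream function):

def write_output(outfile, outfilemerger, field_separator=" "):
    # Each merged row is (type, comma-joined paths, extra field); one
    # line is emitted per path so hardlinks expand to multiple lines,
    # exactly as the inline loops above do.
    with open(outfile, "a") as f:
        for row in outfilemerger.get():
            for p in row[1].split(","):
                if p == "":
                    continue
                f.write("%s%s%s%s%s\n" % (row[0], field_separator, p,
                                          field_separator, row[2]))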