Example 1
File: main.py Project: viogp/shark
def run_shark(particle, *args):

    opts, space, subvols, statTest = args

    pid = multiprocessing.current_process().pid
    shark_output_base = os.path.join(opts.outdir, 'output_%d' % pid)
    _, simu, model, _ = common.read_configuration(opts.config)
    modeldir = common.get_shark_output_dir(shark_output_base, simu, model)

    cmdline = [
        opts.shark_binary, opts.config, '-o',
        'execution.output_directory=%s' % shark_output_base, '-o',
        'execution.simulation_batches=%s' % ' '.join(map(str, subvols))
    ]
    for option in _to_shark_options(particle, space):
        cmdline += ['-o', option]
    _exec_shark('Executing shark instance', cmdline)

    total = 0
    for constraint in opts.constraints:
        y_obs, y_mod, err = constraint.get_data(modeldir, subvols)
        total += statTest(y_obs, y_mod, err)

    logger.info('Particle %r evaluated to %f', particle, total)

    if not opts.keep:
        shutil.rmtree(shark_output_base)

    return total
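Example 1 relies on the helper `_to_shark_options`, which is not shown on this page. Below is a minimal, hypothetical sketch of what such a helper could look like, assuming `space` exposes the 'name' and 'is_log' columns described in the space-file comment of Example 7; it is an illustration, not the project's actual implementation.

def _to_shark_options(particle, space):
    # Map one particle position to "parameter=value" strings that shark
    # accepts through its -o command-line overrides.
    options = []
    for value, name, is_log in zip(particle, space['name'], space['is_log']):
        if is_log:
            value = 10 ** value  # assumes the search runs in log space
        options.append('%s=%g' % (name, value))
    return options

Each resulting string is then passed to shark as `-o parameter=value`, exactly as the loop in run_shark does.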
Example 2
def main():
    prefs = read_configuration(os.path.join(os.path.dirname(__file__), 'fritz-speed.ini'))
    fc = fritzconnection.FritzConnection()
    status = fc.call_action('WANCommonInterfaceConfig', 'GetTotalBytesSent')
    bytes_up = status['NewTotalBytesSent']

    status = fc.call_action('WANCommonInterfaceConfig', 'GetTotalBytesReceived')
    bytes_down = status['NewTotalBytesReceived']
    update_rra(prefs['rra_filename'], str(bytes_up), str(bytes_down))
Example 3
def main():
    prefs = read_configuration(
        os.path.join(os.path.dirname(__file__), "fritz-speed.ini"))
    fc = fritzconnection.FritzConnection(address=prefs["fritz_ip"])

    status = fc.call_action("WANCommonIFC1", "GetAddonInfos")
    bytes_down = status["NewX_AVM_DE_TotalBytesReceived64"]
    bytes_up = status["NewX_AVM_DE_TotalBytesSent64"]

    update_rra(prefs["rra_filename"], str(bytes_up), str(bytes_down))
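Examples 2 and 3 hand the raw byte counters to `update_rra`, which is defined elsewhere in the project. A minimal sketch, assuming it simply wraps `rrdtool.update` and that the RRD holds the two COUNTER datasources created in the later examples:

import rrdtool

def update_rra(rra_filename, bytes_up, bytes_down):
    # "N" timestamps the sample with the current time; the value order must
    # match the datasource order used at creation time (bytes-up, bytes-down).
    rrdtool.update(rra_filename, 'N:%s:%s' % (bytes_up, bytes_down))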
Example 4
def main():
    prefs = read_configuration(
        os.path.join(os.path.dirname(__file__), "fritz-speed.ini"))
    for g in prefs["graphs"]:
        # Graph the data; for information about the parameters see https://oss.oetiker.ch/rrdtool/doc/rrdgraph.en.html
        rrdtool.graph(
            g["filename"],
            "-t",
            g["title"],
            "-w",
            g["graph_width"],
            "-h",
            g["graph_height"],
            "-s",
            "end-" + g["interval"] + "s",
            "DEF:bytes-up=" + g["rra_filename"] + ":bytes-up:AVERAGE",
            "DEF:bytes-down=" + g["rra_filename"] + ":bytes-down:AVERAGE",
            prefs["graph_type_down"] + ":bytes-down#" +
            prefs["graph_color_down"] + ":Downloadrate",
            prefs["graph_type_up"] + ":bytes-up#" + prefs["graph_color_up"] +
            ":Uploadrate",
            "-v",
            "Transfer rate [B/s]",
        )
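The graphing loop assumes `read_configuration` returns a dict with global keys (graph colours and types) plus a `graphs` list holding one entry per output image. The helper itself is not shown on this page; the following is a hypothetical sketch using `configparser`, with section names such as `[general]` and `[graph_day]` invented purely for illustration.

import configparser

def read_configuration(path):
    parser = configparser.ConfigParser()
    parser.read(path)
    prefs = dict(parser['general'])
    # Every section whose name starts with "graph" becomes one graph entry.
    prefs['graphs'] = [dict(parser[name]) for name in parser.sections()
                       if name.startswith('graph')]
    return prefs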
Example 5
four datasources which are counters
three RRAs for one day, one week and one month
"""

import os

import rrdtool
from common import read_configuration


def main():
    up_speed_max = int(prefs["uplink_mbit"]) * 1024 * 1024 * 1.1
    down_speed_max = int(prefs["downlink_mbit"]) * 1024 * 1024 * 1.1

    rrdtool.create(
        prefs["rra_filename"],
        "--step",
        "60",
        "DS:bytes-up:COUNTER:500:0:" + str(up_speed_max),
        "DS:bytes-down:COUNTER:500:0:" + str(down_speed_max),
        "RRA:AVERAGE:0.8:1:1440",
        "RRA:AVERAGE:0.8:10:1008",
        "RRA:AVERAGE:0.8:60:5040",
    )


if __name__ == "__main__":
    prefs = read_configuration(
        os.path.join(os.path.dirname(__file__), "fritz-speed.ini"))
    main()
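The three RRAs consolidate the 60-second samples at 1, 10 and 60 steps per row, so their coverage follows from rows × steps per row × step length. The small check below shows that the last archive actually spans about 210 days, rather more than the one month mentioned in the docstring fragment.

step = 60  # seconds, as passed to rrdtool.create above
for rows, steps in [(1440, 1), (1008, 10), (5040, 60)]:
    days = rows * steps * step / 86400
    print('%4d rows x %2d steps per row -> %.0f days' % (rows, steps, days))
# 1440 rows x  1 step per row  ->   1 day
# 1008 rows x 10 steps per row ->   7 days
# 5040 rows x 60 steps per row -> 210 days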
Example 6
File: main.py Project: viogp/shark
def run_shark_hpc(particles, *args):
    """
    - Handler function for running PSO on Shark on a SLURM based cluster.
    - Swarm size and number of iterations need to be set within the script for now
    - Function needs the relative path to a Shark config file under the -c option
    - For now, the subprocess call within this function must be altered if you want to change the shark submit options
    - To find appropriate memory allocations, inspect the initial output lines of each particle
    """

    global count

    opts, space, subvols, statTest = args

    # Prepare the file that will be used by the shark submission scripts
    # to determine which values shark will be run for. We put a final \n so the
    # final line gets properly counted by wc (used by shark-submit)
    shark_options = [
        ' '.join([
            '-o "%s"' % option
            for option in _to_shark_options(particle, space)
        ]) for particle in particles
    ]
    positions_fname = tempfile.mktemp('particle_positions.txt')
    logger.info('Creating particle positions file at %s', positions_fname)
    with open(positions_fname, 'wt') as f:
        f.write('\n'.join(shark_options) + '\n')

    # Submit the execution of multiple shark instances, one for each particle
    job_name = 'PSOSMF_%d' % count
    shark_output_base = os.path.join(opts.outdir, job_name)
    cmdline = [
        './shark-submit', '-S', opts.shark_binary, '-w', opts.walltime, '-n',
        job_name, '-O', shark_output_base, '-E', positions_fname, '-V',
        ' '.join(map(str, subvols))
    ]
    if opts.account:
        cmdline += ['-a', opts.account]
    if opts.queue:
        cmdline += ['-Q', opts.queue]
    if opts.nodes:
        cmdline += ['-N', str(opts.nodes)]
    else:
        cmdline += ['-m', opts.memory, '-c', str(opts.cpus)]
    cmdline.append(opts.config)
    _exec_shark('Queueing PSO particles', cmdline)

    # Actually wait for the jobs to finish...
    while count_jobs(job_name) > 0:
        time.sleep(10)

    ss = len(particles)
    fx = np.zeros([ss, len(opts.constraints)])  # one column per constraint
    for i in range(ss):
        _, simu, model, _ = common.read_configuration(opts.config)
        particle_outdir = os.path.join(shark_output_base, str(i))
        modeldir = common.get_shark_output_dir(particle_outdir, simu, model)
        for j, constraint in enumerate(opts.constraints):
            y_obs, y_mod, err = constraint.get_data(modeldir, subvols)
            fx[i, j] = statTest(y_obs, y_mod, err)
        if not opts.keep:
            shutil.rmtree(particle_outdir)

    fx = np.sum(fx, 1)
    logger.info('Particles %r evaluated to %r', particles, fx)

    # this global count just tracks the number of iterations so they can be saved to different files
    count += 1

    return fx
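run_shark_hpc polls `count_jobs` until all submitted particles have finished. That helper is not listed on this page; a minimal sketch, assuming a SLURM cluster where `squeue -h -n <name>` prints one line per job still queued or running under that name:

import subprocess

def count_jobs(job_name):
    # -h suppresses the header line and -n filters by job name, so the number
    # of remaining jobs equals the number of output lines.
    out = subprocess.check_output(['squeue', '-h', '-n', job_name])
    return len(out.splitlines())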
Example 7
File: main.py Project: viogp/shark
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-c',
        '--config',
        help='Configuration file used as the basis for running shark',
        type=_abspath)
    parser.add_argument(
        '-v',
        '--subvolumes',
        help='Comma- and dash-separated list of subvolumes to process',
        default='0')
    parser.add_argument(
        '-b',
        '--shark-binary',
        help=
        'The shark binary to use, defaults to either "shark" or "../build/shark"',
        default=None,
        type=_abspath)
    parser.add_argument('-o',
                        '--outdir',
                        help='Auxiliary output directory, defaults to .',
                        default=_abspath('.'),
                        type=_abspath)
    parser.add_argument('-k',
                        '--keep',
                        help='Keep temporary output files',
                        action='store_true')

    pso_opts = parser.add_argument_group('PSO options')
    pso_opts.add_argument(
        '-s',
        '--swarm-size',
        help=
        'Size of the particle swarm. Defaults to 10 + sqrt(D) * 2 (D=number of dimensions)',
        type=int,
        default=None)
    pso_opts.add_argument(
        '-m',
        '--max-iterations',
        help=
        'Maximum number of iterations to reach before giving up, defaults to 10',
        default=10,
        type=int)
    pso_opts.add_argument(
        '-S',
        '--space-file',
        help='File with the search space specification, defaults to space.txt',
        default='space.txt',
        type=_abspath)
    pso_opts.add_argument(
        '-t',
        '--stat-test',
        help=
        'Stat function used to calculate the value of a particle, defaults to student-t',
        default='student-t',
        choices=list(analysis.stat_tests.keys()))
    pso_opts.add_argument(
        '-x',
        '--constraints',
        default='HIMF,SMF_z0,SMF_z1',
        help=
        ("Comma-separated list of constraints, any of HIMF, SMF_z0 or SMF_z1, defaults to 'HIMF,SMF_z0,SMF_z1'. "
         "Can specify a domain range after the name (e.g., 'SMF_z0(8-11)')"))

    hpc_opts = parser.add_argument_group('HPC options')
    hpc_opts.add_argument('-H',
                          '--hpc-mode',
                          help='Enable HPC mode',
                          action='store_true')
    hpc_opts.add_argument('-C',
                          '--cpus',
                          help='Number of CPUs per shark instance',
                          default=1,
                          type=int)
    hpc_opts.add_argument('-M',
                          '--memory',
                          help='Memory needed by each shark instance',
                          default='1500m')
    hpc_opts.add_argument('-N',
                          '--nodes',
                          help='Number of nodes to use',
                          default=None,
                          type=int)
    hpc_opts.add_argument('-a',
                          '--account',
                          help='Submit jobs using this account',
                          default=None)
    hpc_opts.add_argument('-q',
                          '--queue',
                          help='Submit jobs to this queue',
                          default=None)
    hpc_opts.add_argument(
        '-w',
        '--walltime',
        help='Walltime for each submission, defaults to 1:00:00',
        default='1:00:00')

    opts = parser.parse_args()

    if not opts.config:
        parser.error('-c option is mandatory but missing')

    if opts.shark_binary and not common.has_program(opts.shark_binary):
        parser.error(
            "shark binary '%s' not found, specify a correct one via -b" %
            opts.shark_binary)
    elif not opts.shark_binary:
        for candidate in ['shark', '../build/shark']:
            if not common.has_program(candidate):
                continue
            opts.shark_binary = _abspath(candidate)
            break
        if not opts.shark_binary:
            parser.error("No shark binary found, specify one via -b")

    _, _, _, redshift_file = common.read_configuration(opts.config)
    redshift_table = common._redshift_table(redshift_file)
    subvols = common.parse_subvolumes(opts.subvolumes)

    setup_logging(opts.outdir)

    opts.constraints = constraints.parse(opts.constraints)
    for c in opts.constraints:
        c.redshift_table = redshift_table

    # Read search space specification, which is a comma-separated multiline file,
    # each line containing the following elements:
    #
    # param_name, plot_label, is_log, lower_bound, upper_bound
    space = analysis.load_space(opts.space_file)

    ss = opts.swarm_size
    if ss is None:
        ss = 10 + int(2 * math.sqrt(len(space)))

    args = (opts, space, subvols, analysis.stat_tests[opts.stat_test])

    if opts.hpc_mode:
        procs = 0
        f = run_shark_hpc
    else:
        n_cpus = multiprocessing.cpu_count()
        procs = min(n_cpus, ss)
        f = run_shark

    logger.info('-----------------------------------------------------')
    logger.info('Runtime information')
    logger.info('    shark binary: %s', opts.shark_binary)
    logger.info('    Base configuration file: %s', opts.config)
    logger.info('    Subvolumes to use: %r', subvols)
    logger.info('    Output directory: %s', opts.outdir)
    logger.info('    Keep temporary output files: %d', opts.keep)
    logger.info("PSO information:")
    logger.info('    Search space parameters: %s', ' '.join(space['name']))
    logger.info('    Swarm size: %d', ss)
    logger.info('    Maximum iterations: %d', opts.max_iterations)
    logger.info('    Lower bounds: %r', space['lb'])
    logger.info('    Upper bounds: %r', space['ub'])
    logger.info('    Test function: %s', opts.stat_test)
    logger.info('Constraints:')
    for c in opts.constraints:
        logger.info('%10s [%.1f - %.1f]' %
                    (c.__class__.__name__, c.domain[0], c.domain[1]))
    logger.info('HPC mode: %d', opts.hpc_mode)
    if opts.hpc_mode:
        logger.info('    Account used to submit: %s',
                    opts.account if opts.account else '')
        logger.info('    Queue to submit: %s',
                    opts.queue if opts.queue else '')
        logger.info('    Walltime per submission: %s', opts.walltime)
        logger.info('    CPUs per instance: %d', opts.cpus)
        logger.info('    Memory per instance: %s', opts.memory)
        logger.info('    Nodes to use: %s', opts.nodes)

    while True:
        answer = raw_input('\nAre these parameters correct? (Yes/no): ')
        if answer:
            if answer.lower() in ('n', 'no'):
                logger.info(
                    'Not starting PSO, check your configuration and try again')
                return
            if answer.lower() not in ('y', 'yes'):
                print("Please answer 'yes' or 'no'")
                continue
        break

    # Directory where we store the intermediate results
    tracksdir = os.path.join(opts.outdir, 'tracks')
    try:
        os.makedirs(tracksdir)
    except OSError:
        pass

    # Go, go, go!
    logger.info('Starting PSO now')
    tStart = time.time()
    if opts.hpc_mode:
        os.chdir('../hpc')
    xopt, fopt = pso.pso(f,
                         space['lb'],
                         space['ub'],
                         args=args,
                         swarmsize=ss,
                         maxiter=opts.max_iterations,
                         processes=procs,
                         dumpfile_prefix=os.path.join(tracksdir, 'track_%03d'))
    tEnd = time.time()

    global count
    logger.info('Number of iterations = %d', count)
    logger.info('xopt = %r', xopt)
    logger.info('fopt = %r', fopt)
    logger.info('PSO finished in %.3f [s]', tEnd - tStart)
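main() loads the search space through `analysis.load_space`. Based only on the comment in the code above (each line holding param_name, plot_label, is_log, lower_bound, upper_bound), a hypothetical parser could look like the sketch below; the real helper may well return NumPy arrays rather than plain lists.

def load_space(space_file):
    # Parse the comma-separated space file into column lists.
    space = {'name': [], 'label': [], 'is_log': [], 'lb': [], 'ub': []}
    with open(space_file) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            name, label, is_log, lb, ub = (x.strip() for x in line.split(','))
            space['name'].append(name)
            space['label'].append(label)
            space['is_log'].append(bool(int(is_log)))
            space['lb'].append(float(lb))
            space['ub'].append(float(ub))
    return space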
Example 8
four datasources which are counters
three RRAs for one day, one week and one month
"""

import fritzconnection
import rrdtool
import os

from common import read_configuration

def get_link_speed():
    fc = fritzconnection.FritzConnection()
    status = fc.call_action('WANCommonInterfaceConfig', 'GetCommonLinkProperties')
    downstream = status['NewLayer1DownstreamMaxBitRate'] / 8.
    upstream = status['NewLayer1UpstreamMaxBitRate'] / 8.
    return (upstream, downstream)

def main():
    link_speeds = get_link_speed()
    max_speeds = tuple(speed * 1.1 for speed in link_speeds)
    rrdtool.create(prefs['rra_filename'], '--step', '60',
        'DS:bytes-up:COUNTER:500:0:' + str(max_speeds[0]),
        'DS:bytes-down:COUNTER:500:0:' + str(max_speeds[1]),
        'RRA:AVERAGE:0.8:1:1440',
        'RRA:AVERAGE:0.8:10:1008',
        'RRA:AVERAGE:0.8:60:5040')

if __name__ == '__main__':
    prefs = read_configuration('fritz-speed.ini')
    main()