Example #1
def get_out_dir(fname, out_dir):
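    "Return the output directory derived from fname and out_dir, creating it if needed"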

    #print(fname, out_dir)
    if out_dir == '' or out_dir[0] != '/':
        dir_name = os.path.dirname(fname)
        out_dir = dir_name + '/' + out_dir

    if len(out_dir) > 0 and out_dir[-1] != '/':
        out_dir += '/'

    mkdir_p(out_dir)

    return out_dir
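
Note: get_out_dir (and several tasks below) rely on a mkdir_p helper that is not part of these examples. A minimal sketch of such a helper, assuming the usual "mkdir -p" semantics on top of os.makedirs, could look like this:

import errno
import os

def mkdir_p(path):
    # create the directory (and any missing parents); an already
    # existing directory is not treated as an error
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise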
Example #2
def config_check_and_log(test_id):
    execute(check_config, hosts=['MAIN'])  # use a dummy host here and below

    # create sub directory for test id prefix
    mkdir_p(test_id)
    execute(log_varying_params, test_id, test_id, hosts=['MAIN'])

    # make copy of config and dump TPCONF variables
    # wrap these in try-except because they don't exist in old versions
    try:
        execute(backup_config, test_id, hosts=['MAIN'])
        execute(dump_config_vars, test_id, hosts=['MAIN'])
        execute(get_nameip_map, test_id, hosts=['MAIN'])
    except:
        pass
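
A hypothetical call site, assuming the Fabric setup used throughout these examples, would pass the test ID so the checks and logs end up under the matching sub directory (the ID value is illustrative only):

# hypothetical invocation before an experiment starts
test_id = 'exp_20150201-120000_experiment_0'
config_check_and_log(test_id)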
Example #3
def analyse_2d_density(
        exp_list='experiments_completed.txt',
        res_dir='',
        out_dir='',
        source_filter='',
        min_values='3',
        xmetric='throughput',
        ymetric='tcprtt',
        variables='',
        out_name='',
        xmin='0',
        xmax='0',
        ymin='0',
        ymax='0',
        lnames='',
        group_by='aqm',
        replot_only='0',
        pdf_dir='',
        stime='0.0',
        etime='0.0',
        ts_correct='1',
        smoothed='1',
        link_len='0',
        plot_params='',
        plot_script='',
        xstat_index='',
        ystat_index='',
        dupacks='0',
        cum_ackseq='1',
        merge_data='0',
        #sburst='1', eburst='0', test_id_prefix='[0-9]{8}\-[0-9]{6}_experiment_',
        sburst='1',
        eburst='0',
        test_id_prefix='exp_[0-9]{8}\-[0-9]{6}_',
        slowest_only='0',
        query_host=''):
    "2d density / ellipse plot for different experiments"

    test_id_pfx = ''

    check = get_metric_params(xmetric, smoothed, ts_correct)
    if check is None:
        abort('Unknown metric %s specified with xmetric' % xmetric)
    check = get_metric_params(ymetric, smoothed, ts_correct)
    if check is None:
        abort('Unknown metric %s specified with ymetric' % ymetric)

    #if source_filter == '':
    #    abort('Must specify at least one source filter')

    if len(source_filter.split(';')) > 12:
        abort('Cannot have more than 12 filters')

    # XXX more param checking

    # make sure res_dir has valid form (out_dir is handled by extract methods)
    res_dir = valid_dir(res_dir)

    # Initialise source filter data structure
    sfil = SourceFilter(source_filter)

    # read test ids
    experiments = read_experiment_ids(exp_list)

    # get path based on first experiment id
    dir_name = get_first_experiment_path(experiments)

    # if we haven't got the extracted data, run extract method(s) first
    if res_dir == '':
        for experiment in experiments:

            (ex_function,
             kwargs) = get_extract_function(xmetric,
                                            link_len,
                                            xstat_index,
                                            sburst=sburst,
                                            eburst=eburst,
                                            slowest_only=slowest_only,
                                            query_host=query_host)

            (dummy, out_files,
             out_groups) = ex_function(test_id=experiment,
                                       out_dir=out_dir,
                                       source_filter=source_filter,
                                       replot_only=replot_only,
                                       ts_correct=ts_correct,
                                       **kwargs)

            (ex_function,
             kwargs) = get_extract_function(ymetric,
                                            link_len,
                                            ystat_index,
                                            sburst=sburst,
                                            eburst=eburst,
                                            slowest_only=slowest_only,
                                            query_host=query_host)

            (dummy, out_files,
             out_groups) = ex_function(test_id=experiment,
                                       out_dir=out_dir,
                                       source_filter=source_filter,
                                       replot_only=replot_only,
                                       ts_correct=ts_correct,
                                       **kwargs)

        if out_dir == '' or out_dir[0] != '/':
            res_dir = dir_name + '/' + out_dir
        else:
            res_dir = out_dir

    else:
        if res_dir[0] != '/':
            res_dir = dir_name + '/' + res_dir

    # make sure we have trailing slash
    res_dir = valid_dir(res_dir)

    if pdf_dir == '':
        pdf_dir = res_dir
    else:
        if pdf_dir[0] != '/':
            pdf_dir = dir_name + '/' + pdf_dir
        pdf_dir = valid_dir(pdf_dir)
        # if pdf_dir specified create if it doesn't exist
        mkdir_p(pdf_dir)

    #
    # build match string from variables
    #

    (match_str, match_str2) = build_match_strings(experiments[0], variables,
                                                  test_id_prefix)

    #
    # filter out the experiments to plot, generate x-axis labels, get test id prefix
    #

    (fil_experiments, test_id_pfx,
     dummy) = filter_experiments(experiments, match_str, match_str2)

    #
    # get groups based on group_by variable
    #

    group_idx = 1
    levels = {}
    groups = []
    leg_names = []
    _experiments = []
    for experiment in fil_experiments:
        level = ''
        add_exp = True
        for g in group_by.split(';'):
            p = experiment.find(g)
            if p > -1:
                s = experiment.find('_', p)
                s += 1
                e = experiment.find('_', s)
                level += g + ':' + experiment[s:e] + ' '
            else:
                add_exp = False
                break

        # remove the final space from the string
        level = level[:-1]

        if add_exp == True:
            _experiments.append(experiment)
            #print('level: ' + level)

            if level not in levels:
                levels[level] = group_idx
                group_idx += 1
                leg_names.append(level)

            if merge_data == '1':
                groups.append(levels[level])
            else:
                for i in range(len(source_filter.split(';'))):
                    groups.append(levels[level])

    fil_experiments = _experiments

    #
    # get metric parameters and list of data files
    #

    # get the metric parameter for both x and y
    x_axis_params = get_metric_params(xmetric, smoothed, ts_correct,
                                      xstat_index, dupacks, cum_ackseq,
                                      slowest_only)
    y_axis_params = get_metric_params(ymetric, smoothed, ts_correct,
                                      ystat_index, dupacks, cum_ackseq,
                                      slowest_only)

    x_ext = x_axis_params[0]
    y_ext = y_axis_params[0]

    # if we merge responders make sure we only use the merged files
    if merge_data == '1':
        # reset source filter so we match the merged file
        sfil.clear()
        sfil = SourceFilter('S_0.0.0.0_0')

    x_files = []
    y_files = []
    for experiment in fil_experiments:
        _x_files = []
        _y_files = []
        _x_ext = x_ext
        _y_ext = y_ext

        _files = get_testid_file_list('', experiment, _x_ext, 'LC_ALL=C sort',
                                      res_dir)
        if merge_data == '1':
            _x_ext += '.all'
            _files = merge_data_files(_files)
        _x_files += _files

        _files = get_testid_file_list('', experiment, _y_ext, 'LC_ALL=C sort',
                                      res_dir)
        if merge_data == '1':
            _y_ext += '.all'
            _files = merge_data_files(_files)
        _y_files += _files

        match_str = '.*_([0-9\.]*_[0-9]*_[0-9\.]*_[0-9]*)[0-9a-z_.]*' + _x_ext
        for f in _x_files:
            #print(f)
            res = re.search(match_str, f)
            #print(res.group(1))
            if res and sfil.is_in(res.group(1)):
                # only add file if enough data points
                rows = int(
                    local('wc -l %s | awk \'{ print $1 }\'' % f, capture=True))
                if rows > int(min_values):
                    x_files.append(f)

        match_str = '.*_([0-9\.]*_[0-9]*_[0-9\.]*_[0-9]*)[0-9a-z_.]*' + _y_ext
        for f in _y_files:
            # print(f)
            res = re.search(match_str, f)
            if res and sfil.is_in(res.group(1)):
                # only add file if enough data points
                rows = int(
                    local('wc -l %s | awk \'{ print $1 }\'' % f, capture=True))
                if rows > int(min_values):
                    y_files.append(f)

    yindexes = [str(x_axis_params[2]), str(y_axis_params[2])]
    yscalers = [str(x_axis_params[3]), str(y_axis_params[3])]
    aggr_flags = [x_axis_params[5], y_axis_params[5]]
    diff_flags = [x_axis_params[6], y_axis_params[6]]

    if lnames != '':
        lnames_arr = lnames.split(';')
        if len(lnames_arr) != len(leg_names):
            abort(
                'Number of legend names must be equal to the number of groups'
            )
        leg_names = lnames_arr

    print(x_files)
    print(y_files)
    print(groups)
    print(leg_names)

    #
    # pass the data files and auxiliary info to plot function
    #

    if out_name != '':
        oprefix = out_name + '_' + test_id_pfx + '_' + xmetric + '_' + ymetric
    else:
        oprefix = test_id_pfx + '_' + xmetric + '_' + ymetric
    title = oprefix

    plot_2d_density(title, x_files, y_files, x_axis_params[1],
                    y_axis_params[1], yindexes, yscalers, 'pdf', oprefix,
                    pdf_dir, x_axis_params[4], y_axis_params[4], aggr_flags,
                    diff_flags, xmin, xmax, ymin, ymax, stime, etime, groups,
                    leg_names, plot_params, plot_script)

    # done
    puts('\n[MAIN] COMPLETED analyse_2d_density %s \n' % test_id_pfx)
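
Since analyse_2d_density is a Fabric task, all parameters are strings. A hypothetical direct call (the source filter and metric values are illustrative only):

analyse_2d_density(exp_list='experiments_completed.txt',
                   source_filter='S_172.16.10.2_5000',
                   xmetric='throughput', ymetric='tcprtt',
                   group_by='aqm', out_name='density')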
Example #4
def analyse_cmpexp(
        exp_list='experiments_completed.txt',
        res_dir='',
        out_dir='',
        source_filter='',
        min_values='3',
        omit_const='0',
        metric='throughput',
        ptype='box',
        variables='',
        out_name='',
        ymin='0',
        ymax='0',
        lnames='',
        group_by_prefix='0',
        omit_const_xlab_vars='0',
        replot_only='0',
        pdf_dir='',
        stime='0.0',
        etime='0.0',
        ts_correct='1',
        smoothed='1',
        link_len='0',
        plot_params='',
        plot_script='',
        stat_index='',
        dupacks='0',
        cum_ackseq='1',
        merge_data='0',
        sburst='1',
        #eburst='0', test_id_prefix='[0-9]{8}\-[0-9]{6}_experiment_',
        eburst='0',
        test_id_prefix='exp_[0-9]{8}\-[0-9]{6}_',
        slowest_only='0',
        res_time_mode='0',
        query_host=''):
    "Compare metrics for different experiments"

    if ptype != 'box' and ptype != 'mean' and ptype != 'median':
        abort('ptype must be either box, mean or median')

    check = get_metric_params(metric, smoothed, ts_correct)
    if check is None:
        abort('Unknown metric %s specified' % metric)

    if source_filter == '':
        abort('Must specify at least one source filter')

    if len(source_filter.split(';')) > 12:
        abort('Cannot have more than 12 filters')

    # prevent wrong use of res_time_mode
    if metric != 'restime' and res_time_mode != '0':
        res_time_mode = '0'
    if ptype == 'box' and res_time_mode == '2':
        res_time_mode = '0'

    # XXX more param checking

    # Initialise source filter data structure
    sfil = SourceFilter(source_filter)

    # read test ids
    experiments = read_experiment_ids(exp_list)

    # get path based on first experiment id
    dir_name = get_first_experiment_path(experiments)

    # if we haven't got the extracted data, run extract method(s) first
    if res_dir == '':
        for experiment in experiments:

            (ex_function,
             kwargs) = get_extract_function(metric,
                                            link_len,
                                            stat_index,
                                            sburst=sburst,
                                            eburst=eburst,
                                            slowest_only=slowest_only,
                                            query_host=query_host)

            (dummy, out_files,
             out_groups) = ex_function(test_id=experiment,
                                       out_dir=out_dir,
                                       source_filter=source_filter,
                                       replot_only=replot_only,
                                       ts_correct=ts_correct,
                                       **kwargs)

        if out_dir == '' or out_dir[0] != '/':
            res_dir = dir_name + '/' + out_dir
        else:
            res_dir = out_dir
    else:
        if res_dir[0] != '/':
            res_dir = dir_name + '/' + res_dir

    # make sure we have trailing slash
    res_dir = valid_dir(res_dir)

    if pdf_dir == '':
        pdf_dir = res_dir
    else:
        if pdf_dir[0] != '/':
            pdf_dir = dir_name + '/' + pdf_dir
        pdf_dir = valid_dir(pdf_dir)
        # if pdf_dir specified create if it doesn't exist
        mkdir_p(pdf_dir)

    #
    # build match string from variables
    #

    (match_str, match_str2) = build_match_strings(experiments[0], variables,
                                                  test_id_prefix)

    #
    # filter out the experiments to plot, generate x-axis labels, get test id prefix
    #

    (fil_experiments, test_id_pfx,
     xlabs) = filter_experiments(experiments, match_str, match_str2)

    #
    # get out data files based on filtered experiment list and source_filter
    #

    (ext, ylab, yindex, yscaler, sep, aggr,
     diff) = get_metric_params(metric, smoothed, ts_correct, stat_index,
                               dupacks, cum_ackseq, slowest_only)

    if res_time_mode == '1':
        plot_params += ' NOMINAL_RES_TIME="1"'
    if res_time_mode == '2':
        if ptype == 'median':
            ylab = 'Median resp time / nominal resp time'
        elif ptype == 'mean':
            ylab = 'Mean resp time / nominal resp time'
        plot_params += ' RATIO_RES_TIME="1"'

    leg_names = source_filter.split(';')

    # if we merge responders make sure we only use the merged files
    if merge_data == '1':
        # set label to indicate merged data
        leg_names = ['Merged data']
        # reset source filter so we match the merged file
        sfil.clear()
        source_filter = 'S_0.0.0.0_0'
        sfil = SourceFilter(source_filter)

    file_names = []
    for experiment in fil_experiments:
        out_files = {}
        _ext = ext

        files = get_testid_file_list('', experiment, '%s' % _ext,
                                     'LC_ALL=C sort', res_dir)
        if merge_data == '1':
            # change extension
            _ext += '.all'
            files = merge_data_files(files)

        #print(files)
        match_str = '.*_([0-9\.]*_[0-9]*_[0-9\.]*_[0-9]*)[0-9a-z_.]*' + _ext
        for f in files:
            # print(f)
            res = re.search(match_str, f)
            #print(res.group(1))
            if res and sfil.is_in(res.group(1)):
                # only add file if enough data points
                rows = int(
                    local('wc -l %s | awk \'{ print $1 }\'' % f, capture=True))
                if rows > int(min_values):
                    out_files[res.group(1)] = f

        #print(out_files)
        #print(leg_names)
        if len(out_files) < len(leg_names):
            abort(
                'No data files for some of the source filters for experiment %s'
                % experiment)

        sorted_files = sort_by_flowkeys(out_files, source_filter)

        for name, file_name in sorted_files:
            file_names.append(file_name)

    if group_by_prefix == '1':
        # group by test prefix (and flow)

        # first, get all test id prefixes
        test_id_pfxs = {}
        for experiment in fil_experiments:
            res = re.search(match_str2, experiment)
            if res:
                test_id_pfxs[res.group(1)] = 1

        # second, sort files so that same parameter combinations for different
        # prefixes are together
        # if we have multiple prefixes, create legend entry for each
        # prefix+flow combination
        _file_names = [''] * len(file_names)
        _leg_names = []
        pfx_cnt = len(test_id_pfxs)
        i = 0
        j = -1
        last_pfx = ''
        for name in file_names:
            for p in test_id_pfxs:
                if name.find(p) > -1:
                    curr_pfx = p
                    break

            if curr_pfx != last_pfx:
                i = 0
                j += 1
                for l in leg_names:
                    _leg_names.append(curr_pfx + '-' + l)

            _file_names[i * pfx_cnt + j] = name

            i += 1
            last_pfx = curr_pfx

        file_names = _file_names
        leg_names = _leg_names

        # remove duplicates in the x-axis labels
        xlabs = list(set(xlabs))

    if lnames != '':
        lnames_arr = lnames.split(';')
        if len(lnames_arr) != len(leg_names):
            abort(
                'Number of legend names must be equal to the number of source filters'
            )
        leg_names = lnames_arr

    # filter out unchanged variables in the x labels (need at least 2 labels)
    if omit_const_xlab_vars == '1' and len(xlabs) > 1:

        xlabs_arrs = {}
        xlabs_changed = {}

        for i in range(len(xlabs)):
            xlabs_arrs[i] = xlabs[i].split('\n')

        for i in range(len(xlabs_arrs[0])):
            changed = False
            xlab_var = xlabs_arrs[0][i]
            for j in range(1, len(xlabs)):
                if xlabs_arrs[j][i] != xlab_var:
                    changed = True
                    break

            xlabs_changed[i] = changed

        for i in range(len(xlabs)):
            tmp = []
            for j in range(len(xlabs_arrs[i])):
                if xlabs_changed[j]:
                    tmp.append(xlabs_arrs[i][j].replace('_', ' ', 1))

            xlabs[i] = '\n'.join(tmp)

    print(leg_names)
    print(file_names)

    #
    # pass the data files and auxiliary info to plot function
    #

    if out_name != '':
        oprefix = out_name + '_' + test_id_pfx + '_' + metric + '_' + ptype
    else:
        oprefix = test_id_pfx + '_' + metric + '_' + ptype
    title = oprefix

    plot_cmpexp(title, file_names, xlabs, ylab, yindex, yscaler, 'pdf',
                oprefix, pdf_dir, sep, aggr, diff, omit_const, ptype, ymin,
                ymax, leg_names, stime, etime, plot_params, plot_script)

    # done
    puts('\n[MAIN] COMPLETED analyse_cmpexp %s \n' % test_id_pfx)
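
A corresponding hypothetical call for analyse_cmpexp; the 'S_<ip>_<port>' filter form is inferred from the merged-data handling above, and all values are illustrative:

analyse_cmpexp(source_filter='S_172.16.10.2_5000;S_172.16.10.3_5000',
               metric='throughput', ptype='median',
               lnames='Flow 1;Flow 2')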
Example #5
def get_clock_offsets(exp_list='experiments_completed.txt',
                      test_id='',
                      pkt_filter='',
                      baseline_host='',
                      out_dir=''):
    "Get clock offsets for all hosts"

    if len(out_dir) > 0 and out_dir[-1] != '/':
        out_dir += '/'

    if test_id == '':
        try:
            with open(exp_list) as f:
                test_id_arr = f.readlines()
        except IOError:
            abort('Cannot open file %s' % exp_list)
    else:
        test_id_arr = test_id.split(';')

    if len(test_id_arr) == 0 or test_id_arr[0] == '':
        abort('Must specify test_id parameter')

    # specify complete tcpdump parameter list
    tcpdump_filter = '-tt -r - -n ' + pkt_filter

    for test_id in test_id_arr:
        test_id = test_id.rstrip()

        # first find tcpdump files
        tcpdump_files = get_testid_file_list('', test_id, '_ctl.dmp.gz', '')

        if len(tcpdump_files) == 0:
            warn('No tcpdump files for control interface for %s' % test_id)
            continue

        # if we have tcpdumps for control interface we can assume broadcast ping
        # was enabled

        dir_name = os.path.dirname(tcpdump_files[0])
        # then look for tpconf_vars.log.gz file in that directory
        var_file = local('find -L %s -name "*tpconf_vars.log.gz"' % dir_name,
                         capture=True)

        bc_addr = ''
        router_name = ''

        if len(var_file) > 0:
            # new approach without using config.py
            # XXX no caching here yet, assume we only generate clockoffset file once
            # per experiment

            # unzip archived file
            local('gzip -cd %s > %s' % (var_file, TMP_CONF_FILE))

            # load the TPCONF_variables into oldconfig
            oldconfig = imp.load_source('oldconfig', TMP_CONF_FILE)

            # remove temporary unzipped file
            try:
                os.remove(TMP_CONF_FILE)
                os.remove(TMP_CONF_FILE +
                          'c')  # remove the compiled file as well
            except OSError:
                pass

            try:
                bc_addr = oldconfig.TPCONF_bc_ping_address
            except AttributeError:
                pass

            router_name = oldconfig.TPCONF_router[0].split(':')[0]

        else:
            # old approach using config.py

            try:
                bc_addr = config.TPCONF_bc_ping_address
            except AttributeError:
                pass

            router_name = config.TPCONF_router[0].split(':')[0]

        if bc_addr == '':
            # assume default multicast address
            bc_addr = '224.0.1.199'

        # specify complete tcpdump parameter list
        if pkt_filter != '':
            tcpdump_filter = '-tt -r - -n ' + pkt_filter
        else:
            tcpdump_filter = '-tt -r - -n ' + 'icmp and dst host ' + bc_addr

        if baseline_host == '':
            baseline_host = router_name

        #
        # now read timestamps from each host's tcpdump
        #

        # map of host names (or IPs) and sequence numbers to timestamps
        host_times = {}
        for tcpdump_file in tcpdump_files:
            host = local(
                'echo %s | sed "s/.*_\([a-z0-9\.]*\)_ctl.dmp.gz/\\1/"' %
                tcpdump_file,
                capture=True)
            host_times[host] = {}
            #print(host)
            #print(host_times)

            # We pipe gzcat through to tcpdump. Note, since tcpdump exits early
            # (due to "-c num_samples") gzcat's pipe will collapse and gzcat
            # will complain bitterly. So we dump its stderr to stderrhack.
            init_zcat = Popen(['zcat ' + tcpdump_file],
                              stdin=None,
                              stdout=PIPE,
                              stderr=stderrhack,
                              shell=True)
            init_tcpdump = Popen(['tcpdump ' + tcpdump_filter],
                                 stdin=init_zcat.stdout,
                                 stdout=PIPE,
                                 stderr=stderrhack,
                                 shell=True)

            for line in init_tcpdump.stdout.read().splitlines():
                _time = line.split(" ")[0]
                _seq = int(line.split(" ")[11].replace(',', ''))
                host_times[host][_seq] = _time

        #print(host_times)

        # get time differences and get host list
        diffs = {}
        ref_times = {}
        host_str = ''
        host_list = sorted(host_times.keys())
        # getting hosts from the config is problematic if different
        # experiments with different configs in same directory
        #host_list = sorted(config.TPCONF_router + config.TPCONF_hosts)

        for host in host_list:
            host_str += ' ' + host
            if host not in host_times:
                continue
            for seq in sorted(host_times[host].keys()):
                if seq not in diffs:
                    diffs[seq] = {}
                if baseline_host in host_times and \
                   seq in host_times[baseline_host]:
                    diffs[seq][host] = float(host_times[host][seq]) - \
                        float(host_times[baseline_host][seq])
                    ref_times[seq] = host_times[baseline_host][seq]
                else:
                    # this only happens:
                    # - if some other host has recorded more pings, OK we don't care
                    #   as this is only at the end after an experiment was finished
                    # - in old versions of TEACUP if TPCONF_router was modified
                    #warn('Cant find baseline host %s timestamp data number %i' %
                    # (baseline_host, str(seq)))
                    diffs[seq][host] = None
                    ref_times[seq] = None

        #print(diffs)

        if out_dir == '' or out_dir[0] != '/':
            dir_name = os.path.dirname(tcpdump_files[0])
            out_dir = dir_name + '/' + out_dir
        mkdir_p(out_dir)
        out_name = out_dir + test_id + CLOCK_OFFSET_FILE_EXT

        # write table of offsets (rows = time, cols = hosts)
        f = open(out_name, 'w')
        f.write('# ref_time' + host_str + '\n')
        for seq in sorted(diffs.keys()):
            if ref_times[seq] is not None:
                f.write(ref_times[seq])
            else:
                # this case should never happen
                continue

            f.write(' ')

            for host in host_list:
                if host in diffs[seq] and diffs[seq][host] is not None:
                    f.write('{0:.6f}'.format(diffs[seq][host]))
                else:
                    f.write('NA')
                if host != host_list[-1]:
                    f.write(' ')
            f.write('\n')

        f.close()
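
The timestamp/sequence extraction above assumes a fixed token layout in tcpdump's '-tt' output. A worked example under that assumption (the line text is illustrative):

# hypothetical tcpdump -tt line for one multicast ping sample
line = ('1423542312.104325 IP 172.16.10.1 > 224.0.1.199: ICMP echo request, '
        'id 1234, seq 7, length 64')
_time = line.split(" ")[0]                        # '1423542312.104325'
_seq = int(line.split(" ")[11].replace(',', ''))  # 7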
Example #6
def run_experiment(test_id='', test_id_pfx='', *args, **kwargs):

    do_init_os = kwargs.get('do_init_os', '1')
    ecn = kwargs.get('ecn', '0')
    tcp_cc_algo = kwargs.get('tcp_cc_algo', 'default')
    duration = kwargs.get('duration', '')
    if duration == '':
        abort('No experiment duration specified')

    # create sub directory for test id prefix
    mkdir_p(test_id_pfx)
    
    # remove <test_id>* files in <test_id_pfx> directory if they exist
    file_pattern = test_id_pfx + "/" + test_id + "_*"
    
    for f in glob.glob(file_pattern):
        os.remove(f)

    # log experiment in started list
    local('echo "%s" >> experiments_started.txt' % test_id)

    puts('\n[MAIN] Starting experiment %s \n' % test_id)

    tftpboot_dir = ''
    try:
        tftpboot_dir = config.TPCONF_tftpboot_dir
    except AttributeError:
        pass

    # initialise
    if tftpboot_dir != '' and do_init_os == '1':
        execute(
            get_host_info,
            netint='0',
            hosts=config.TPCONF_router +
            config.TPCONF_hosts)
        execute(
            init_os_hosts,
            file_prefix=test_id_pfx,
            local_dir=test_id_pfx)  # reboot
        clear_type_cache()  # clear host type cache
        disconnect_all()  # close all connections
        time.sleep(30)  # give hosts some time to settle down (after reboot)

    # initialise topology
    try:
        switch = '' 
        port_prefix = ''
        port_offset = 0
        try:
            switch = config.TPCONF_topology_switch
            port_prefix = config.TPCONF_topology_switch_port_prefix
            port_offset = config.TPCONF_topology_switch_port_offset
        except AttributeError:
            pass 

        if config.TPCONF_config_topology == '1' and do_init_os == '1': 
            # we cannot call init_topology directly, as it is decorated with
            # runs_once. in experiment.py we have an empty host list whereas if we
            # run init_topology from the command line we have the -H host list. executing
            # a runs_once task with an empty host list (hosts set in execute call), it
            # will only be executed for the first host, which is not what we
            # want. in contrast, if we have a host list in context, execute will be
            # executed once for each host (hence we need runs_once when called from
            # the command line).

            # sequentially configure switch
            execute(init_topology_switch, switch, port_prefix, port_offset,
                   hosts = config.TPCONF_hosts)
            # configure hosts in parallel
            execute(init_topology_host, hosts = config.TPCONF_hosts)

    except AttributeError:
        pass

    file_cleanup(test_id_pfx)  # remove any .start files
    execute(
        get_host_info,
        netmac='0',
        hosts=config.TPCONF_router +
        config.TPCONF_hosts)
    execute(sanity_checks)
    execute(init_hosts, *args, **kwargs)

    # first is the legacy case with single router and single queue definitions
    # second is the multiple router case with several routers and several queue
    # definitions 
    if isinstance(config.TPCONF_router_queues, list):
        # start queues/pipes
        config_router_queues(config.TPCONF_router_queues, config.TPCONF_router, 
                             **kwargs)
        # show pipe setup
        execute(show_pipes, hosts=config.TPCONF_router)
    elif isinstance(config.TPCONF_router_queues, dict):
        for router in config.TPCONF_router_queues.keys():
            # start queues/pipes for router r
            config_router_queues(config.TPCONF_router_queues[router], [router], 
                                 **kwargs)
            # show pipe setup
            execute(show_pipes, hosts=[router])

    # log config parameters
    execute(
        log_config_params,
        file_prefix=test_id,
        local_dir=test_id_pfx,
        hosts=['MAIN'],
        *args,
        **kwargs)
    # log host tcp settings
    execute(
        log_host_tcp,
        file_prefix=test_id,
        local_dir=test_id_pfx,
        hosts=['MAIN'],
        *args,
        **kwargs)

    # start all loggers
    execute(
        start_loggers,
        file_prefix=test_id,
        local_dir=test_id_pfx,
        remote_dir=config.TPCONF_remote_dir)

    # Start broadcast ping and loggers (if enabled)
    try: 
        if config.TPCONF_bc_ping_enable == '1':
            # for multicast need IP of outgoing interface
            # which is router's control interface
            use_multicast = socket.gethostbyname(
                    config.TPCONF_router[0].split(':')[0])
 
            # get configured broadcast or multicast address
            bc_addr = '' 
            try:
                bc_addr = config.TPCONF_bc_ping_address
            except AttributeError:
                # use default multicast address
                bc_addr = '224.0.1.199'

            execute(
                start_bc_ping_loggers,
                file_prefix=test_id,
                local_dir=test_id_pfx,
                remote_dir=config.TPCONF_remote_dir,
                bc_addr=bc_addr)

            try:
                bc_ping_rate = config.TPCONF_bc_ping_rate
            except AttributeError:
                bc_ping_rate = '1'

            # start the broadcast ping on the first router
            execute(start_bc_ping,
                file_prefix=test_id,
                local_dir=test_id_pfx,
                remote_dir=config.TPCONF_remote_dir,
                bc_addr=bc_addr,
                rate=bc_ping_rate,
                use_multicast=use_multicast,
                hosts = [config.TPCONF_router[0]])
    except AttributeError:
        pass

    # start traffic generators
    sync_delay = 5.0
    start_time = datetime.datetime.now()
    total_duration = float(duration) + sync_delay
    for t, c, v in sorted(config.TPCONF_traffic_gens, cmp=_cmp_timekeys):

        try:
            # delay everything to have synchronised start
            next_time = float(t) + sync_delay
        except ValueError:
            abort('Traffic generator entry key time must be a float')

        # add the kwargs parameter to the call of _param
        v = re.sub("(V_[a-zA-Z0-9_-]*)", "_param('\\1', kwargs)", v)

        # trim white space at both ends
        v = v.strip()

        if v[-1] != ',':
            v = v + ','
        # add counter parameter
        v += ' counter="%s"' % c
        # add file prefix parameter
        v += ', file_prefix=test_id'
        # add remote dir
        v += ', remote_dir=\'%s\'' % config.TPCONF_remote_dir
        # add test id prefix to put files into correct directory
        v += ', local_dir=\'%s\'' % test_id_pfx
        # we don't need to check for presence of tools inside start functions
        v += ', check="0"'

        # set wait time until process is started
        now = datetime.datetime.now()
        dt_diff = now - start_time
        sec_diff = (dt_diff.days * 24 * 3600 + dt_diff.seconds) + \
            (dt_diff.microseconds / 1000000.0)
        if next_time - sec_diff > 0:
            wait = str(next_time - sec_diff)
        else:
            wait = '0.0'
        v += ', wait="' + wait + '"'

        _nargs, _kwargs = eval('_args(%s)' % v)

        # get traffic generator duration
        try:
            # cast to float so the duration can be added to next_time below
            traffic_duration = float(_kwargs['duration'])
        except (KeyError, ValueError):
            traffic_duration = 0
        # find the largest total_duration possible
        if next_time + traffic_duration > total_duration:
            total_duration = next_time + traffic_duration

        execute(*_nargs, **_kwargs)

    # print process list
    print_proc_list()

    # wait until finished (add additional 5 seconds to be sure)
    total_duration = float(total_duration) + 5.0
    puts('\n[MAIN] Running experiment for %i seconds\n' % int(total_duration))
    time.sleep(total_duration)

    # shut everything down and get log data
    execute(stop_processes, local_dir=test_id_pfx)
    execute(
        log_queue_stats,
        file_prefix=test_id,
        local_dir=test_id_pfx,
        hosts=config.TPCONF_router)

    # log test id in completed list
    local('echo "%s" >> experiments_completed.txt' % test_id)

    # kill any remaining processes
    execute(kill_old_processes,
            hosts=config.TPCONF_router +
            config.TPCONF_hosts)

    # done
    puts('\n[MAIN] COMPLETED experiment %s \n' % test_id)
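
The sorted(..., cmp=_cmp_timekeys) call implies that TPCONF_traffic_gens is a list of (time, counter, parameter-string) tuples and that _cmp_timekeys orders entries by start time. A minimal sketch of such a comparator, assuming Python 2 (the cmp= keyword was removed in Python 3):

def _cmp_timekeys(x, y):
    # order two traffic generator entries by their start time key,
    # interpreting the first tuple element as seconds (a float)
    return cmp(float(x[0]), float(y[0]))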
Example #7
def get_clock_offsets(exp_list='experiments_completed.txt',
                      test_id='', pkt_filter='',
                      baseline_host='',
                      out_dir=''):
    "Get clock offsets for all hosts"

    if len(out_dir) > 0 and out_dir[-1] != '/':
        out_dir += '/'

    if test_id == '':
        try:
            with open(exp_list) as f:
                test_id_arr = f.readlines()
        except IOError:
            abort('Cannot open file %s' % exp_list)
    else:
        test_id_arr = test_id.split(';')

    if len(test_id_arr) == 0 or test_id_arr[0] == '':
        abort('Must specify test_id parameter')

    # specify complete tcpdump parameter list
    tcpdump_filter = '-tt -r - -n ' + pkt_filter

    for test_id in test_id_arr:
        test_id = test_id.rstrip()

        # first find tcpdump files
        tcpdump_files = get_testid_file_list('', test_id,
                                             '_ctl.dmp.gz', '')

        if len(tcpdump_files) == 0:
            warn('No tcpdump files for control interface for %s' % test_id)
            continue

        # if we have tcpdumps for control interface we can assume broadcast ping
        # was enabled

        dir_name = os.path.dirname(tcpdump_files[0])
        # then look for tpconf_vars.log.gz file in that directory 
        var_file = local('find -L %s -name "*tpconf_vars.log.gz"' % dir_name,
                         capture=True)

        bc_addr = ''
        router_name = ''

        if len(var_file) > 0:
            # new approach without using config.py
            # XXX no caching here yet, assume we only generate clockoffset file once
            # per experiment 

            # unzip archived file
            local('gzip -cd %s > %s' % (var_file, TMP_CONF_FILE))

            # load the TPCONF_variables into oldconfig
            oldconfig = imp.load_source('oldconfig', TMP_CONF_FILE)

            # remove temporary unzipped file 
            try:
                os.remove(TMP_CONF_FILE)
                os.remove(TMP_CONF_FILE + 'c') # remove the compiled file as well
            except OSError:
                pass

            try:
                bc_addr = oldconfig.TPCONF_bc_ping_address
            except AttributeError:
                pass

            router_name = oldconfig.TPCONF_router[0].split(':')[0]
            
        else:
            # old approach using config.py

            try:
                bc_addr = config.TPCONF_bc_ping_address
            except AttributeError:
                pass

            router_name = config.TPCONF_router[0].split(':')[0]

        if bc_addr == '':
            # assume default multicast address 
            bc_addr = '224.0.1.199' 

        # specify complete tcpdump parameter list
        if pkt_filter != '':
            tcpdump_filter = '-tt -r - -n ' + pkt_filter
        else:
            tcpdump_filter = '-tt -r - -n ' + 'icmp and dst host ' + bc_addr

        if baseline_host == '':
            baseline_host = router_name 

        #
        # now read timestamps from each host's tcpdump
        #

        # map of host names (or IPs) and sequence numbers to timestamps
        host_times = {}
        for tcpdump_file in tcpdump_files:
            host = local(
                'echo %s | sed "s/.*_\([a-z0-9\.]*\)_ctl.dmp.gz/\\1/"' %
                tcpdump_file,
                capture=True)
            host_times[host] = {}
            #print(host)
            #print(host_times)

            # We pipe gzcat through to tcpdump. Note, since tcpdump exits early
            # (due to "-c num_samples") gzcat's pipe will collapse and gzcat
            # will complain bitterly. So we dump its stderr to stderrhack.
            init_zcat = Popen(['zcat ' + tcpdump_file], stdin=None,
                              stdout=PIPE, stderr=stderrhack, shell=True)
            init_tcpdump = Popen(['tcpdump ' + tcpdump_filter],
                                 stdin=init_zcat.stdout,
                                 stdout=PIPE,
                                 stderr=stderrhack,
                                 shell=True)

            for line in init_tcpdump.stdout.read().splitlines():
                _time = line.split(" ")[0]
                _seq = int(line.split(" ")[11].replace(',', ''))
                host_times[host][_seq] = _time

        #print(host_times)

        # get time differences and get host list
        diffs = {}
        ref_times = {}
        host_str = ''
        host_list = sorted(host_times.keys())
        # getting hosts from the config is problematic if different 
        # experiments with different configs in same directory 
        #host_list = sorted(config.TPCONF_router + config.TPCONF_hosts)

        for host in host_list:
            host_str += ' ' + host
            if host not in host_times:
                continue
            for seq in sorted(host_times[host].keys()):
                if seq not in diffs:
                    diffs[seq] = {}
                if baseline_host in host_times and seq in host_times[
                        baseline_host]:
                    diffs[seq][host] = float(host_times[host][seq]) - \
                        float(host_times[baseline_host][seq])
                    ref_times[seq] = host_times[baseline_host][seq]
                else:
                    # this should only happen if TPCONF_router was
                    # modified
                    warn('Cannot find baseline host %s timestamp data' % baseline_host)
                    diffs[seq][host] = None
                    ref_times[seq] = None

        #print(diffs)

        if out_dir == '' or out_dir[0] != '/':
            dir_name = os.path.dirname(tcpdump_files[0])
            out_dir = dir_name + '/' + out_dir
        mkdir_p(out_dir)
        out_name = out_dir + test_id + CLOCK_OFFSET_FILE_EXT

        # write table of offsets (rows = time, cols = hosts)
        f = open(out_name, 'w')
        f.write('# ref_time' + host_str + '\n')
        for seq in sorted(diffs.keys()):
            if ref_times[seq] is not None:
                f.write(ref_times[seq])
            else:
                # this case should never happen
                continue

            f.write(' ')

            for host in host_list:
                if host in diffs[seq] and diffs[seq][host] is not None:
                    f.write('{0:.6f}'.format(diffs[seq][host]))
                else:
                    f.write('NA')
                if host != host_list[-1]:
                    f.write(' ')
            f.write('\n')

        f.close()
Example #8
def plot_incast_ACK_series(title='',
                           files={},
                           ylab='',
                           yindex=2,
                           yscaler=1.0,
                           otype='',
                           oprefix='',
                           pdf_dir='',
                           sep=' ',
                           aggr='',
                           omit_const='0',
                           ymin=0,
                           ymax=0,
                           lnames='',
                           stime='0.0',
                           etime='0.0',
                           groups={},
                           sort_flowkey='1',
                           burst_sep='1.0',
                           sburst=1,
                           plot_params='',
                           plot_script='',
                           source_filter=''):

    file_names = []
    leg_names = []
    _groups = []

    # Pick up case where the user has supplied a number of legend names
    # that doesn't match the number of distinct trials (as opposed to the
    # number of bursts detected within each trial)
    if lnames != '':
        if len(lnames.split(";")) != len(files.keys()):
            abort(
                'Number of legend names must be the same as the number of flows'
            )

    if sort_flowkey == '1':
        sorted_files = sort_by_flowkeys(files, source_filter)
    else:
        sorted_files = files.items()

    #print("MAIN: sorted_files: %s" % sorted_files)

    # sort by group id
    sorted_files = sort_by_group_id2(sorted_files, groups)

    for name, file_name in sorted_files:
        # Create a sequence of burst-specific legend names,
        # derived from the flowID-based legend name.
        # Keep the .R code happy by creating a groups entry
        # for each burst-specific file.
        for burst_index in range(len(file_name)):
            leg_names.append(name + "%" + str(burst_index + sburst))
            file_names.append(file_name[burst_index])
            _groups.append(groups[file_name[burst_index]])

    if lnames != '':
        # Create a sequence of burst-specific legend names,
        # derived from the per-trial legend names provided by user.
        lname_arr_orig = lnames.split(';')
        lname_arr = []
        i = 0
        for name, file_name in sorted_files:
            for burst_index in range(len(file_name)):
                lname_arr.append(lname_arr_orig[i] + "%" +
                                 str(burst_index + sburst))
            i += 1

        if len(lname_arr) != len(leg_names):
            abort(
                'Number of legend names must be the same as the number of flows'
            )
        else:
            leg_names = lname_arr

    # get the directory name here if not specified
    if pdf_dir == '':
        pdf_dir = os.path.dirname(file_names[0]) + '/'
    else:
        pdf_dir = valid_dir(pdf_dir)
        # if no absolute path make it relative to experiment_dir
        # assume experiment dir is part before first slash
        if pdf_dir[0] != '/':
            pdf_dir = file_names[0].split('/')[0] + '/' + pdf_dir
        # if pdf_dir specified create if it doesn't exist
        mkdir_p(pdf_dir)

    if plot_script == '':
        plot_script = 'R CMD BATCH --vanilla %s/plot_bursts.R' % \
                       config.TPCONF_script_path

    # for a description of parameters see plot_time_series above
    #local('which R')
    local(
        'TC_TITLE="%s" TC_FNAMES="%s" TC_LNAMES="%s" TC_YLAB="%s" TC_YINDEX="%d" TC_YSCALER="%f" '
        'TC_SEP="%s" TC_OTYPE="%s" TC_OPREFIX="%s" TC_ODIR="%s" TC_AGGR="%s" TC_OMIT_CONST="%s" '
        'TC_YMIN="%s" TC_YMAX="%s" TC_STIME="%s" TC_ETIME="%s" TC_GROUPS="%s" %s '
        'TC_BURST_SEP=1 '
        '%s %s%s_plot_bursts.Rout' %
        (title, ','.join(file_names), ','.join(leg_names), ylab, yindex,
         yscaler, sep, otype, oprefix, pdf_dir,
         aggr, omit_const, ymin, ymax, stime, etime, ','.join(map(
             str, _groups)), plot_params, plot_script, pdf_dir, oprefix))

    if config.TPCONF_debug_level == 0:
        local('rm -f %s%s_plot_bursts.Rout' % (pdf_dir, oprefix))
Ejemplo n.º 10
0
def plot_dash_goodput(title='',
                      files={},
                      groups={},
                      ylab='',
                      otype='',
                      oprefix='',
                      pdf_dir='',
                      sep=' ',
                      ymin=0,
                      ymax=0,
                      lnames='',
                      stime='0.0',
                      etime='0.0',
                      plot_params='',
                      plot_script=''):

    file_names = []
    leg_names = []

    sorted_files = sorted(files.items())
    sorted_files = sort_by_group_id(sorted_files, groups)
    #print(sorted_files)

    for name, file_name in sorted_files:
        leg_names.append(name)
        file_names.append(file_name)

    if lnames != '':
        lname_arr = lnames.split(';')
        if len(lname_arr) != len(leg_names):
            abort(
                'Number of legend names must be the same as the number of flows'
            )
        else:
            leg_names = lname_arr

    # get the directory name here if not specified
    if pdf_dir == '':
        pdf_dir = os.path.dirname(file_names[0]) + '/'
    else:
        pdf_dir = valid_dir(pdf_dir)
        # if not absolute dir, make it relative to experiment_dir
        # assume experiment dir is part before first slash
        if pdf_dir[0] != '/':
            pdf_dir = file_names[0].split('/')[0] + '/' + pdf_dir
        # if pdf_dir specified create if it doesn't exist
        mkdir_p(pdf_dir)

    if plot_script == '':
        plot_script = 'R CMD BATCH --vanilla %s/plot_dash_goodput.R' % \
                      config.TPCONF_script_path

    # interface between this code and the plot function are environment variables
    # the following variables are passed to plot function:
    # TC_TITLE:  character string that is plotted over the graph
    # TC_FNAMES: comma-separated list of file names (each file contains one data series,
    #         e.g. data for one flow). The format of each file is CSV-style, but the
    #         separator does not have to be a comma (can be set with SEP). The first
    #         column contains the timestamps. The second, third etc. columns contain
    #         data, but only one of these columns will be plotted (set with YINDEX).
    # TC_LNAMES: comma-separated list of legend names. this list has the same length
    #         as FNAMES and each entry corresponds to data in file name with the
    #         same index in FNAMES
    # TC_YLAB:   y-axis label character string
    # TC_SEP:    column separator used in data file
    # TC_OTYPE:  type of output graph (default is 'pdf')
    # TC_OPREFIX: the prefix (first part) of the graph file name
    # TC_ODIR:   directory where output files, e.g. pdfs are placed
    # TC_YMIN:   minimum value on y-axis (for zooming in), default is 0
    # TC_YMAX:   maximum value on y-axis (for zooming in), default is 0 meaning the
    #         maximum value is determined from the data
    # TC_STIME:  start time on x-axis (for zooming in), default is 0.0 meaning the start
    #         of an experiment
    # TC_ETIME:  end time on x-axis (for zooming in), default is 0.0 meaning the end of an
    #         experiment as determined from the data

    #local('which R')
    local(
        'TC_TITLE="%s" TC_FNAMES="%s" TC_LNAMES="%s" TC_YLAB="%s" TC_SEP="%s" TC_OTYPE="%s" '
        'TC_OPREFIX="%s" TC_ODIR="%s" TC_YMIN="%s" TC_YMAX="%s" TC_STIME="%s" TC_ETIME="%s" %s '
        '%s %s%s_plot_dash_goodput.Rout' %
        (title, ','.join(file_names), ','.join(leg_names), ylab, sep, otype,
         oprefix, pdf_dir, ymin, ymax, stime, etime, plot_params, plot_script,
         pdf_dir, oprefix))

    if config.TPCONF_debug_level == 0:
        local('rm -f %s%s_plot_dash_goodput.Rout' % (pdf_dir, oprefix))
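
The local() call expands the template into one shell command that sets the TC_* environment variables in front of the R invocation. A sketch of the generated command (all paths and values are illustrative):

# hypothetical expansion of the local() template above
cmd = ('TC_TITLE="exp_dash" TC_FNAMES="flow1.dat,flow2.dat" '
       'TC_LNAMES="flow1,flow2" TC_YLAB="Goodput (kbps)" TC_SEP=" " '
       'TC_OTYPE="pdf" TC_OPREFIX="exp_dash" TC_ODIR="exp/pdf/" '
       'TC_YMIN="0" TC_YMAX="0" TC_STIME="0.0" TC_ETIME="0.0" '
       'R CMD BATCH --vanilla /path/to/plot_dash_goodput.R '
       'exp/pdf/exp_dash_plot_dash_goodput.Rout')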
Example #10
def plot_time_series(title='',
                     files={},
                     ylab='',
                     yindex=2,
                     yscaler=1.0,
                     otype='',
                     oprefix='',
                     pdf_dir='',
                     sep=' ',
                     aggr='',
                     omit_const='0',
                     ymin=0,
                     ymax=0,
                     lnames='',
                     stime='0.0',
                     etime='0.0',
                     groups={},
                     sort_flowkey='1',
                     boxplot='',
                     plot_params='',
                     plot_script='',
                     source_filter=''):

    file_names = []
    leg_names = []
    _groups = []

    if sort_flowkey == '1':
        sorted_files = sort_by_flowkeys(files, source_filter)
    else:
        sorted_files = files.items()

    sorted_files = sort_by_group_id(sorted_files, groups)

    for name, file_name in sorted_files:
        leg_names.append(name)
        file_names.append(file_name)
        _groups.append(groups[file_name])

    if lnames != '':
        lname_arr = lnames.split(';')
        if boxplot == '0' and len(lname_arr) != len(leg_names):
            abort(
                'Number of legend names must be the same as the number of flows'
            )
        else:
            leg_names = lname_arr

    # get the directory name here if not specified
    if pdf_dir == '':
        pdf_dir = os.path.dirname(file_names[0]) + '/'
    else:
        pdf_dir = valid_dir(pdf_dir)
        # if not absolute dir, make it relative to experiment_dir
        # assume experiment dir is part before first slash
        if pdf_dir[0] != '/':
            pdf_dir = file_names[0].split('/')[0] + '/' + pdf_dir
        # if pdf_dir specified create if it doesn't exist
        mkdir_p(pdf_dir)

    if plot_script == '':
        plot_script = 'R CMD BATCH --vanilla %s/plot_time_series.R' % \
                      config.TPCONF_script_path

    # interface between this code and the plot function are environment variables
    # the following variables are passed to plot function:
    # TC_TITLE:  character string that is plotted over the graph
    # TC_FNAMES: comma-separated list of file names (each file contains one data series,
    #         e.g. data for one flow). The format of each file is CSV-style, but the
    #         separator does not have to be a comma (can be set with SEP). The first
    #         column contains the timestamps. The second, third etc. columns contain
    #         data, but only one of these columns will be plotted (set with YINDEX).
    # TC_LNAMES: comma-separated list of legend names. this list has the same length
    #         as FNAMES and each entry corresponds to data in file name with the
    #         same index in FNAMES
    # TC_YLAB:   y-axis label character string
    # TC_YINDEX: index of data column in file to plot on y-axis (file can have more than
    #         one data column)
    # TC_YSCALER: factor which is multiplied with each data value before plotting
    # TC_SEP:    column separator used in data file
    # TC_OTYPE:  type of output graph (default is 'pdf')
    # TC_OPREFIX: the prefix (first part) of the graph file name
    # TC_ODIR:   directory where output files, e.g. pdfs are placed
    # TC_AGGR:   set to '1' means data is aggregated over time intervals, more specifically
    #         the data is summed over the time intervals (used to determine throughput
    #         over time windows based on packet lengths)
    #         set to '0' means plot data as is
    # TC_OMIT_CONST: '0' don't omit anything,
    #             '1' omit any data series from plot that are 100% constant
    # TC_YMIN:   minimum value on y-axis (for zooming in), default is 0
    # TC_YMAX:   maximum value on y-axis (for zooming in), default is 0 meaning the
    #         maximum value is determined from the data
    # TC_STIME:  start time on x-axis (for zooming in), default is 0.0 meaning the start
    #         of an experiment
    # TC_ETIME:  end time on x-axis (for zooming in), default is 0.0 meaning the end of an
    #         experiment as determined from the data
    # TC_GROUPS: comma-separated list of group IDs (integer numbers). This list has
    #         the same length as FNAMES. If data from different experiments is plotted,
    #         each experiment will be assigned a different number and these are passed
    #         via GROUPS. This allows the plotting function to determine which data
    #         series are (or are not) from the same experiment, so that results
    #         from different experiments, that started at different times, can be
    #         plotted in the same graph.
    # TC_BOXPL:  '0' plot each point on time axis
    #         '1' plot a boxplot over all data points from all data series for each
    #         distinct timestamp (instead of a point for each data series)

    #local('which R')
    local(
        'TC_TITLE="%s" TC_FNAMES="%s" TC_LNAMES="%s" TC_YLAB="%s" TC_YINDEX="%d" TC_YSCALER="%f" '
        'TC_SEP="%s" TC_OTYPE="%s" TC_OPREFIX="%s" TC_ODIR="%s" TC_AGGR="%s" TC_OMIT_CONST="%s" '
        'TC_YMIN="%s" TC_YMAX="%s" TC_STIME="%s" TC_ETIME="%s" TC_GROUPS="%s" TC_BOXPL="%s" %s '
        '%s %s%s_plot_time_series.Rout' %
        (title, ','.join(file_names), ','.join(leg_names), ylab, yindex,
         yscaler, sep, otype, oprefix, pdf_dir, aggr, omit_const, ymin, ymax,
         stime, etime, ','.join(map(str, _groups)), boxplot, plot_params,
         plot_script, pdf_dir, oprefix))

    if config.TPCONF_debug_level == 0:
        local('rm -f %s%s_plot_time_series.Rout' % (pdf_dir, oprefix))
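
sort_by_group_id itself is not among these examples; its use here suggests it orders (legend name, file name) pairs by the group ID assigned to each file, keeping series from the same experiment adjacent. A minimal sketch under that assumption:

def sort_by_group_id(sorted_files, groups):
    # order (legend name, file name) pairs by each file's group id;
    # sorted() is stable, so the existing order within a group is kept
    return sorted(sorted_files, key=lambda pair: groups[pair[1]])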
Example #11
def run_experiment(test_id='', test_id_pfx='', *args, **kwargs):

    do_init_os = kwargs.get('do_init_os', '1')
    ecn = kwargs.get('ecn', '0')
    tcp_cc_algo = kwargs.get('tcp_cc_algo', 'default')
    duration = kwargs.get('duration', '')
    if duration == '':
        abort('No experiment duration specified')

    # create sub directory for test id prefix
    mkdir_p(test_id_pfx)

    # log experiment in started list
    local('echo "%s" >> experiments_started.txt' % test_id)

    puts('\n[MAIN] Starting experiment %s \n' % test_id)

    tftpboot_dir = ''
    try:
        tftpboot_dir = config.TPCONF_tftpboot_dir
    except AttributeError:
        pass

    # initialise
    if tftpboot_dir != '' and do_init_os == '1':
        execute(
            get_host_info,
            netint='0',
            hosts=config.TPCONF_router +
            config.TPCONF_hosts)
        execute(
            init_os_hosts,
            file_prefix=test_id_pfx,
            local_dir=test_id_pfx)  # reboot
        clear_type_cache()  # clear host type cache
        disconnect_all()  # close all connections
        time.sleep(30)  # give hosts some time to settle down (after reboot)

    # initialise topology
    try:
        switch = '' 
        port_prefix = ''
        port_offset = 0
        try:
            switch = config.TPCONF_topology_switch
            port_prefix = config.TPCONF_topology_switch_port_prefix
            port_offset = config.TPCONF_topology_switch_port_offset
        except AttributeError:
            pass 
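        # Hypothetical example values (invented, not from the original
        # config) for the three optional settings read above:
        #   TPCONF_topology_switch = 'switch2'
        #   TPCONF_topology_switch_port_prefix = 'Gi1/0/'
        #   TPCONF_topology_switch_port_offset = 5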

        if config.TPCONF_config_topology == '1' and do_init_os == '1' and  \
           not len(config.TPCONF_router) > 1:
            # We cannot call init_topology directly, as it is decorated with
            # runs_once. In experiment.py we have an empty host list, whereas
            # when init_topology is run from the command line we have the -H
            # host list. Executing a runs_once task with an empty host list
            # (hosts set in the execute call) runs it only for the first host,
            # which is not what we want here. In contrast, if we have a host
            # list in context, execute runs the task once for each host (hence
            # we need runs_once when called from the command line).

            # sequentially configure switch
            execute(init_topology_switch, switch, port_prefix, port_offset,
                   hosts = config.TPCONF_hosts)
            # configure hosts in parallel
            execute(init_topology_host, hosts = config.TPCONF_hosts)

    except AttributeError:
        pass

    file_cleanup(test_id_pfx)  # remove any .start files
    execute(
        get_host_info,
        netmac='0',
        hosts=config.TPCONF_router +
        config.TPCONF_hosts)
    execute(sanity_checks)
    execute(init_hosts, *args, **kwargs)

    # Two cases: the first (a list) is the legacy case with a single router
    # and a single set of queue definitions; the second (a dict) is the
    # multi-router case with queue definitions per router.
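    # Hypothetical illustration (values invented, not from the original
    # config) of the two accepted shapes:
    #   TPCONF_router_queues = [
    #       ('1', " source='10.0.0.0/24', dest='10.0.1.0/24', delay=V_delay, "
    #             " rate=V_up_rate, queue_disc=V_aqm "), ]
    #   TPCONF_router_queues = {
    #       'router1': [ ('1', " ... ") ],
    #       'router2': [ ('1', " ... ") ], }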
    if isinstance(config.TPCONF_router_queues, list):
        # start queues/pipes
        config_router_queues(config.TPCONF_router_queues, config.TPCONF_router, 
                             **kwargs)
        # show pipe setup
        execute(show_pipes, hosts=config.TPCONF_router)
    elif isinstance(config.TPCONF_router_queues, dict):
        for router in config.TPCONF_router_queues.keys():
            # start queues/pipes for router r
            config_router_queues(config.TPCONF_router_queues[router], [router], 
                                 **kwargs)
            # show pipe setup
            execute(show_pipes, hosts=[router])

    # log config parameters
    execute(
        log_config_params,
        file_prefix=test_id,
        local_dir=test_id_pfx,
        hosts=['MAIN'],
        *args,
        **kwargs)
    # log host tcp settings
    execute(
        log_host_tcp,
        file_prefix=test_id,
        local_dir=test_id_pfx,
        hosts=['MAIN'],
        *args,
        **kwargs)

    # start all loggers
    execute(
        start_loggers,
        file_prefix=test_id,
        local_dir=test_id_pfx,
        remote_dir=config.TPCONF_remote_dir)

    # Start broadcast ping and loggers (if enabled)
    try: 
        if config.TPCONF_bc_ping_enable == '1':
            # for multicast we need the IP of the outgoing interface,
            # which is the router's control interface
            use_multicast = socket.gethostbyname(
                    config.TPCONF_router[0].split(':')[0])
 
            # get configured broadcast or multicast address
            bc_addr = '' 
            try:
                bc_addr = config.TPCONF_bc_ping_address
            except AttributeError:
                # use default multicast address
                bc_addr = '224.0.1.199'

            execute(
                start_bc_ping_loggers,
                file_prefix=test_id,
                local_dir=test_id_pfx,
                remote_dir=config.TPCONF_remote_dir,
                bc_addr=bc_addr)

            try:
                bc_ping_rate = config.TPCONF_bc_ping_rate
            except AttributeError:
                bc_ping_rate = '1'

            # start the broadcast ping on the first router
            execute(start_bc_ping,
                file_prefix=test_id,
                local_dir=test_id_pfx,
                remote_dir=config.TPCONF_remote_dir,
                bc_addr=bc_addr,
                rate=bc_ping_rate,
                use_multicast=use_multicast,
                hosts = [config.TPCONF_router[0]])
    except AttributeError:
        pass

    # start traffic generators
    sync_delay = 5.0
    max_wait_time = sync_delay
    start_time = datetime.datetime.now()
    for t, c, v in sorted(config.TPCONF_traffic_gens, cmp=_cmp_timekeys):

        try:
            # delay everything to have synchronised start
            next_time = float(t) + sync_delay
        except ValueError:
            abort('Traffic generator entry key time must be a float')

        if next_time > max_wait_time:
            max_wait_time = next_time

        # add the kwargs parameter to the call of _param
        v = re.sub("(V_[a-zA-Z0-9_-]*)", "_param('\\1', kwargs)", v)
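        # e.g. (illustrative): "dur=V_duration, rate=V_rate" becomes
        # "dur=_param('V_duration', kwargs), rate=_param('V_rate', kwargs)",
        # so the values are resolved when the string is evaluated below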

        # trim white space at both ends
        v = v.strip()

        if v[-1] != ',':
            v = v + ','
        # add counter parameter
        v += ' counter="%s"' % c
        # add file prefix parameter
        v += ', file_prefix=test_id'
        # add remote dir
        v += ', remote_dir=\'%s\'' % config.TPCONF_remote_dir
        # add test id prefix to put files into correct directory
        v += ', local_dir=\'%s\'' % test_id_pfx
        # we don't need to check for presence of tools inside start functions
        v += ', check="0"'

        # set wait time until process is started
        now = datetime.datetime.now()
        dt_diff = now - start_time
        sec_diff = (dt_diff.days * 24 * 3600 + dt_diff.seconds) + \
            (dt_diff.microseconds / 1000000.0)
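        # (this equals dt_diff.total_seconds(), which only exists from
        # Python 2.7 on; presumably spelled out here for older interpreters)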
        if next_time - sec_diff > 0:
            wait = str(next_time - sec_diff)
        else:
            wait = '0.0'
        v += ', wait="' + wait + '"'

        _nargs, _kwargs = eval('_args(%s)' % v)
        execute(*_nargs, **_kwargs)

    # print process list
    print_proc_list()

    # wait until finished (add additional 5 seconds to be sure)
    total_duration = float(duration) + max_wait_time + 5.0
    puts('\n[MAIN] Running experiment for %i seconds\n' % int(total_duration))
    time.sleep(total_duration)

    # shut everything down and get log data
    execute(stop_processes, local_dir=test_id_pfx)
    execute(
        log_queue_stats,
        file_prefix=test_id,
        local_dir=test_id_pfx,
        hosts=config.TPCONF_router)

    # log test id in completed list
    local('echo "%s" >> experiments_completed.txt' % test_id)

    # kill any remaining processes
    execute(kill_old_processes,
            hosts=config.TPCONF_router +
            config.TPCONF_hosts)

    # done
    puts('\n[MAIN] COMPLETED experiment %s \n' % test_id)
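
The synchronised-start logic in the traffic generator loop above can be
isolated into a small standalone sketch (names are illustrative, not part of
TEACUP): each generator's configured start time t is shifted by sync_delay,
and the wait value handed to the generator absorbs whatever time the setup
has already consumed, so all generators start relative to the same origin:

import datetime

def compute_wait(t, sync_delay, start_time):
    # absolute start offset for this generator
    next_time = float(t) + sync_delay
    # time already spent since the loop started
    elapsed = (datetime.datetime.now() - start_time).total_seconds()
    # never wait a negative amount
    return max(next_time - elapsed, 0.0)

start = datetime.datetime.now()
print(compute_wait('0.0', 5.0, start))   # ~5.0 when called immediately
print(compute_wait('10.0', 5.0, start))  # ~15.0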