Example #1
def test_validate(self, load, ra, swift, ceph, opsmgr):
    file_path = 'path'
    test_mod.validate(file_path)
    load.assert_called_once_with(file_path)
    ra.assert_called_once_with(load.return_value)
    swift.assert_called_once_with(load.return_value)
    ceph.assert_called_once_with(load.return_value)
    opsmgr.assert_called_once_with(load.return_value)
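The five mock arguments after self are presumably injected by stacked mock.patch decorators that the excerpt omits. A minimal sketch of that wiring, with hypothetical patch targets (not the project's real module paths):

from unittest import mock

# Sketch only: the patch targets are placeholders. mock.patch decorators
# apply bottom-up, so the decorator closest to the function supplies the
# first mock argument after self (load), and the outermost supplies the
# last (opsmgr).
@mock.patch('test_mod.validate_opsmgr')
@mock.patch('test_mod.validate_ceph')
@mock.patch('test_mod.validate_swift')
@mock.patch('test_mod.validate_ra')
@mock.patch('test_mod.load_config')
def test_validate(self, load, ra, swift, ceph, opsmgr):
    ...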
Example #2
def main(config_dir, output_dir):
    """Run the experiment."""
    config, msg = validate(config_dir)
    if config is None:
        write_status(output_dir, False, msg)
        return 1

    try:
        result = {}
        config_iter = itertools.product(
                config['models'],
                config['targets'],
                config['devices'])
        for (model, target, device) in config_iter:
            # TODO(weberlo): There has to be some idiom to get rid of this boilerplate.
            if model not in result:
                result[model] = {}
            if target not in result[model]:
                result[model][target] = {}
            if device not in result[model][target]:
                result[model][target][device] = {}
            result[model][target][device] = run_single(model, target, device, config)
    except Exception as e:
        write_status(output_dir, False, 'Exception encountered:\n' + render_exception(e))
        return 1

    write_json(output_dir, 'data.json', result)
    write_status(output_dir, True, 'success')
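One idiom that would resolve the TODO above is a recursively defaulting dict, which removes the membership checks entirely. A minimal sketch reusing the loop's names (nothing here is project-specific, and defaultdict serializes like a plain dict):

from collections import defaultdict

def nested_dict():
    # Each missing key produces another auto-vivifying dict.
    return defaultdict(nested_dict)

result = nested_dict()
for (model, target, device) in config_iter:
    result[model][target][device] = run_single(model, target, device, config)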
Example #3
def main(config_dir, output_dir):
    config, msg = validate(config_dir)
    if config is None:
        write_status(output_dir, False, msg)
        return 1

    if 'pt' not in config['frameworks']:
        write_status(output_dir, True, 'PT not run')
        return 0

    datasets = config['datasets']
    for dataset, max_idx in datasets:
        success, msg = run_trials(
            'pt',
            'treelstm',
            config['dry_run'],
            config['n_times_per_input'],
            config['n_inputs'],
            treelstm_trial,
            treelstm_setup,
        treelstm_teardown,
        ['device', 'dataset', 'idx'],
        [config['devices'], [dataset], list(range(max_idx))],
            path_prefix=output_dir,
            append_to_csv=True)
        if not success:
            write_status(output_dir, success, msg)
            return 1
    write_status(output_dir, True, 'success')
Example #4
def main(data_dir, config_dir, output_dir):
    try:
        config, msg = validate(config_dir)
        if config is None:
            write_status(output_dir, False, msg)
            return 1

        summary = {}
        for model in sorted(config['models']):
            summary[model] = []
            # the script will not be run if there is an error
            cmd_id = 0
            for _, _, exp_config in parse_commands(model, config):
                for combo in unfold_settings(exp_config):
                    stats, msg = parse_data_file(exp_config['type'], model,
                                                 config, combo, data_dir,
                                                 cmd_id)
                    if stats is None:
                        write_status(output_dir, False, msg)
                        return 1
                    stats['command_id'] = cmd_id
                    summary[model].append(stats)
                cmd_id += 1
        write_json(output_dir, 'data.json', summary)
        write_status(output_dir, True, 'success')
    except Exception as e:
        write_status(output_dir, False, render_exception(e))
        return 1
Example #5
def main(data_dir, config_dir, output_dir):
    config, msg = validate(config_dir)
    if config is None:
        write_status(output_dir, False, msg)
        return 1

    all_data = sort_data(data_dir)
    most_recent = all_data[-1]
    most_recent = {k: v for (k, v) in most_recent.items() if k not in METADATA_KEYS}
    summary = ''

    for (model, targets) in most_recent.items():
        # simulated target summary
        sim_targets = {target: targets[target] for target in targets if target in SIM_TARGETS}
        for (target, devices) in sim_targets.items():
            for (device, stats) in devices.items():
                summary += '_Stats on ({}, {}, {}) & _\n'.format(model, target.upper(), device.upper())
                for (stat, val) in stats.items():
                    summary += '{}: {:.2E}\n'.format(stat, Decimal(val))
        # physical target summary
        phys_targets = {target: v for (target, v) in targets.items() if target in PHYS_TARGETS}
        for (target, devices) in phys_targets.items():
            for (device, mean_time) in devices.items():
                summary += 'Time on ({}, {}, {}): {:.2f}\n'.format(
                        model, target.upper(), device.upper(), mean_time)

    write_summary(output_dir, config['title'], summary)
    write_status(output_dir, True, 'success')
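For reference, the '{:.2E}' formatting above follows Decimal's own formatting rules (single-digit exponents, unlike float formatting). A quick standalone illustration:

from decimal import Decimal

# Decimal formats exponents without zero-padding: 'E-4' rather than
# the 'E-04' a float would produce.
print('{:.2E}'.format(Decimal(0.000123)))  # 1.23E-4
print('{:.2E}'.format(Decimal(45678)))     # 4.57E+4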
Example #6
def main(config_dir, output_dir):
    config, msg = validate(config_dir)
    if config is None:
        write_status(output_dir, False, msg)
        return 1

    if not config['methods']:
        write_status(output_dir, True, 'Nothing run')
        return 0

    for model in sorted(config['models']):
        cmd_id = 0
        for success, msg, processed_command in parse_commands(model, config):
            if not success:
                write_status(output_dir, False, msg)
                return 1
            print(f'Running on command: {model}: {processed_command}')
            success, msg = eval_command(model, processed_command, config,
                                        config_dir, output_dir, cmd_id)
            if not success:
                write_status(output_dir, False, msg)
                return 1
            cmd_id += 1

    write_status(output_dir, True, 'success')
Example #7
File: run.py Project: uwsampl/relay-bench
def main(config_dir, output_dir):
    """Run the experiment."""
    config, msg = validate(config_dir)
    if config is None:
        write_status(output_dir, False, msg)
        return 1

    try:
        # The experiment involves RPC calls that could potentially hang,
        # so we keep a timeout on our end too.
        killswitch = Timer(config.get('timeout', 300),
                           lambda: timeout_failure(output_dir))
        killswitch.start()

        result = {}
        config_iter = itertools.product(config['models'], config['targets'],
                                        config['devices'])
        for (model, target, device) in config_iter:
            # TODO(weberlo): There has to be some idiom to get rid of this boilerplate.
            if model not in result:
                result[model] = {}
            if target not in result[model]:
                result[model][target] = {}
            if device not in result[model][target]:
                result[model][target][device] = {}
            result[model][target][device] = run_single(model, target, device,
                                                       config)

        killswitch.cancel()
    except Exception as e:
        write_status(output_dir, False,
                     'Exception encountered:\n' + render_exception(e))
        return 1

    write_json(output_dir, 'data.json', result)
    write_status(output_dir, True, 'success')
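The killswitch above is a stock threading.Timer used as a watchdog: if the work finishes first, cancel() stops the timer; otherwise the timer thread fires the failure hook. A self-contained sketch of the same pattern, with a toy payload standing in for the RPC-heavy experiment:

from threading import Timer
import time

def on_timeout():
    # Stand-in for timeout_failure(output_dir) in the example above.
    print('timed out; marking the run as failed')

killswitch = Timer(2.0, on_timeout)
killswitch.start()

time.sleep(1.0)      # toy workload that finishes within the deadline
killswitch.cancel()  # finished in time, so the hook never fires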
Example #8
def main(data_dir, config_dir, output_dir):
    config, msg = validate(config_dir)
    if config is None:
        write_status(output_dir, False, msg)
        return 1

    # No further analysis is required beyond the raw stats reported by the VTA
    # simulator, so we just propagate the data to the next stage of the
    # pipeline.
    data = read_json(data_dir, 'data.json')
    write_json(output_dir, 'data.json', data)
    write_status(output_dir, True, 'success')
Example #9
def main(data_dir, config_dir, output_dir):
    try:
        config, msg = validate(config_dir)
        if config is None:
            write_status(output_dir, False, msg)
            return 1

        all_data = sort_data(data_dir)
        most_recent = all_data[-1]
        success, msg = render_graph(config, most_recent, output_dir)
        write_status(output_dir, success, msg)
    except Exception as e:
        write_status(output_dir, False,
                     'Exception encountered: ' + render_exception(e))
        return 1
    finally:
        plt.close()
Example #10
def main(data_dir, config_dir, output_dir):
    config, msg = validate(config_dir)
    if config is None:
        write_status(output_dir, False, msg)
        return 1

    # read in data, output graphs of most recent data, and output longitudinal graphs
    all_data = sort_data(data_dir)
    most_recent = all_data[-1]

    try:
        generate_longitudinal_comparisons(all_data, output_dir)
        generate_arm_vta_comparisons(most_recent, output_dir)
    except Exception as e:
        write_status(output_dir, False, 'Exception encountered:\n' + render_exception(e))
        return 1

    write_status(output_dir, True, 'success')
Example #11
def main(data_dir, config_dir, output_dir):
    try:
        config, msg = validate(config_dir)
        if config is None:
            write_status(output_dir, False, msg)
            return 1

        all_data = sort_data(data_dir)
        most_recent = all_data[-1]

        summary = summarize(config, most_recent)
        write_summary(output_dir, 'Pareto Curve Trial', summary)
        write_status(output_dir, True, 'success')

    except Exception as e:
        write_status(output_dir, False,
                     'Exception encountered: ' + render_exception(e))
        return 1
Example #12
def main(config_dir, output_dir, method, dataset):
    config, msg = validate(config_dir)
    if config is None:
        write_status(output_dir, False, msg)
        return 1

    if 'relay' not in config['frameworks']:
        write_status(output_dir, True, 'Relay not run')
        return 0

    if method not in config['relay_methods']:
        write_status(output_dir, True, '{} not run'.format(method))
        return 0

    datasets = config['datasets']
    max_idx = -1
    for pair in datasets:
        if pair[0] == dataset:
            max_idx = pair[1]
            break

    # dataset is not included in the config, so skip
    if max_idx == -1:
        write_status(output_dir, True, 'Dataset {} not run'.format(dataset))
        return 0

    success, msg = run_trials(
        'relay',
        'treelstm',
        config['dry_run'],
        config['n_times_per_input'],
        config['n_inputs'],
        treelstm_trial,
        treelstm_setup,
        treelstm_teardown,
        ['device', 'method', 'dataset', 'idx'],
        [config['devices'], [method], [dataset], list(range(max_idx))],
        path_prefix=output_dir,
        append_to_csv=True)
    if not success:
        write_status(output_dir, success, msg)
        return 1
    write_status(output_dir, True, 'success')
Example #13
def main(config_dir,
         experiment_mode,
         model_name,
         input_idx,
         params_file,
         out_file,
         trial_run=False,
         trial_run_outfile=None):
    config, msg = validate(config_dir)
    if config is None:
        print(msg)
        return 1

    use_dtr = (experiment_mode == 'dtr')

    i = int(input_idx)
    # trial_run may arrive as the string 'True' when passed on the command
    # line, hence the string comparison rather than a truthiness check.
    is_trial = trial_run == 'True'

    if config['set_seed']:
        torch.manual_seed(config['seed'] + i)
        random.seed(config['seed'] + i)

    cwd = os.getcwd()

    # handle specific params, esp. for DTR
    specific_params = read_json(cwd, params_file)
    assert 'batch_size' in specific_params
    if use_dtr:
        assert 'memory_budget' in specific_params
        if specific_params['memory_budget'] > 0:
            print(f'Setting budget to {int(specific_params["memory_budget"])}')
            torch.set_memory_budget(int(specific_params['memory_budget']))
    if is_trial:
        timing_loop(model_name, i, config, use_dtr, specific_params, None,
                    True, trial_run_outfile)
        return

    with open(out_file, 'a', newline='') as csvfile:
        writer = create_csv_writer(csvfile, specific_params)
        timing_loop(model_name, i, config, use_dtr, specific_params, writer)
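create_csv_writer is a project helper not shown in the excerpt. A hypothetical version might wrap csv.DictWriter and emit a header only when appending to an empty file; the column names below are assumptions, not the project's real schema:

import csv

def create_csv_writer(csvfile, specific_params):
    # Hypothetical sketch: derive columns from the run's parameters
    # plus assumed measurement fields.
    fieldnames = ['input', 'time'] + sorted(specific_params.keys())
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    if csvfile.tell() == 0:
        # Position 0 in append mode means the file is empty, so write
        # the header exactly once.
        writer.writeheader()
    return writer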
Example #14
def main(config_dir, output_dir, device):
    config, msg = validate(config_dir)
    if config is None:
        write_status(output_dir, False, msg)
        return 1

    if 'tf' not in config['frameworks']:
        write_status(output_dir, True, 'TF not run')
        return 0

    if device not in config['devices']:
        write_status(output_dir, True, 'TF not run on {}'.format(device))
        return 0

    configure_seed(config)

    enable_xla = [False]
    if config['use_xla']:
        enable_xla.append(True)

    success, msg = run_trials(
        'tf',
        'cnn_comp',
        config['dry_run'],
        config['n_times_per_input'],
        config['n_inputs'],
        cnn_trial,
        cnn_setup,
        cnn_teardown, ['network', 'device', 'batch_size', 'enable_xla'],
        [config['networks'], [device], config['batch_sizes'], enable_xla],
        path_prefix=output_dir,
        append_to_csv=True)

    write_status(output_dir, success, msg)
    if not success:
        return 1
Example #15
from config import CONFIG
from snekrouter import SnekRouter
import time, threading, socket
from http.server import HTTPServer
from validate_config import validate

validate()

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
addr = (CONFIG['HOST'], CONFIG['PORT'])
number_threads = CONFIG['NUMBER_THREADS']
sock.bind(addr)
sock.listen(number_threads)

class Thread(threading.Thread):

	def __init__(self, i):
		threading.Thread.__init__(self)
		self.i, self.daemon = i, True
		self.start()

	def run(self):
		# Each thread runs its own HTTPServer on the shared, pre-bound
		# socket: bind_and_activate=False skips the usual bind/activate,
		# and the no-op server_bind/server_close keep each server from
		# re-binding or closing the shared socket.
		httpd = HTTPServer(addr, SnekRouter, False)
		httpd.socket = sock
		httpd.server_bind = httpd.server_close = lambda self: None
		httpd.serve_forever()

def main():
	# Assumed completion (the excerpt ends at the signature above):
	# spawn the worker threads and keep the main thread alive.
	for i in range(number_threads):
		Thread(i)
	while True:
		time.sleep(60)