Example #1
0
def run_tests():
    """Run every configured test and diff its stats against the pickled reference.

    Prints a per-test pass/fail line and a final summary; on any failure the
    user is pointed at the --regenerate-reference flag.
    """
    error_suggestion = (
        '\n\nIf you intentionally changed the output or tests, please run ./%s --regenerate-reference\n\n'
        % os.path.relpath(this_file_path))
    print('Running tests against reference values in tests/results/changes/ ...')

    all_passed = True
    for case in test_suite:
        dirname, config_abspath = get_or_make_dir(case)
        print('Preparing to run test in %s/ ...' % dirname)

        # A missing reference pickle is fatal; the message explains how to
        # regenerate the references.
        ref_path = get_reference_path(dirname)
        assert os.path.exists(ref_path), (
            'Cannot find ' + os.path.relpath(ref_path) + error_suggestion)
        with open(ref_path, 'rb') as fh:
            reference = pickle.load(fh)

        timeloop.run_timeloop(dirname, config_abspath)
        stats = parse_timeloop_output.parse_timeloop_stats(dirname)

        # diff() is truthy when the fresh stats deviate from the reference.
        if not diff(reference, stats):
            print('Test passed in %s' % dirname)
        else:
            print('Test failed in %s\n' % dirname)
            all_passed = False

    print('Done running tests in tests/results/changes/.')
    if not all_passed:
        print('Some tests failed.')
        print(error_suggestion)
    else:
        print('All tests passed.')
Example #2
0
def regenerate_reference():
    """Re-run every test and overwrite its reference pickle with fresh stats."""
    print('Overwriting all reference pickle files in tests/results/changes/ ...')

    for case in test_suite:
        dirname, config_abspath = get_or_make_dir(case)
        print('Running test in %s/ ...' % dirname)

        timeloop.run_timeloop(dirname, config_abspath)
        stats = parse_timeloop_output.parse_timeloop_stats(dirname)

        # Sanity check: stats must compare equal to themselves under diff().
        assert not diff(stats, stats)

        ref_path = get_reference_path(dirname)
        print('Writing results to %s ...' % ref_path)
        with open(ref_path, 'wb') as fh:
            pickle.dump(stats, fh, pickle.HIGHEST_PROTOCOL)

    print('Done writing reference pickle files.')
Example #3
0
                # Announce the sweep point: layer name plus the
                # simulated-annealing hyper-parameters being evaluated.
                print(
                    f'Preparing to run timeloop for layer {name} at t {args.temp} cooling {cooling} max iter {max_iter} beta {beta}'
                )

                # One result directory per (layer, temp, cooling, max_iter, beta)
                # combination so sweep points never clobber each other.
                dirname = f'run/{name}/{args.temp}_{cooling}_{max_iter}_{beta}/'
                subprocess.check_call(['mkdir', '-p', dirname])

                # The mapper stats file marks a completed run; skip duplicates.
                if os.path.isfile(dirname + 'timeloop-mapper.stats.txt'):
                    print(
                        'The current sparse problem evaluated already, skip!')
                    continue

                timeloop.run_timeloop(dirname,
                                      configfile=config_abspath,
                                      workload_bounds=problem,
                                      t=args.temp,
                                      cooling=cooling,
                                      max_iter=max_iter,
                                      beta=beta)

                # An empty dict signals the mapper found no valid mapping.
                stats = parse_timeloop_output.parse_timeloop_stats(dirname)
                if stats == {}:
                    print(
                        "Timeloop couldn't find a mapping for this problem within the search parameters, please check the log for more details."
                    )
                else:
                    print(
                        "Run successful, see log for text stats, or use the Python parser to parse the XML stats."
                    )
                    # print("Stats from run:")
                    # pprint.pprint(stats)
Example #4
0
File: sample.py  Project: ajis01/CS259Mini2
# Just test that path points to a valid config file.
with open(config_abspath, 'r') as f:
    # Pass an explicit Loader: bare yaml.load() is deprecated and unsafe on
    # untrusted input, and the other runner script in this project already
    # uses yaml.SafeLoader.
    config = yaml.load(f, Loader=yaml.SafeLoader)

# Run Timeloop once per CNN layer in the workload list.
for i, problem in enumerate(cnn_layers):
    print("Preparing to run timeloop for problem index ", i)

    # One output directory per problem index, e.g. run/problem_0/.
    dirname = 'run/problem_' + str(i) + '/'
    subprocess.check_call(['mkdir', '-p', dirname])

    timeloop.run_timeloop(dirname,
                          configfile=config_abspath,
                          workload_bounds=problem)

    # An empty dict signals the mapper found no valid mapping.
    stats = parse_timeloop_output.parse_timeloop_stats(dirname)
    if stats == {}:
        print(
            "Timeloop couldn't find a mapping for this problem within the search parameters, please check the log for more details."
        )
    else:
        print(
            "Run successful, see log for text stats, or use the Python parser to parse the XML stats."
        )
        # print("Stats from run:")
        # pprint.pprint(stats)

print("DONE.")
Example #5
0
def main():
    """Parse CLI arguments and evaluate Timeloop over every layer of a network.

    For each target layer this builds a result directory that encodes the
    full run configuration, skips layers that were already evaluated (and,
    for dense runs, copies results of layers with identical problem configs),
    rewrites the workload bounds into a per-run config, invokes Timeloop,
    and parses the emitted stats.
    """
    parser = argparse.ArgumentParser(description='Run Timeloop')
    parser.add_argument('--config',
                        '-c',
                        default='../configs/mapper/VGGN.yaml',
                        help='config file')  # yaml
    parser.add_argument('--log',
                        '-l',
                        default='timeloop.log',
                        help='name of log file')
    parser.add_argument('--output_dir',
                        '-o',
                        default='results',
                        # FIX: help text previously said 'name of log file'
                        # (copy-paste from --log).
                        help='output directory for results')
    parser.add_argument(
        '--fake_eval',
        nargs='?',
        type=str2bool,
        const=True,
        default=False,
        # FIX: typo 'evalution' -> 'evaluation'.
        help='test evaluation that no actual evaluation is performed')

    # search configuration
    parser.add_argument(
        '--net',
        default='vgg',
        choices=['vgg', 'wrn', 'dense', 'resnet18', 'mobilenetv2'],
        help='model name')
    parser.add_argument('--dataset',
                        default='cifar-10',
                        choices=['cifar-10', 'imagenet'],
                        help='dataset name')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=64,
                        help='batchsize of the problem')
    parser.add_argument('--dataflow',
                        default='CN',
                        choices=['CK', 'CN', 'KN', 'CP', 'KP', 'PQ', 'PN'],
                        help='spatial mapping')
    parser.add_argument('--phase',
                        default='fw',
                        choices=['fw', 'bw', 'wu'],
                        help='Training phase')  # , 'wu'

    # mapper configuration
    parser.add_argument(
        '--terminate',
        nargs='?',
        type=int,
        const=1000,
        help=
        'termination condition: number of consecutive suboptimal valid mapping found'
    )
    parser.add_argument('--threads',
                        nargs='?',
                        type=int,
                        const=32,
                        help='number of threads used for mapping search')
    # synthetic mask
    parser.add_argument('--synthetic',
                        nargs='?',
                        type=str2bool,
                        const=True,
                        default=False,
                        help='Is data mask synthetic?')
    parser.add_argument('--sparsity',
                        nargs='?',
                        type=float,
                        const=0.1,
                        help='synthetic sparsity of the problem')
    parser.add_argument('--act_sparsity',
                        default='_act_sparsity.json',
                        help='file suffix for activation sparsity')
    # usually let's not provide this flag to save some space?
    parser.add_argument('--save',
                        nargs='?',
                        type=str,
                        const='saved_synthetic_mask',
                        help='name of saved synthetic mask')

    # naive replication
    parser.add_argument('--replication',
                        nargs='?',
                        type=str2bool,
                        const=True,
                        default=False,
                        help='do we apply naive replication?')

    # scability exp
    parser.add_argument('--array_width',
                        type=int,
                        default=16,
                        help='PE array width')
    parser.add_argument('--glb_scaling',
                        nargs='?',
                        type=str2bool,
                        const=True,
                        default=False,
                        help='scale GLB based on array_width')

    # Evaluate Dense Timeloop?
    parser.add_argument('--dense',
                        nargs='?',
                        type=str2bool,
                        const=True,
                        default=False,
                        help='evaluate use original timeloop')
    parser.add_argument('--dense_dirname',
                        default='dense-timeloop',
                        help='directory name of dense timeloop')
    args = parser.parse_args()

    print(args)

    with open(args.config, 'r') as f:
        config = yaml.load(f, Loader=yaml.SafeLoader)  # load yaml

    # Weight-update phase on sparse runs additionally needs per-layer
    # activation sparsity data loaded from <net><suffix> JSON.
    use_act_sparsity = False
    if args.phase == 'wu' and not args.dense:
        with open(args.net + args.act_sparsity) as f:
            act_sparsity_data = json.load(f)
            use_act_sparsity = True

    target_layers = layer_sizes[args.dataset][args.net]
    target_names = layer_names[args.dataset][args.net]

    # NOTE(review): total_cycles/total_energy are initialized but never
    # accumulated below — per-layer cycle/energy values are parsed and
    # discarded. TODO confirm whether a summary total was intended.
    total_cycles = 0
    total_energy = 0
    for i, (problem, name) in enumerate(zip(target_layers, target_names)):
        # TODO: redesign path
        # make configuration first, layer last to facilitate potential overall speedup and energy saving

        # training phase, batchsize, network, and layer
        dirname = args.output_dir + f'/{args.array_width}_{args.glb_scaling}/{args.phase}_{args.batchsize}/{args.dataset}_{args.net}_{name}/'

        # which source accelerator we use and spatial dataflow
        dirname += ('dense' if args.dense else 'sparse') + '_{}'.format(
            args.dataflow) + ('_replicate/' if args.replication else '/')

        # synthetic (with target sparsity) or actual mask
        dirname += 'synthetic_{}/'.format(
            args.sparsity) if args.synthetic else 'actual/'

        subprocess.check_call(['mkdir', '-p', dirname])
        print('Problem {} result dir: {}'.format(
            i, dirname))  # use this to hint the problem we are working on

        if args.dense:
            # Skip already-evaluated problems (stats file marks completion).
            if os.path.isfile(dirname + 'timeloop-mapper.stats.txt'):
                print('The current dense problem evaluated already, skip!')
                continue
            # Dense results depend only on the problem config, so a repeated
            # layer config can reuse the earlier layer's results verbatim.
            if problem in target_layers[:i]:
                j = target_layers.index(problem)  # repeated index
                print(
                    'Same Config as Problem {} layer {}, skip evaluation and copy result'
                    .format(j, target_names[j]))
                src_dirname = args.output_dir + f'/{args.array_width}_{args.glb_scaling}/{args.phase}_{args.batchsize}/{args.dataset}_{args.net}_{target_names[j]}/'
                # which source accelerator we use and spatial dataflow
                src_dirname += ('dense' if args.dense else
                                'sparse') + '_{}'.format(args.dataflow) + (
                                    '_replicate/' if args.replication else '/')
                # synthetic (with target sparsity) or actual mask
                src_dirname += 'synthetic_{}/'.format(
                    args.sparsity) if args.synthetic else 'actual/'
                copy_tree(src_dirname, dirname)
                continue
        else:
            if os.path.isfile(dirname + 'timeloop-mapper.stats.txt'):
                print('The current sparse problem evaluated already, skip!')
                continue
        # dump the all configuration to check, also hopefully helps
        # reproducibility
        with open(os.path.join(dirname, 'args.json'), 'w') as arg_log:
            json.dump(vars(args), arg_log)

        # (Optional): Adapt to Path module
        env_list = timeloop.rewrite_workload_bounds(
            src=args.config,
            dst=os.path.join(dirname, os.path.basename(args.config)),
            workload_bounds=problem,
            model=args.net,  # for actual mask only
            # datset = args.dataset, # unimplemented
            layer=name,  # for actual mask only
            batchsize=args.batchsize,
            dataflow=args.dataflow,
            phase=args.phase,
            terminate=args.terminate,
            threads=args.threads,
            synthetic=True if use_act_sparsity else args.synthetic,
            sparsity=act_sparsity_data[name]
            if use_act_sparsity else args.sparsity,
            save=args.save,
            replication=args.replication,
            array_width=args.array_width,
            glb_scaling=args.glb_scaling,
            dense=args.dense)

        if args.fake_eval:
            print(
                'in fake eval mode, the problem will be evaluate in actual run'
            )
            continue

        timeloop.run_timeloop(dirname=dirname,
                              configfile=args.config,
                              logfile=args.log,
                              env_list=env_list,
                              dense=args.dense,
                              dense_dirname=args.dense_dirname)

        # An empty energy dict signals the mapper found no valid mapping.
        cycle, energy, mac = parse_timeloop_output.parse_timeloop_stats(
            dirname, args.dense)
        if energy == {}:
            print(
                "Timeloop couldn't find a mapping for this problem within the search parameters, please check the log for more details."
            )
        else:
            print(
                "Run successful, see log for text stats, or use the Python parser to parse the XML stats."
            )

    print("DONE.")