Exemplo n.º 1
0
def run():
    """Drive a multi-run simulation: prepare, run N simulations, build a report."""
    args = _parse_args()
    logging.info("Parsed arguments in {}: {}".format(__name__, args))

    _prepare()

    # generate all simulation inputs up front
    for config_module in (nodes_config, ticks_config, network_config):
        config_module.create(unknown_arguments=True)

    for index in range(args.repeat):
        run_number = str(index + 1)
        logging.info('Starting {}/{} simulation'.format(run_number, args.repeat))

        # tag this run's output files with its run number
        utils.update_args(Namespace(tag_appendix='_' + run_number))
        simulation_cmd.run(unknown_arguments=True)

        # preserve this run's postprocessing output and log under run-<n>/
        bash.check_output('cp -r {}/postprocessing {}/run-{}'
                          .format(config.soft_link_to_run_dir, config.soft_link_to_multi_run_dir, run_number))
        bash.check_output('cp {} {}/run-{}'.format(config.run_log, config.soft_link_to_multi_run_dir, run_number))
        logging.info('Finished {}/{} simulation'.format(run_number, args.repeat))

    # collect the shared CSVs from the last run into the multi-run folder
    csv_files = (config.args_csv, config.ticks_csv, config.analysed_ticks_csv,
                 config.general_infos_csv, config.nodes_csv, config.network_csv)
    for csv_file in csv_files:
        bash.check_output('cp {} {}/.'.format(csv_file, config.soft_link_to_multi_run_dir))
    _concat_files()

    bash.check_output(rcmd.create_report(config.soft_link_to_multi_run_dir))
    logging.info('Created report in folder={}'.format(config.soft_link_to_multi_run_dir))
Exemplo n.º 2
0
def create(unknown_arguments=False):
    """Create the nodes CSV from CLI arguments.

    Parses per-node-group arguments, builds the node list and writes
    ``config.nodes_csv`` with one row per node.

    Args:
        unknown_arguments: if True, ignore unrecognized CLI options
            instead of erroring out.
    """
    # message aligned with the sibling configs ("Called network/ticks config")
    logging.info('Called nodes config')

    parser = _create_parser()
    # sys.argv[2:]: argv[0] is the program, argv[1] the sub-command
    if unknown_arguments:
        args = parser.parse_known_args(sys.argv[2:])[0]
    else:
        args = parser.parse_args(sys.argv[2:])
    logging.info("Parsed arguments in {}: {}".format(__name__, args))
    utils.update_args(args)

    nodes = []
    for index, node_group in enumerate(node_groups):
        node_args = getattr(args, node_group['variable'])
        if node_args:
            # a node group must be given its full argument tuple; bail out otherwise
            if len(node_args) != config.number_of_node_group_arguments:
                parser.exit(-1, 'Pass all {} arguments [amount] [share] for {}\n'
                            .format(config.number_of_node_group_arguments, node_group['variable']))

            nodes.extend(_create_node_group(node_args, node_group['variable'], index + 1))

    logging.info('Created {}:'.format(config.nodes_csv))
    # json.dumps serializes the list directly; no need to copy it first
    print(json.dumps(nodes, indent=4))

    with open(config.nodes_csv, 'w') as file:
        writer = csv.writer(file)
        writer.writerow(['group', 'name', 'share', 'latency'])
        writer.writerows(
            [[node.group, node.name, node.share, node.latency] for node in nodes])
    logging.info('End nodes config')
Exemplo n.º 3
0
def create(unknown_arguments=False):
    """Create the network CSV: a randomly generated connectivity matrix.

    Args:
        unknown_arguments: if True, ignore unrecognized CLI options.

    Raises:
        Exception: if the generated matrix is not fully connected.
    """
    logging.info('Called network config')

    utils.check_for_file(config.nodes_csv)
    nodes = utils.read_csv(config.nodes_csv)

    parser = _create_parser()
    # sys.argv[2:]: argv[0] is the program, argv[1] the sub-command
    if unknown_arguments:
        args = parser.parse_known_args(sys.argv[2:])[0]
    else:
        args = parser.parse_args(sys.argv[2:])
    logging.info("Parsed arguments in {}: {}".format(__name__, args))
    utils.update_args(args)

    # seed so the matrix is reproducible for a given --seed
    random.seed(args.seed)

    header = _create_header(nodes)

    matrix = _create_matrix(header, args.connectivity)

    if _check_if_fully_connected(matrix) is not True:
        # fixed typo in the error message ("a reachable" -> "are reachable")
        raise Exception(
            "Not all nodes are reachable. Consider to raise the connectivity.")

    logging.info('Created {}:'.format(config.network_csv))
    print(pandas.DataFrame(matrix))

    with open(config.network_csv, "w") as file:
        writer = csv.writer(file)
        writer.writerows(matrix)
    logging.info('End network config')
Exemplo n.º 4
0
def create(unknown_arguments=False):
    """Build the ticks CSV: block events and transactions per tick per node."""
    logging.info('Called ticks config')

    utils.check_for_file(config.nodes_csv)
    nodes = utils.read_csv(config.nodes_csv)

    parser = _create_parser()
    # argv[0] is the program name and argv[1] the sub-command
    args = (parser.parse_known_args(sys.argv[2:])[0]
            if unknown_arguments
            else parser.parse_args(sys.argv[2:]))
    logging.info("Parsed arguments in {}: {}".format(__name__, args))
    utils.update_args(args)

    # seed so the generated ticks are reproducible for a given --seed
    random.seed(args.seed)

    block_events = _create_block_events(
        nodes, args.amount_of_ticks, args.blocks_per_tick)
    ticks = _create_ticks(
        nodes, block_events, args.txs_per_tick, args.amount_of_ticks)

    logging.info('Created {}:'.format(config.ticks_csv))
    print(pandas.DataFrame(ticks))

    with open(config.ticks_csv, "w") as out_file:
        csv.writer(out_file).writerows(ticks)
    logging.info('End ticks config')
Exemplo n.º 5
0
def _parse_args():
    """Parse known CLI arguments (no options defined) and propagate them."""
    parsed, _unknown = argparse.ArgumentParser().parse_known_args(sys.argv[1:])
    utils.update_args(parsed)
    return parsed
Exemplo n.º 6
0
def _parse_args():
    """Parse the ``--repeat`` option and propagate the parsed values."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--repeat', default=2, type=checkargs.check_positive_int, help='Number of repetition of the simulation.'
                        )

    # ignore any options meant for the sub-commands
    parsed, _unknown = parser.parse_known_args(sys.argv[2:])
    utils.update_args(parsed)
    return parsed
Exemplo n.º 7
0
    def test_update_args_1(self, m_open):
        """update_args writes a header line and a values line to the file."""
        utils.update_args(Namespace(int=1, float=1.1, string='test'))

        handle = m_open()
        self.assertEqual(handle.write.call_count, 2)

        # first write is the header row
        header_line = handle.write.call_args_list[0][0][0]
        for column in ('string', 'float', 'int'):
            self.assertIn(column, header_line)

        # second write holds the values
        value_line = handle.write.call_args_list[1][0][0]
        for value in ('1', '1.1', 'test'):
            self.assertIn(value, value_line)
Exemplo n.º 8
0
def test_update_args_2(m_open, m_read):
    """Previously stored args are merged with the new namespace before writing."""
    Args = namedtuple('Args', 'int float')
    m_read.return_value = [Args(2, 2.2)]

    utils.update_args(Namespace(int=1, string='test'))

    handle = m_open()
    assert handle.write.call_count == 2

    # second write holds the merged values: new int, stored float, new string
    written_values = handle.write.call_args_list[1][0][0]
    for expected in ('1', '2.2', 'test'):
        assert expected in written_values
Exemplo n.º 9
0
def test_update_args_1(m_open):
    """update_args writes a header line and a values line to the file."""
    utils.update_args(Namespace(int=1, float=1.1, string='test'))

    handle = m_open()
    assert handle.write.call_count == 2

    # first write is the header row
    header_line = handle.write.call_args_list[0][0][0]
    assert all(name in header_line for name in ('string', 'float', 'int'))

    # second write holds the values
    value_line = handle.write.call_args_list[1][0][0]
    assert all(value in value_line for value in ('1', '1.1', 'test'))
Exemplo n.º 10
0
    def test_update_args_2(self, m_open, m_read):
        """Previously stored args are merged with the new namespace."""
        Args = namedtuple('Args', 'int float')
        m_read.return_value = [Args(2, 2.2)]

        utils.update_args(Namespace(int=1, string='test'))

        handle = m_open()
        self.assertEqual(handle.write.call_count, 2)

        # second write holds the merged values: new int, stored float, new string
        value_line = handle.write.call_args_list[1][0][0]
        for expected in ('1', '2.2', 'test'):
            self.assertIn(expected, value_line)
Exemplo n.º 11
0
def _parse_args():
    """Parse the shared CLI options (--verbose, --tag) and propagate them."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--verbose', action="store_true", help='Verbose log.')
    parser.add_argument('--tag',
                        default='run',
                        help='Tag that will be added to every csv file.')

    # ignore any options meant for the sub-commands
    parsed, _unknown = parser.parse_known_args(sys.argv[2:])
    utils.update_args(parsed)
    return parsed
Exemplo n.º 12
0
    def __init__(self, args=DEFAULT_ARGS):
        """Initialize the network from *args* merged over DEFAULT_ARGS.

        A missing ``network_seed`` is drawn at random so the run stays
        reproducible once the seed is recorded.
        """
        super().__init__()
        self.args = update_args(DEFAULT_ARGS, args)

        if self.args.network_seed is None:
            # random.randrange requires an int bound: 1e6 is a float,
            # deprecated since Python 3.10 and a TypeError on 3.12+
            self.args.network_seed = random.randrange(10 ** 6)

        # activation functions resolved from their config names
        self.out_act = get_activation(self.args.out_act)
        self.m1_act = get_activation(self.args.m1_act)
        self.m2_act = get_activation(self.args.m2_act)

        self._init_vars()
        self.reset()
Exemplo n.º 13
0
def run(unknown_arguments=False):
    """Execute one simulation run: parse args, wire up the runner, time it."""
    # all three input CSVs must exist before a run can start
    for required in (config.ticks_csv, config.network_csv, config.nodes_csv):
        utils.check_for_file(required)

    parser = _create_parser()
    # argv[0] is the program name and argv[1] the sub-command
    args = (parser.parse_known_args(sys.argv[2:])[0]
            if unknown_arguments
            else parser.parse_args(sys.argv[2:]))
    logging.info("Parsed arguments in {}: {}".format(__name__, args))
    utils.update_args(args)

    _check_skip_ticks(args.skip_ticks)

    context = Context()

    logging.info(config.log_line_run_start + context.run_name)

    # append the optional tag appendix (set e.g. by the multi-run driver)
    tag = context.args.tag + getattr(context.args, 'tag_appendix', '')
    writer = Writer(tag)
    runner = Runner(context, writer)

    # inject the run phases into the runner
    runner._prepare = Prepare(context)
    runner._postprocessing = PostProcessing(context, writer)
    runner._event = Event(context)

    start = time.time()
    runner.run()
    logging.info("The duration of the run was {} seconds".format(
        str(time.time() - start)))
Exemplo n.º 14
0
def run(unknown_arguments=False):
    """Execute one simulation run and record its duration in Info."""
    # all three input CSVs must exist before a run can start
    for required in (config.ticks_csv, config.network_csv, config.nodes_csv):
        utils.check_for_file(required)

    parser = _create_parser()
    # argv[0] is the program name and argv[1] the sub-command
    args = (parser.parse_known_args(sys.argv[2:])[0]
            if unknown_arguments
            else parser.parse_args(sys.argv[2:]))
    logging.info("Parsed arguments in {}: {}".format(__name__, args))
    utils.update_args(args)

    _check_skip_ticks(args.skip_ticks)

    context = Context()

    logging.info(config.log_line_run_start + context.run_name)

    # append the optional tag appendix (set e.g. by the multi-run driver)
    tag = context.args.tag
    if hasattr(context.args, 'tag_appendix'):
        tag += context.args.tag_appendix
    context.tag = tag

    runner = Runner(context)

    start = time.time()
    runner.run()
    duration = time.time() - start

    # expose the elapsed time for the report
    Info().time_elapsed = str(duration)

    logging.info("The duration of the run was {} seconds".format(
        str(duration)))
Exemplo n.º 15
0
def create(unknown_arguments=False):
    """Create (currently: only truncate) the network CSV.

    NOTE(review): this variant builds the header but never creates or
    writes a matrix, so ``config.network_csv`` ends up empty. Presumably
    the matrix-writing code was removed or is pending — confirm intent.

    Args:
        unknown_arguments: if True, ignore unrecognized CLI options.
    """
    logging.info('Called network config')

    utils.check_for_file(config.nodes_csv)
    nodes = utils.read_csv(config.nodes_csv)

    parser = _create_parser()
    # sys.argv[2:]: argv[0] is the program, argv[1] the sub-command
    if unknown_arguments:
        args = parser.parse_known_args(sys.argv[2:])[0]
    else:
        args = parser.parse_args(sys.argv[2:])
    logging.info("Parsed arguments in {}: {}".format(__name__, args))
    utils.update_args(args)

    random.seed(args.seed)

    # built but unused here; kept in case _create_header has side effects
    header = _create_header(nodes)

    logging.info('Created {}:'.format(config.network_csv))

    # opening with "w" truncates the file; the unused csv.writer was dropped
    with open(config.network_csv, "w"):
        pass
    logging.info('End network config')
Exemplo n.º 16
0
    def __init__(self, args=DEFAULT_ARGS):
        """Initialize the reservoir from *args* merged over DEFAULT_ARGS."""
        super().__init__()
        self.args = update_args(DEFAULT_ARGS, args)

        # random.randrange requires an int bound: 1e6 is a float,
        # deprecated since Python 3.10 and a TypeError on 3.12+.
        # np.random.randint accepts a float bound, but pass an int for
        # consistency.
        if self.args.res_seed is None:
            self.args.res_seed = random.randrange(10 ** 6)
        if self.args.res_x_seed is None:
            self.args.res_x_seed = np.random.randint(10 ** 6)

        # time constant for the reservoir state x
        self.tau_x = 10
        self.activation = torch.tanh

        # use second set of dynamics equations as in jazayeri papers
        self.dynamics_mode = 0

        self._init_vars()
        self.reset()
Exemplo n.º 17
0
def adjust_args(args):
    """Normalize, seed and log the argument namespace; return the final args.

    Order matters: slurm parameters first, then the config file, then
    seeds, then model-path overrides, and logging initialization last so
    the final parameters end up in the logged config file.
    """
    # don't use logging.info before we initialize the logger!! or else stuff is gonna fail

    # dealing with slurm. do this first!! before anything else
    # needs to be before seed setting, so we can set it
    if args.slurm_id is not None:
        from parameters import apply_parameters
        args = apply_parameters(args.slurm_param_path, args)

    # loading from a config file
    if args.config is not None:
        config = load_args(args.config)
        args = update_args(args, config)

    # setting seeds; randrange needs an int bound (1e6 is a float:
    # deprecated since Python 3.10, TypeError on 3.12+)
    if args.res_seed is None:
        args.res_seed = random.randrange(10 ** 6)
    if args.seed is None:
        args.seed = random.randrange(10 ** 6)
    if args.network_seed is None:
        args.network_seed = random.randrange(10 ** 6)

    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    # TODO
    # in case we are loading from a model
    # if we don't use this we might end up with an error when loading model
    # uses a new seed
    if args.model_path is not None:
        config = get_config(args.model_path)
        args = update_args(args, config,
                           overwrite=None)  # overwrite Nones only
        # these must match the stored model or loading will fail
        enforce_same = ['N', 'D1', 'D2', 'net', 'res_bias', 'use_reservoir']
        for v in enforce_same:
            if v in config and args.__dict__[v] != config[v]:
                print(
                    f'Warning: based on config, changed {v} from {args.__dict__[v]} -> {config[v]}'
                )
                args.__dict__[v] = config[v]

    # shortcut for specifying train everything including reservoir
    if args.train_parts == ['all']:
        args.train_parts = ['']

    # shortcut for training in designated order
    if args.sequential and len(args.train_order) == 0:
        args.train_order = list(range(len(args.dataset)))

    # TODO
    if 'rsg' in args.dataset[0]:
        args.out_act = 'exp'
    else:
        args.out_act = 'none'

    # number of task variables, latent variables, and output variables
    args.T = len(args.dataset)
    L, Z = 0, 0
    for dset in args.dataset:
        config = get_config(dset, ctype='dset', to_bunch=True)
        L = max(L, config.L)
        Z = max(Z, config.Z)
    args.L = L
    args.Z = Z

    # initializing logging
    # do this last, because we will be logging previous parameters into the config file
    if not args.no_log:
        if args.slurm_id is not None:
            log = log_this(args,
                           'logs',
                           os.path.join(
                               args.name.split('_')[0],
                               args.name.split('_')[1]),
                           checkpoints=args.log_checkpoint_models)
        else:
            log = log_this(args,
                           'logs',
                           args.name,
                           checkpoints=args.log_checkpoint_models)

        logging.basicConfig(format='%(message)s',
                            filename=log.run_log,
                            level=logging.DEBUG)
        console = logging.StreamHandler()
        console.setLevel(logging.DEBUG)
        logging.getLogger('').addHandler(console)
        args.log = log
    else:
        logging.basicConfig(format='%(message)s', level=logging.DEBUG)
        logging.info('NOT LOGGING THIS RUN.')

    # logging, when loading models from paths
    if args.model_path is not None:
        logging.info(f'Using model path {args.model_path}')
        if args.model_config_path is not None:
            logging.info(f'...with config file {args.model_config_path}')
        else:
            logging.info(
                '...but not using any config file. Errors may ensue due to net param mismatches'
            )

    return args
Exemplo n.º 18
0
                        default=None,
                        help='select from rsg intervals')
    # delay memory pro anti preset angles
    parser.add_argument('--angles',
                        nargs='*',
                        type=float,
                        default=None,
                        help='angles in degrees for dmpa tasks')

    args = parser.parse_args()
    if args.config is not None:
        # if using config file, load args from config, ignore everything else
        config_args = load_args(args.config)
        # name and config themselves always come from the CLI, never the file
        del config_args.name
        del config_args.config
        args = update_args(args, config_args)
    else:
        # add task-specific arguments. shouldn't need to do this if loading from config file
        task_args = get_task_args(args)
        args = update_args(args, task_args)

    # keep the full command line around for reproducibility
    args.argv = ' '.join(sys.argv)

    if args.mode == 'create':
        # create and save a dataset
        dset, config = create_dataset(args)
        save_dataset(dset, args.name, config=config)
    elif args.mode == 'load':
        # visualize a dataset
        dset = load_rb(args.name)
        t_type = type(dset[0])
Exemplo n.º 19
0
# CLI options for evaluating a stored model (parser is created above this chunk)
parser.add_argument('-m', '--m_noise', default=None, type=float)
parser.add_argument('--x_noise', default=None, type=float)
parser.add_argument('-x', '--reservoir_x_init', default=None, type=str)
parser.add_argument('-a', '--test_all', action='store_true')
parser.add_argument('-n', '--no_plot', action='store_true')
parser.add_argument('-c',
                    '--config',
                    default=None,
                    help='path to config file if custom')
args = parser.parse_args()

# fall back to the config stored with the model unless one is given explicitly
if args.config is None:
    config = get_config(args.model, ctype='model')
else:
    config = json.load(open(args.config, 'r'))
config = update_args(args, config)
dsets = config.dataset

net = load_model_path(args.model, config=config)
# assuming config is in the same folder as the model

if args.test_all:
    _, loss = test_model(net, config)
    print('avg summed loss (all):', loss)

if not args.no_plot:
    # sample a small batch of tests and report per-task losses
    data, t_losses = test_model(net, config, n_tests=12)
    print('avg losses:')
    for t, j in t_losses.items():
        print(t + ': ' + str(j))
    run_id = '/'.join(args.model.split('/')[-3:-1])