Example #1
File: logger.py Project: cknd/rllab
def refresh(self):
    import os
    # Terminal height/width from stty, so the table is redrawn to fit the screen.
    rows, columns = os.popen('stty size', 'r').read().split()
    # Keep only the most recent rows that fit (reserve 3 lines for the header).
    tabulars = self.tabulars[-(int(rows) - 3):]
    # ANSI escapes: clear the screen and move the cursor to the top-left corner.
    sys.stdout.write("\x1b[2J\x1b[H")
    sys.stdout.write(tabulate(tabulars, self.headers))
    sys.stdout.write("\n")
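For context, a minimal standalone sketch of the same redraw logic (the function name and the tabulars/headers parameters are illustrative, not from the project):

import os
import sys

from tabulate import tabulate

def refresh_terminal_table(tabulars, headers):
    # Terminal height via stty; shutil.get_terminal_size() is a more portable alternative.
    rows, _columns = os.popen('stty size', 'r').read().split()
    visible = tabulars[-(int(rows) - 3):]   # newest rows that still fit on screen
    sys.stdout.write("\x1b[2J\x1b[H")       # ANSI: clear screen, cursor to top-left
    sys.stdout.write(tabulate(visible, headers))
    sys.stdout.write("\n")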
Example #3
def dump_tabular(*args, **kwargs):
    wh = kwargs.pop("write_header", None)  # optional override: force or suppress the CSV header
    if len(_tabular) > 0:
        if _log_tabular_only:
            table_printer.print_tabular(_tabular)
        else:
            for line in tabulate(_tabular).split('\n'):
                log(line, *args, **kwargs)
        tabular_dict = dict(_tabular)

        # write to the tensorboard folder
        # This assumes that the keys in each iteration won't change!
        dump_tensorboard(args, kwargs)

        # Also write to the csv files
        # This assumes that the keys in each iteration won't change!
        for tabular_fd in list(_tabular_fds.values()):
            writer = csv.DictWriter(tabular_fd,
                                    fieldnames=list(tabular_dict.keys()))
            if wh or (wh is None
                      and tabular_fd not in _tabular_header_written):
                writer.writeheader()
                _tabular_header_written.add(tabular_fd)
            writer.writerow(tabular_dict)
            tabular_fd.flush()
        del _tabular[:]
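A hypothetical driver loop for dump_tabular(), assuming the usual rllab-style companions record_tabular() and add_tabular_output() live in the same logger module (the import path, file name, and keys below are illustrative):

from rllab.misc import logger  # assumed import path; forks vary

logger.add_tabular_output('progress.csv')        # registers a file handle in _tabular_fds
for itr in range(100):
    logger.record_tabular('Iteration', itr)      # appends a (key, value) pair to _tabular
    logger.record_tabular('AverageReturn', 0.0)
    logger.dump_tabular()                        # prints the table, writes one CSV row, clears _tabular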
Example #4
def dump_tabular(*args, **kwargs):
    if not _disabled:  # and not _tabular_disabled:
        wh = kwargs.pop("write_header", None)
        if len(_tabular) > 0:
            if _log_tabular_only:
                table_printer.print_tabular(_tabular)
            else:
                for line in tabulate(_tabular).split('\n'):
                    log(line, *args, **kwargs)
            if not _tabular_disabled:
                tabular_dict = dict(_tabular)
                # Also write to the csv files
                # This assumes that the keys in each iteration won't change!
                for tabular_file_name, tabular_fd in list(
                        _tabular_fds.items()):
                    keys = tabular_dict.keys()
                    if tabular_file_name in _tabular_headers:
                        # check against existing keys: if new keys re-write Header and pad with NaNs
                        existing_keys = _tabular_headers[tabular_file_name]
                        if not set(existing_keys).issuperset(set(keys)):
                            joint_keys = set(keys).union(set(existing_keys))
                            tabular_fd.flush()
                            read_fd = open(tabular_file_name, 'r')
                            reader = csv.DictReader(read_fd)
                            rows = list(reader)
                            read_fd.close()
                            tabular_fd.close()
                            tabular_fd = _tabular_fds[
                                tabular_file_name] = open(
                                    tabular_file_name, 'w')
                            new_writer = csv.DictWriter(
                                tabular_fd, fieldnames=list(joint_keys))
                            new_writer.writeheader()
                            for row in rows:
                                for key in joint_keys:
                                    if key not in row:
                                        row[key] = np.nan
                            new_writer.writerows(rows)
                            _tabular_headers[tabular_file_name] = list(
                                joint_keys)
                    else:
                        _tabular_headers[tabular_file_name] = keys

                    writer = csv.DictWriter(
                        tabular_fd,
                        fieldnames=_tabular_headers[tabular_file_name]
                    )
                    if wh or (wh is None and tabular_file_name
                              not in _tabular_header_written):
                        writer.writeheader()
                        _tabular_header_written.add(tabular_file_name)
                        _tabular_headers[tabular_file_name] = keys
                    # add NaNs in all empty fields from the header
                    for key in _tabular_headers[tabular_file_name]:
                        if key not in tabular_dict:
                            tabular_dict[key] = np.nan
                    writer.writerow(tabular_dict)
                    tabular_fd.flush()
            del _tabular[:]
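The header-widening behaviour above can be shown in isolation. Below is a simplified sketch of the same idea for a single CSV file; append_row() and its arguments are hypothetical, it assumes the file already exists with a header covering known_keys, and csv.DictWriter's restval fills the NaN padding that Example #4 does by hand:

import csv
import numpy as np

def append_row(path, row, known_keys):
    # Append `row` (a dict) to the CSV at `path`, rewriting the header when new keys appear.
    new_keys = set(row) - set(known_keys)
    if new_keys:
        with open(path, 'r', newline='') as f:           # re-read the rows written so far
            old_rows = list(csv.DictReader(f))
        known_keys = list(known_keys) + sorted(new_keys)
        with open(path, 'w', newline='') as f:           # rewrite with the widened header
            writer = csv.DictWriter(f, fieldnames=known_keys, restval=np.nan)
            writer.writeheader()
            writer.writerows(old_rows)                   # old rows get NaN in the new columns
            writer.writerow(row)
    else:
        with open(path, 'a', newline='') as f:
            writer = csv.DictWriter(f, fieldnames=known_keys, restval=np.nan)
            writer.writerow(row)
    return known_keys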
Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('filename', type=str)
    parser.add_argument('--vid', type=str, default='madrl.mp4')
    parser.add_argument('--deterministic', action='store_true', default=False)
    parser.add_argument('--heuristic', action='store_true', default=False)
    parser.add_argument('--evaluate', action='store_true', default=True)
    parser.add_argument('--n_trajs', type=int, default=20)
    parser.add_argument('--n_steps', type=int, default=20)
    parser.add_argument('--same_con_pol', action='store_true')
    args = parser.parse_args()

    fh = FileHandler(args.filename)

    if fh.train_args['map_file'] is not None:
        map_pool = np.load(
            os.path.join('', os.path.basename(fh.train_args['map_file'])))
    else:
        if fh.train_args['map_type'] == 'rectangle':
            env_map = TwoDMaps.rectangle_map(*map(int, fh.train_args['map_size'].split(',')))
        elif fh.train_args['map_type'] == 'complex':
            env_map = TwoDMaps.complex_map(*map(int, fh.train_args['map_size'].split(',')))
        else:
            raise NotImplementedError()
        map_pool = [env_map]

    env = PursuitEvade(map_pool, n_evaders=fh.train_args['n_evaders'],
                       n_pursuers=fh.train_args['n_pursuers'], obs_range=fh.train_args['obs_range'],
                       n_catch=fh.train_args['n_catch'], urgency_reward=fh.train_args['urgency'],
                       surround=bool(fh.train_args['surround']),
                       sample_maps=bool(fh.train_args['sample_maps']),
                       flatten=bool(fh.train_args['flatten']), reward_mech='global',
                       catchr=fh.train_args['catchr'], term_pursuit=fh.train_args['term_pursuit'])

    if fh.train_args['buffer_size'] > 1:
        env = ObservationBuffer(env, fh.train_args['buffer_size'])

    hpolicy = None
    if args.evaluate:
        minion = Evaluator(env, fh.train_args, args.n_steps, args.n_trajs, args.deterministic,
                           'heuristic' if args.heuristic else fh.mode)
        evr = minion(fh.filename, file_key=fh.file_key, same_con_pol=args.same_con_pol,
                     hpolicy=hpolicy)
        print(evr)
        print(tabulate(evr, headers='keys'))
    else:
        minion = Visualizer(env, fh.train_args, args.n_steps, args.n_trajs, args.deterministic,
                            fh.mode)
        rew, info = minion(fh.filename, file_key=fh.file_key, vid=args.vid)
        pprint.pprint(rew)
        pprint.pprint(info)
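The final print in this example relies on tabulate() accepting a mapping of column name to list of values when headers='keys'. A toy illustration (the metric names are made up, not the evaluator's real output):

from tabulate import tabulate

evr = {'traj': [0, 1, 2], 'return': [10.5, 12.1, 9.8]}
print(tabulate(evr, headers='keys'))   # renders a plain-text table with 'traj' and 'return' columns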
Example #6
def dump_tabular(*args, **kwargs):
    if len(_tabular) > 0:
        if _log_tabular_only:
            table_printer.print_tabular(_tabular)
        else:
            for line in tabulate(_tabular).split('\n'):
                log(line, *args, **kwargs)
        tabular_dict = dict(_tabular)
        # Also write to the csv files
        # This assumes that the keys in each iteration won't change!
        for tabular_fd in _tabular_fds.values():
            writer = csv.DictWriter(tabular_fd, fieldnames=tabular_dict.keys())
            if tabular_fd not in _tabular_header_written:
                writer.writeheader()
                _tabular_header_written.add(tabular_fd)
            writer.writerow(tabular_dict)
            tabular_fd.flush()
        del _tabular[:]
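The "keys in each iteration won't change" caveat in these examples is load-bearing: csv.DictWriter raises when a row contains a field missing from fieldnames. A toy reproduction, unrelated to the logger itself:

import csv
import io

buf = io.StringIO()
writer = csv.DictWriter(buf, fieldnames=['loss'])
writer.writeheader()
writer.writerow({'loss': 0.5})
writer.writerow({'loss': 0.4, 'accuracy': 0.9})  # ValueError: dict contains fields not in fieldnames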
Example #7
File: logger.py Project: cknd/rllab
def dump_tabular(*args, **kwargs):
    if len(_tabular) > 0:
        if _log_tabular_only:
            table_printer.print_tabular(_tabular)
        else:
            for line in tabulate(_tabular).split('\n'):
                log(line, *args, **kwargs)
        tabular_dict = dict(_tabular)
        # Also write to the csv files
        # This assumes that the keys in each iteration won't change!
        for tabular_fd in list(_tabular_fds.values()):
            writer = csv.DictWriter(tabular_fd, fieldnames=list(tabular_dict.keys()))
            if tabular_fd not in _tabular_header_written:
                writer.writeheader()
                _tabular_header_written.add(tabular_fd)
            writer.writerow(tabular_dict)
            tabular_fd.flush()
        del _tabular[:]