Beispiel #1
0
def iter_data(input_file, **learn_args):
    """Yield (X, Y, is_train) batches from every replay listed in *input_file*.

    :param input_file: path to a text file with one replay name per line
    :param learn_args: must contain 'batch_size' and 'local_replays';
        forwarded to get_train_test_data()
    :yields: (X, Y, True) per training batch, then (X_test, Y_test, False)
        once per replay
    """
    # 'with' closes the listing file (previously leaked an open handle).
    with open(input_file) as listing:
        replay_names = [line.rstrip('\r\n') for line in listing]
    random.shuffle(replay_names)
    batch_size = learn_args['batch_size']
    for index, replay_name in enumerate(replay_names):
        try:
            replay = from_local(replay_name) if learn_args['local_replays'] \
                else from_s3(replay_name)
        except Exception:
            # Best-effort: skip replays that fail to load instead of
            # aborting the whole iteration. (Was a bare 'except:', which
            # also swallowed KeyboardInterrupt/SystemExit.)
            continue

        # Replays that are too short carry no useful training signal.
        if replay.num_frames < 10:
            log(logger.info, 'Skipping:', replay_name, '#frames:',
                replay.num_frames)
            continue

        log(logger.info, 'Replay:', replay_name, '(', (index + 1), '/',
            len(replay_names), ')', 'Winner:',
            replay.player_names[replay.winner - 1])
        X_train, Y_train, X_test, Y_test = \
            get_train_test_data(replay, replay.winner, **learn_args)

        for start in range(0, X_train.shape[0], batch_size):
            begin, end = start, start + batch_size
            yield X_train[begin:end], Y_train[begin:end], True
        yield X_test, Y_test, False
def repo_full(con, std_tables, stoptime, offset):
    """Rebuild the repositories table, then aggregate repository data,
    committing after each step."""
    steps = (
        ("initializing repositories table",
         lambda: initialize_repo_table(con)),
        ("aggregating repositories",
         lambda: aggregate_repository(con, stoptime, offset)),
    )
    for message, step in steps:
        log(message)
        step()
        con.commit()
Beispiel #3
0
def learn_from_single_replay(input_file, **learn_args):
    """Train a model on one replay and log test score and accuracy.

    :param input_file: replay name/path (local file or S3 key depending
        on learn_args['local_replays'])
    :param learn_args: forwarded to get_train_test_data()/create_model();
        must contain 'local_replays' and 'model_prefix'
    """
    if learn_args['local_replays']:
        replay = from_local(input_file)
    else:
        replay = from_s3(input_file)

    X_train, Y_train, X_test, Y_test = get_train_test_data(
        replay, replay.winner, **learn_args)

    # Persist the weights that score best on validation accuracy.
    checkpoint = ModelCheckpoint(filepath='%s.h5' % learn_args['model_prefix'],
                                 monitor='val_acc',
                                 save_best_only=True,
                                 mode='max',
                                 verbose=0)

    model = create_model(X_train, **learn_args)
    model.fit(X_train,
              Y_train,
              nb_epoch=20,
              verbose=1,
              callbacks=[checkpoint],
              validation_data=(X_test, Y_test))

    score = model.evaluate(X_test, Y_test, verbose=0)
    log(logger.info, 'Test score:', score[0])
    log(logger.info, 'Test accuracy:', score[1])
def extract_data(csv_files):
    """Read every CSV file and collect one header row plus all data rows.

    :param csv_files: iterable of CSV file paths
    :return: tuple (headers, rows) where headers is the first line of the
        first file (or None if no files) and rows is every other line
    """
    headers = None
    rows = []

    file_count = 0
    for csv_file in csv_files:
        line_count = 0
        with open(csv_file, mode='r') as current_file:
            csv_reader = csv.reader(current_file,
                                    delimiter=INPUT_DELIMITER,
                                    quotechar=INPUT_QUOTECHAR)
            for row in csv_reader:
                # Only the very first line of the first file becomes the
                # header. NOTE(review): first lines of subsequent files are
                # appended as *data* rows - confirm all inputs share one
                # header row before relying on this.
                # ('is None' instead of '== None' - identity comparison.)
                if line_count == 0 and headers is None:
                    headers = row

                else:
                    rows.append(row)

                line_count += 1
                log('PROCESSED {} LINES IN {}'.format(line_count, csv_file))

        file_count += 1
        log('COMPLETED PROCESSING FILE {} ({})'.format(file_count, csv_file))

    return (headers, rows)
    def __init__(self,
                 sounds_directory: str = SOUNDS_DIRECTORY,
                 sound_on: bool = True,
                 music_on: bool = True,
                 sound_effects_on: bool = True):
        """
        AudioPlayer is a singleton.

        :param sounds_directory: str -- name of the directory without path
        :param sound_on: bool -- if sounds should be played or not
        :param music_on: bool -- whether music tracks should be played
        :param sound_effects_on: bool -- whether sound effects should be played
        """
        # NOTE(review): sound_on is accepted but never stored or used here -
        # confirm whether it should gate anything.
        self._music_on = music_on
        self._sound_effects_on = sound_effects_on

        # Master volume; both channel volumes start at the master level.
        self.volume: float = 1.0
        self.music_volume: float = self.volume
        self.effects_volume: float = self.volume

        # All sounds are preloaded up-front from the given directory.
        self.sounds: Dict[str, Sound] = self._preload_sounds(sounds_directory)
        self.currently_played: List[Player] = []
        self.current_music: Optional[Player] = None
        log(f'Loaded {len(self.sounds)} sounds.', console=True)

        # Remembered name/position of a paused music track, if any.
        self.paused_track_name: Optional[str] = None
        self.paused_track_time: Optional[float] = None

        self.playlists: Dict[str, List[str]] = self._setup_playlists()
        self.current_playlist: Optional[List[str]] = None
        self.playlist_index: int = 0

        # Singleton registration: the class attribute points at this instance.
        AudioPlayer.instance = self

        log(f'Found {len(self.playlists)} playlists', console=True)
Beispiel #6
0
    def command_wrapper(cls):
        """Attach the collected argument spec to the decorated command class."""
        variants = args['variants']
        if not variants:
            log('\t\tWarning: The {:s} command has no valid call variation.',
                cls.__name__)
        cls.args = args

        return cls
Beispiel #7
0
    def __init__(self, map_settings: Dict):
        """Build the game map (sectors + nodes) from *map_settings*.

        :param map_settings: Dict -- must contain 'rows', 'columns',
            'grid_width', 'grid_height'; may contain 'nodes' and 'trees'
        """
        # Register this map as the shared singleton everywhere it is needed.
        MapNode.map = Sector.map = Map.instance = self

        self.rows = map_settings['rows']
        self.columns = map_settings['columns']
        self.grid_width = map_settings['grid_width']
        self.grid_height = map_settings['grid_height']
        self.width = self.columns * self.grid_width
        self.height = self.rows * self.grid_height

        # Optional pre-saved node data (empty when absent).
        self.nodes_data = map_settings.get('nodes', {})

        # The map is divided into sectors of 10x10 nodes so enemy detection
        # only scans a unit's own sector plus adjacent ones instead of the
        # whole map.
        self.sectors: Dict[SectorId, Sector] = {}
        self.nodes: Dict[GridPosition, MapNode] = {}

        self.generate_sectors()
        self.generate_nodes()
        self.calculate_distances_between_nodes()

        # Plant saved trees after load when provided, random ones otherwise.
        if 'trees' in map_settings:
            self.game.after_load_functions.append(
                partial(self.plant_trees, map_settings['trees']))
        else:
            self.game.after_load_functions.append(self.plant_trees)

        log('Map was initialized successfully...', console=True)
 def __init__(self):
     """Cache pathfinder and configs references from the game singleton."""
     self.pathfinder = self.game.pathfinder
     # configs: data read from the CSV files in the configs directory
     # (the old docstring documented a 'configs' parameter that does not
     # exist - this method takes no arguments).
     self.configs: Dict[str, Dict[str, Dict[str, Any]]] = self.game.configs
     # plain string: the old f-string had no placeholders
     log('GameObjectsSpawner was initialized successfully...', console=True)
Beispiel #9
0
 def generate_sectors(self):
     """Create a Sector for each SECTOR_SIZE x SECTOR_SIZE block of cells."""
     for column in range(self.columns):
         sector_column = column // SECTOR_SIZE
         for row in range(self.rows):
             sector_row = row // SECTOR_SIZE
             sector_id = (sector_column, sector_row)
             # repeated writes to the same id simply overwrite, as before
             self.sectors[sector_id] = Sector(sector_id)
     log(f'Created {len(self.sectors)} map sectors.', console=True)
Beispiel #10
0
    def _init(self, scale):
        """Recreate the benchmark database (drop + create) and populate it
        with pgbench data at the given *scale*."""

        # fresh result slots for this dataset scale
        self._results['results'] = {'init': None, 'runs': [], 'warmup': None}

        log(f"recreating '{self._dbname}' database")
        run_cmd(['dropdb', '--if-exists', self._dbname], env=self._env)
        run_cmd(['createdb', self._dbname], env=self._env)

        log(f"initializing pgbench '{self._dbname}' with scale {scale}")

        result = run_cmd(
            ['pgbench', '-s', str(scale), '-h', SOCKET_PATH, '-p', '5432',
             self._dbname],
            env=self._env, cwd=self._outdir)

        # keep pgbench's stdout for later inspection
        with open(BASE_PATH + '/pgbench_log.txt', 'w+') as log_file:
            log_file.write("pgbench log: \n")
            log_file.write(result[1].decode("utf-8"))

        # remember the init duration
        self._results['results']['init'] = result[2]
 def _load_bundle(self, bundle: UiElementsBundle):
     """Activate *bundle* and register all its elements with this manager."""
     log(f'LOADING BUNDLE: {bundle.name}')
     bundle.on_load()
     bundle.displayed_in_manager = self
     self.active_bundles.add(bundle.index)
     elements = bundle.elements
     self.ui_elements_spritelist.extend(elements)
     self.bind_ui_elements_with_ui_spritelist(elements)
Beispiel #12
0
    def build_and_install(self, path, remove=True):
        """Configure, build and install the sources under *path*."""

        # TODO collect output of configure and make commands
        # wipe any previous installation at the target path first
        if os.path.exists(path):
            shutil.rmtree(path)

        with TemporaryFile() as strout:
            log("configuring sources in '%s' with prefix '%s'" %
                (self._path, path))
            call(['./configure', '--prefix', path], cwd=self._path,
                 stdout=strout, stderr=STDOUT)

        with TemporaryFile() as strout:
            log("building sources and installing into '%s'" % (path,))

            jobs = str(cpu_count())

            # cleanup and build using multiple cpus
            call(['make', '-s', 'clean'], cwd=self._path,
                 stdout=strout, stderr=STDOUT)
            call(['make', '-s', '-j', jobs, 'install'],
                 cwd=self._path, stdout=strout, stderr=STDOUT)

            # older releases ship pgbench in contrib - install it from there
            legacy_pgbench = '%s/contrib/pgbench' % (self._path,)
            if os.path.isdir(legacy_pgbench):
                call(['make', '-s', '-j', jobs, 'install'],
                     cwd=legacy_pgbench, stdout=strout, stderr=STDOUT)
Beispiel #13
0
    def _collect_sysctl(self):
        """Return the kernel configuration as reported by `sysctl -a`."""
        log("collecting sysctl")
        # run_cmd returns a tuple; element 1 is the captured output
        return run_cmd(['sysctl', '-a'], env=self._env)[1]
def contributor_full(con, std_tables, stoptime, offset):
    """Rebuild the users table, then aggregate contributor activity,
    committing after each step."""
    steps = (
        ("initializing users table",
         lambda: initialize_user_table(con, std_tables)),
        ("aggregating contributors",
         lambda: aggregate_contributor(con, stoptime, offset)),
    )
    for message, step in steps:
        log(message)
        step()
        con.commit()
Beispiel #15
0
    def _clone(self):
        """Clone the upstream repository into the local working path."""
        log(f"cloning repository '{self._url}' to '{self._path}'")

        # discard git's console output
        with TemporaryFile() as strout:
            call(['git', 'clone', self._url, self._path],
                 stdout=strout, stderr=STDOUT)
Beispiel #16
0
    def _collect_system_info(self):
        """Gather CPU, OS, memory, disk, process and compiler information
        into a nested dict."""

        log("Collecting system info")

        compiler = {
            'make': run_cmd(['make', '--version'], env=self._env),
            'gcc': run_cmd(['gcc', '--version'], env=self._env),
        }

        cpu = {
            'information': get_cpu_info(),
            'number': psutil.cpu_count(),
            'times': psutil.cpu_times(percpu=False),
            'percent': psutil.cpu_times_percent(percpu=False),
            'stats': psutil.cpu_stats(),
            'load_avg': psutil.getloadavg(),
        }

        os_info = {
            'architecture': platform.architecture(),
            'processor': platform.processor(),
            'release': platform.release(),
            'version': platform.version(),
            'libc': platform.libc_ver(),
        }

        memory = {
            'virtual': psutil.virtual_memory(),
            'swap': psutil.swap_memory(),
            'mounts': psutil.disk_partitions(),
        }

        disk = {
            'usage': psutil.disk_usage('/'),
            'io': psutil.disk_io_counters(perdisk=False, nowrap=True),
        }

        # stats about this very process
        current = psutil.Process()
        process = {
            'io': current.io_counters(),
            'context_switches': current.num_ctx_switches(),
            'cpu_times': current.cpu_times(),
            'threads': current.num_threads(),
            'cpu_percent': current.cpu_percent(),
            'memory': current.memory_info(),
            'memory_percent': current.memory_percent(),
        }

        # NOTE(review): raw /proc/cpuinfo, /proc/meminfo and /proc/mounts
        # collection was disabled in the original and stays disabled here.

        return {
            'cpu': cpu,
            'os': os_info,
            'memory': memory,
            'disk': disk,
            'process': process,
            'compiler': compiler,
        }
    def _run(self,
             run,
             scale,
             duration,
             nclients=1,
             njobs=1,
             read_only=False,
             aggregate=True,
             csv_queue=None):
        """Run pgbench on the database (either a warmup or actual benchmark run).

        :param run: label for this run (used in the output directory name)
        :param scale: pgbench dataset scale
        :param duration: run duration in seconds
        :param nclients: number of client connections
        :param njobs: number of pgbench worker threads
        :param read_only: run the select-only workload when True
        :param aggregate: log aggregated per-second stats when True
        :param csv_queue: optional queue receiving one summary row per run
        :return: dict with parsed pgbench results plus run metadata
        """

        # Create a separate directory for each pgbench run
        if read_only:
            rtag = "ro"
        else:
            rtag = "rw"
        rdir = "%s/pgbench-%s-%d-%d-%s" % (self._outdir, rtag, scale, nclients,
                                           str(run))
        if not (os.path.exists(rdir)):
            os.mkdir(rdir)

        args = [
            'pgbench', '-c',
            str(nclients), '-j',
            str(njobs), '-T',
            str(duration)
        ]

        # aggregate on per second resolution
        if aggregate:
            args.extend(['-l', '--aggregate-interval', '1'])

        if read_only:
            args.extend(['-S'])

        args.extend([self._dbname])

        # do an explicit checkpoint before each run
        run_cmd(['psql', self._dbname, '-c', 'checkpoint'], env=self._env)

        log("pgbench: clients=%d, jobs=%d, aggregate=%s, read-only=%s, "
            "duration=%d" % (nclients, njobs, aggregate, read_only, duration))

        start = time.time()
        r = run_cmd(args, env=self._env, cwd=rdir)
        end = time.time()

        r = PgBench._parse_results(r[1])
        r.update({'read-only': read_only})

        r.update({'start': start, 'end': end})

        if csv_queue is not None:
            # BUGFIX: 'mode', 'latency' and 'tps' were previously undefined
            # names here, so any call with csv_queue set raised NameError.
            # Use the run mode tag and the parsed pgbench results instead
            # (.get() hedges against either key missing from the parse).
            csv_queue.put([
                start, end, r['scale'], nclients, njobs, rtag, duration,
                r.get('latency'), r.get('tps')
            ])

        return r
def execute(directory):
    """Run the CSV merge pipeline over *directory*.

    :param directory: path to the directory containing the input CSV files
    """
    # endswith() also handles an empty string safely; the previous
    # directory[-1] check raised IndexError for ''.
    if not directory.endswith('/'):
        directory = directory + '/'

    files = get_filenames(directory)
    headers, rows = extract_data(files)
    write_data(headers, rows, directory)
    log('PROCESSING COMPLETE')
 def rename_saved_game(self, old_name: str, new_name: str):
     """Rename a saved game on disk and update the registry to match."""
     try:
         new_path = os.path.join(self.saves_path, new_name)
         os.rename(self.saved_games[old_name], new_path)
         self.saved_games[new_name] = new_path
         self.saved_games.pop(old_name)
     except Exception as e:
         # best-effort: report the failure in the game console
         log(f'{str(e)}', console=True)
 def stop(self):
     """Stop the collectd daemon using the pid recorded in its pidfile."""
     log("stopping collectd")
     try:
         # 'with' closes the pidfile handle (it was leaked before)
         with open(COLLECTD_PIDFILE, 'r') as pidfile:
             pid = pidfile.read().strip()
         run_cmd(['kill', pid])
     except FileNotFoundError:
         log('collectd pid not found - processes may still be running')
    def enabled(self, b):
        """Set the enabled flag, logging the transition; no-op if unchanged."""
        if b == self._enabled:
            return

        message = ('Enabled the {:s} command.' if b
                   else 'Disabled the {:s} command.')
        log(message, self.args['name'])
        self._enabled = b
Beispiel #22
0
    def clone_or_update(self):
        """Ensure a local clone exists and is current, then log its commit."""
        # refresh an existing clone, otherwise clone from scratch
        if self._exists():
            self._update()
        else:
            self._clone()

        log(f"current commit '{self.current_commit()}'")
Beispiel #23
0
 def generate_nodes(self):
     """Create a MapNode (plus its map sprite) for every grid position."""
     for column in range(self.columns):
         sector_column = column // SECTOR_SIZE
         for row in range(self.rows):
             sector_row = row // SECTOR_SIZE
             owning_sector = self.sectors[(sector_column, sector_row)]
             node = MapNode(column, row, owning_sector)
             self.nodes[(column, row)] = node
             self.create_map_sprite(*node.position)
     log(f'Generated {len(self.nodes)} map nodes.', console=True)
Beispiel #24
0
    def _update(self):
        """Refresh an existing repository clone via git-pull."""
        log(f"updating repository '{self._path}' from '{self._url}'")

        # FIXME should verify that the repository uses the proper upstream url
        # git output goes to a throwaway temp file
        with TemporaryFile() as strout:
            call(['git', 'pull'], cwd=self._path, stdout=strout, stderr=STDOUT)
Beispiel #25
0
    def _initdb(self):
        """Initialize the PostgreSQL data directory via pg_ctl init."""
        log("initializing cluster into '%s'" % (self._data, ))
        # pg_ctl output goes to a throwaway temp file
        with TemporaryFile() as strout:
            call(['pg_ctl', '-D', self._data, 'init'],
                 env=self._env, stdout=strout, stderr=STDOUT)
 def play_sound(self, name: str, volume: Optional[float] = None):
     """Play a single sound. Use this for sound effects.

     :param name: key of a preloaded sound in self.sounds
     :param volume: optional per-play volume override
     """
     if name not in self.sounds:
         log(f'Sound: {name} not found!', console=True)
         # BUGFIX: previously fell through and tried to play the
         # missing sound anyway after logging that it was not found.
         return
     if self.is_music(name) and not self._music_on:
         return
     # NOTE(review): a music track with music enabled but sound effects
     # disabled is also suppressed here - confirm that is intended.
     elif not self._sound_effects_on:
         return
     self._play_sound(name, loop=False, volume=volume)
    def enabled(self, b):
        """Enable/disable the command; logs only on an actual change."""
        if b != self._enabled:
            if b:
                log('Enabled the {:s} command.', self.args['name'])
            else:
                log('Disabled the {:s} command.', self.args['name'])
            self._enabled = b
Beispiel #28
0
    def run_tests(self, csv_queue):
        """
        execute the whole benchmark, including initialization, warmup and
        benchmark runs

        :param csv_queue: queue forwarded to _run() collecting one summary
            row per benchmark run
        :return: self._results with the 'pgbench' key filled in
        """

        # derive configuration for the CPU count / RAM size
        configs = PgBench._configure(cpu_count(), available_ram())

        results = {'ro': {}, 'rw': {}}  #ro:read only  rw:read-write
        j = 0
        for config in configs:
            scale = config['scale']

            # make sure per-scale result dicts exist for both modes
            if scale not in results['ro']:
                results['ro'][scale] = {}
            if scale not in results['rw']:
                results['rw'][scale] = {}
            #print(results)

            # init for the dataset scale and warmup
            self._init(scale)

            # NOTE(review): the warmup result is never stored anywhere -
            # confirm whether it should go into results['warmup'].
            warmup = self._run('w%d' % j, scale, self._duration, cpu_count(),
                               cpu_count())
            j += 1

            # read-only & read-write
            for ro in [True, False]:
                if ro:
                    tag = 'ro'
                else:
                    tag = 'rw'

                for i in range(self._runs):
                    log("pgbench: %s run=%d" % (tag, i))

                    for clients in config['clients']:
                        # lazily create the per-client-count result slot
                        if clients not in results[tag][scale]:
                            results[tag][scale][clients] = {}
                            results[tag][scale][clients]['results'] = []

                        r = self._run(i, scale, self._duration, clients,
                                      clients, ro, True, csv_queue)
                        r.update({'run': i})
                        results[tag][scale][clients]['results'].append(r)

                        # recompute aggregate tps statistics after every run
                        tps = []
                        for result in results[tag][scale][clients]['results']:
                            tps.append(float(result['tps']))
                        results[tag][scale][clients]['metric'] = mean(tps)
                        results[tag][scale][clients]['median'] = median(tps)
                        results[tag][scale][clients]['std'] = std(tps)

        self._results['pgbench'] = results
        return self._results
Beispiel #29
0
async def update_resource(db_object: Base) -> bool:
    """Persist *db_object* in the current session.

    :param db_object: mapped object to add/update
    :return: True on success, False when SQLAlchemy raises
    """
    try:
        session.add(db_object)
        session.commit()
        session.flush()
        return True
    except exc.SQLAlchemyError as err:
        # BUGFIX: logging.log() requires an int level as its first argument;
        # logging.log('info', ...) raised TypeError inside this handler.
        logging.info(str(err))
        print(err)
        # NOTE(review): no session.rollback() here - the session may be left
        # in a failed state for subsequent calls; confirm intended.
        return False
def get_filenames(directory):
    """Return paths of all CSV files directly inside *directory*.

    :param directory: directory path ending with '/'
    :raises Exception: when no CSV file is present
    """
    files = os.listdir(directory)
    # endswith() instead of substring match: '.csv' in x also accepted
    # names such as 'data.csv.bak' or 'table.csvx'.
    valid_files = [directory + x for x in files if x.endswith('.csv')]
    for v in valid_files:
        log('FOUND {}'.format(v))

    if len(valid_files) == 0:
        raise Exception(
            'No CSV files found in target directory {}'.format(directory))

    return (valid_files)
 def load_game(self, save_name: str):
     """Generator that loads each saved-game section in a fixed order.

     :param save_name: name of the save file (without extension)
     :yields: the result of each load_<name>() call, then one final None
     """
     full_save_path = self.get_full_path_to_file_with_extension(save_name)
     with shelve.open(full_save_path) as file:
         for name in ('timer', 'settings', 'viewports', 'map', 'factions',
                      'players', 'local_human_player', 'units', 'buildings',
                      'mission', 'permanent_units_groups', 'fog_of_war',
                      'mini_map'):
             log(f'Loading: {name}...', console=True)
             # getattr dispatch replaces the old eval() call: it invokes
             # exactly the same self.load_<name>(file[name]) loaders
             # without evaluating generated source strings.
             loader = getattr(self, f'load_{name}')
             yield loader(file[name])
     log(f'Game {save_name} loaded successfully!', True)
     yield
    def trigger(self, trig, *args, **kwargs):
        """Dispatch *trig* to its registered handler methods."""
        triggers = self.triggers

        # catch-all handlers always run first, for every trigger
        for handler in triggers.get('<catch_all>', ()):
            handler(*args, **kwargs)

        if trig in triggers:
            for handler in triggers[trig]:
                handler(*args, **kwargs)
        elif '<catch_unknown>' in triggers:
            for handler in triggers['<catch_unknown>']:
                handler(*args, **kwargs)
        elif '<catch_all>' not in triggers:
            # unknown trigger with no fallback handlers of any kind
            log('Unknown trigger {0}.', trig)
Beispiel #33
0
def command(*variants, split=None, **kwargs):
    """Decorator factory that registers a command under the given variant
    names, warning about (and dropping) names that already exist."""
    for variant in variants:
        if Command.exists(variant):
            log('\t\tThere already exists a command with the name {:s}.',
                variant)

    usable = [variant for variant in variants if not Command.exists(variant)]
    args = {'variants': usable, 'split': split}
    # caller-supplied kwargs may override 'variants'/'split', as before
    args.update(kwargs)

    def command_wrapper(cls):
        """Attach the collected argument spec to the decorated class."""
        if not args['variants']:
            log('\t\tWarning: The {:s} command has no valid call variation.',
                cls.__name__)
        cls.args = args
        return cls

    return command_wrapper
 def append_message(self, target, sender, message):
     """Append a formatted chat line to the target chat's history."""
     if target not in self._chats:
         log('Unknown target {:s}.', target)
         return
     line = '<b>{:s}:</b> {:s}\n'.format(sender, message)
     self._chats[target].append(line)
 def send(self, msg):
     """Send *msg* over the connection, logging any ValueError it raises."""
     try:
         self._connection.send(msg)
     except ValueError as error:
         log('Error sending: {:s}', *error.args)
Beispiel #36
0
    def command_wrapper(cls):
        """Attach the collected command arguments to *cls*."""
        if not args['variants']:
            log('\t\tWarning: The {:s} command has no valid call variation.',
                cls.__name__)
        cls.args = args

        return cls
def load_plugin(plugin):
    """Import and return the plugin's module; return None when it fails."""
    name = plugin['spec'].name
    log('\tLoading {:s}:', name)
    try:
        return import_module(name)
    except ImportError as error:
        # falls through and implicitly returns None
        log('\t\tImport failed: {:s}', *error.args)
def load_all_plugins():
    """Discover every available plugin and load each one."""
    log('Loading plugins:')
    for found in find_plugins():
        load_plugin(found)
    log('Done loading plugins')
 def __init__(cls, name, bases, attrs):
     """Plugin registry hook: the base class gets the registry list,
     every subclass registers itself into it."""
     if hasattr(cls, 'plugins'):
         log('\t\tSuccessfully imported {:s}', name)
         cls.plugins.append(cls)
     else:
         # first class seen (the base): create the shared registry
         cls.plugins = []