def save_replay(self, replay_data, replay_dir, prefix=None):
    """Save a replay to a directory, returning the path to the replay.

    Args:
      replay_data: The result of controller.save_replay(), ie the binary data.
      replay_dir: Where to save the replay. This can be absolute or relative.
      prefix: Optional prefix for the replay filename.

    Returns:
      The full path where the replay is saved.

    Raises:
      ValueError: If the prefix contains the path separator.
    """
    # A prefix with a path separator would silently change the target
    # directory; reject it up front.
    if prefix and os.path.sep in prefix:
        raise ValueError(
            "Prefix '%s' contains '%s', use replay_dir instead." % (
                prefix, os.path.sep))

    # Timestamp with second resolution; ':' is replaced so the name is a
    # valid filename on all platforms.
    timestamp = datetime.datetime.utcnow().replace(microsecond=0)
    stamp_str = timestamp.isoformat("-").replace(":", "-")

    name_parts = []
    if prefix:
        name_parts.append(prefix)
    name_parts.append("%s.SC2Replay" % stamp_str)
    replay_filename = "_".join(name_parts)

    out_dir = self.abs_replay_path(replay_dir)
    if not gfile.Exists(out_dir):
        gfile.MakeDirs(out_dir)

    replay_path = os.path.join(out_dir, replay_filename)
    with gfile.Open(replay_path, "wb") as f:
        f.write(replay_data)
    return replay_path
def map_data(self, map_name, players=None):
    """Return the map data for a map by name or path."""
    # Try the plain name first; if a player count is given, also try the
    # "(N)name" variant in the same directory.
    candidates = [map_name]
    if players:
        variant = os.path.join(
            os.path.dirname(map_name),
            "(%s)%s" % (players, os.path.basename(map_name)))
        candidates.append(variant)

    for candidate in candidates:
        full_path = os.path.join(self.data_dir, "Maps", candidate)
        if gfile.Exists(full_path):
            with gfile.Open(full_path, "rb") as f:
                return f.read()

    raise ValueError("Map '%s' not found." % map_name)
def main(unused_argv):
    """Dump stats about all the actions that are in use in a set of replays."""
    run_config = run_configs.get()

    if not gfile.Exists(FLAGS.replays):
        sys.exit("{} doesn't exist.".format(FLAGS.replays))

    stats_queue = multiprocessing.Queue()
    stats_thread = threading.Thread(target=stats_printer, args=(stats_queue,))
    try:
        # Buffering everything into a JoinableQueue makes the program not exit
        # for some reason, so collect the paths into a list first and
        # drip-feed them into the queue from a separate thread. The list is
        # fetched synchronously so there is guaranteed work in the queue
        # before the SC2 processes run; otherwise the replay_queue.join below
        # would succeed without doing any work and exit.
        print("Getting replay list:", FLAGS.replays)
        replay_paths = sorted(run_config.replay_paths(FLAGS.replays))
        print(len(replay_paths), "replays found.")
        if not replay_paths:
            return

        if not FLAGS["sc2_version"].present:  # ie not set explicitly.
            # Sniff the game version from the first replay so the right
            # binary is launched.
            version = replay.get_replay_version(
                run_config.replay_data(replay_paths[0]))
            run_config = run_configs.get(version=version)
            print("Assuming version:", version.game_version)
        print()
        stats_thread.start()

        replay_queue = multiprocessing.JoinableQueue(FLAGS.parallel * 10)
        filler_thread = threading.Thread(target=replay_queue_filler,
                                         args=(replay_queue, replay_paths))
        filler_thread.daemon = True
        filler_thread.start()

        for worker_id in range(min(len(replay_paths), FLAGS.parallel)):
            worker = ReplayProcessor(worker_id, run_config, replay_queue,
                                     stats_queue)
            worker.daemon = True
            worker.start()
            # Stagger startups, otherwise they seem to conflict somehow.
            time.sleep(1)

        replay_queue.join()  # Wait for the queue to empty.
    except KeyboardInterrupt:
        print("Caught KeyboardInterrupt, exiting.")
    finally:
        stats_queue.put(None)  # Tell the stats_thread to print and exit.
        if stats_thread.is_alive():
            stats_thread.join()
def start(self):
    """Launch the stats printer, queue filler, and replay worker processes."""
    run_config = run_configs.get()

    # Bail out early if the input replay path doesn't exist.
    if not gfile.Exists(FLAGS.replays):
        sys.exit("{} doesn't exist.".format(FLAGS.replays))

    stats_queue = multiprocessing.Queue()
    stats_thread = threading.Thread(target=stats_printer, args=(stats_queue,))
    stats_thread.start()
    try:
        # Buffering everything into a JoinableQueue makes the program not exit
        # for some reason, so collect the paths into a list first and
        # drip-feed them into the queue from a separate thread. The list is
        # fetched synchronously so there is guaranteed work in the queue
        # before the SC2 processes run; otherwise the replay_queue.join below
        # would succeed without doing any work and exit.
        print("Getting replay list:", FLAGS.replays)
        replay_paths = sorted(run_config.replay_paths(FLAGS.replays))
        print(len(replay_paths), "replays found.\n")

        replay_queue = multiprocessing.JoinableQueue(FLAGS.parallel * 10)
        filler_thread = threading.Thread(
            target=replay_actions.replay_queue_filler,
            args=(replay_queue, replay_paths))
        filler_thread.daemon = True
        filler_thread.start()

        for worker_id in range(FLAGS.parallel):
            worker = replay_actions.ReplayProcessor(
                worker_id, run_config, replay_queue, stats_queue)
            worker.daemon = True
            worker.start()
            # Stagger startups, otherwise they seem to conflict somehow.
            time.sleep(1)

        replay_queue.join()  # Wait for the queue to empty.
    except KeyboardInterrupt:
        print("Caught KeyboardInterrupt, exiting.")
    finally:
        stats_queue.put(None)  # Tell the stats_thread to print and exit.
        stats_thread.join()
def save_replay(self, replay_data, replay_dir, map_name):
    """Save a replay to a directory, returning the path to the replay.

    Args:
      replay_data: The result of controller.save_replay(), ie the binary data.
      replay_dir: Where to save the replay. This can be absolute or relative.
      map_name: The map name, used as a prefix for the replay name.

    Returns:
      The full path where the replay is saved.
    """
    # Timestamp with second resolution; ':' is replaced so the name is a
    # valid filename on all platforms.
    timestamp = datetime.datetime.utcnow().replace(microsecond=0)
    stamp_str = timestamp.isoformat("-").replace(":", "-")

    # Strip any directory and extension from the map name for the prefix.
    map_base = os.path.splitext(os.path.basename(map_name))[0]
    replay_filename = "%s_%s.SC2Replay" % (map_base, stamp_str)

    out_dir = self.abs_replay_path(replay_dir)
    if not gfile.Exists(out_dir):
        gfile.MakeDirs(out_dir)

    replay_path = os.path.join(out_dir, replay_filename)
    with gfile.Open(replay_path, "wb") as f:
        f.write(replay_data)
    return replay_path
def main():
    """Dump stats about all the actions that are in use in a set of replays.

    Reads a JSON filter file (FLAGS.filter_file) that names a replay
    directory and a set of replays, builds the replay path list, then fans
    the replays out to FLAGS.parallel ReplayProcessor workers, collecting
    stats through a queue drained by stats_printer.
    """
    run_config = run_configs.get()

    filter_file = FLAGS.filter_file
    if not gfile.Exists(filter_file):
        sys.exit("{} doesn't exist.".format(filter_file))

    print("Generating replay list using:", filter_file)

    # Load the filter JSON with a context manager so the file handle is
    # closed (the original json.load(open(...)) leaked it).
    with open(filter_file) as f:
        filtered_data = json.load(f)

    # Replay directory denoted by the filter.
    replay_dir = filtered_data['ReplaysDirectory']

    # Build the list of replay file paths named by the filter. The loop
    # variable is named replay_entry so it doesn't shadow the `replay`
    # module used elsewhere in this file.
    replay_list = []
    for replay_entry in filtered_data['Replays']:
        for key in replay_entry:
            if key:
                replay_list.append(
                    os.path.join(replay_dir, key + '.SC2Replay'))

    # Ensure the path for the states directory exists, else create it.
    if not os.path.exists(FLAGS.states_dir):
        os.makedirs(FLAGS.states_dir)

    stats_queue = multiprocessing.Queue()
    stats_thread = threading.Thread(
        target=stats_printer,
        args=(stats_queue, FLAGS.parallel, FLAGS.print_time))
    stats_thread.start()
    try:
        # For some reason buffering everything into a JoinableQueue makes the
        # program not exit, so save it into a list then slowly fill it into
        # the queue in a separate thread. Grab the list synchronously so we
        # know there is work in the queue before the SC2 processes actually
        # run, otherwise the replay_queue.join below succeeds without doing
        # any work, and exits.
        replay_queue = multiprocessing.JoinableQueue(FLAGS.parallel * 10)
        replay_queue_thread = threading.Thread(target=replay_queue_filler,
                                               args=(replay_queue,
                                                     replay_list))
        replay_queue_thread.daemon = True
        replay_queue_thread.start()

        for i in range(FLAGS.parallel):
            p = ReplayProcessor(i, run_config, replay_queue, stats_queue,
                                FLAGS.step_mul, FLAGS.states_dir,
                                FLAGS.winner_only)
            p.daemon = True
            p.start()
            # Stagger startups, otherwise they seem to conflict somehow.
            time.sleep(1)

        replay_queue.join()  # Wait for the queue to empty.
    except KeyboardInterrupt:
        print("Caught KeyboardInterrupt, exiting.")
    finally:
        stats_queue.put(None)  # Tell the stats_thread to print and exit.
        stats_thread.join()