def get_top_weekly(self):
    """Queue this week's top-rated replays across all supported games.

    Fetches three pages (15 results each) of 'best' quarks since the start
    of the current week and hands each one to add_replay().
    """
    today = datetime.datetime.today()
    week_start = today - timedelta(days=today.weekday())
    week_start_ms = int(week_start.timestamp() * 1000)

    query = {'req': 'searchquarks', 'best': True, 'since': week_start_ms}

    # Collect three pages of results, 15 entries per page.
    replays = []
    for page in range(3):
        query['offset'] = page * 15
        response = self.get_data(query)
        replays.extend(response.json()['results']['results'])

    for replay in replays:
        if replay['gameid'] not in self.config['supported_games']:
            Logging().info(
                f"Game {replay['gameid']} not supported for replay {replay['quarkid']}"
            )
            continue
        status = self.add_replay(replay=replay,
                                 emulator=replay['emulator'],
                                 game=replay['gameid'],
                                 player_replay=False)
        if status != 'ADDED':
            Logging().info(
                f"Not adding replay {replay['quarkid']}, Status: {status}")
    return ("ADDED")
def get_queue_position(challenge_id):
    """Return the 'queue position' for a requested replay.

    Position 0 means the replay is being processed right now; otherwise the
    1-based position among queued player replays is returned.
    """
    if not challenge_exists(challenge_id):
        return ("NO_DATA")

    # The replay currently being recorded sits at position 0.
    if get_current_job_id() == challenge_id:
        return (0)

    if not player_replay(challenge_id):
        # Not a player replay
        return ("NOT_PLAYER_REPLAY")

    # Walk all unfinished player replays, sorted by date added, and report
    # this replay's position.
    db = Database()
    Logging().debug(f"Looking for player replay {challenge_id}")
    for position, replay in enumerate(db.get_all_queued_player_replays(),
                                      start=1):
        Logging().debug(f"Row id: {replay.id}")
        if replay.id == challenge_id:
            return position

    # This shouldn't happen, ID was already verified. Maybe database modified while in use?
    raise IndexError
def create_dirs(self):
    """Create the working directories (tmp, finished) if they don't exist.

    Fix: the original logged 'Created ... dir' *before* calling mkdir, so a
    failed mkdir still logged success; now the log follows the creation.
    """
    for dirname in ('tmp', 'finished'):
        path = f"{self.config['fcreplay_dir']}/{dirname}"
        if not os.path.exists(path):
            os.mkdir(path)
            Logging().info(f'Created {dirname} dir')
def remove_generated_files(self):
    """Remove generated files

    Generated files are thumbnail and videofile
    """
    Logging().info("Removing old files")
    filename = f"{self.replay.id}.mkv"
    # Bug fix: previously removed a hard-coded placeholder path; the local
    # `filename` was computed but never used. Remove the replay's own video.
    os.remove(f"{self.config['fcreplay_dir']}/finished/{filename}")
    os.remove(f"{self.config['fcreplay_dir']}/tmp/thumbnail.jpg")
    self.update_status(status.REMOVED_GENERATED_FILES)
    Logging().info("Finished removing files")
def wait_for_operation(compute, project, zone, operation):
    """Poll a GCE zone operation once per second until it reports DONE.

    Returns the final operation result; raises Exception when the finished
    operation carries an 'error' payload.
    """
    Logging().info('Waiting for operation to finish...')
    while True:
        result = compute.zoneOperations().get(
            project=project, zone=zone, operation=operation).execute()
        if result['status'] != 'DONE':
            time.sleep(1)
            continue
        Logging().info("done.")
        if 'error' in result:
            raise Exception(result['error'])
        return result
def create_thumbnail(self):
    """Create thumbnail from video

    Grabs a single frame 20 seconds into the encoded video via ffmpeg.
    """
    Logging().info("Making thumbnail")
    filename = f"{self.replay.id}.mkv"
    # Bug fix: point ffmpeg at the replay's video file; the original passed
    # a hard-coded placeholder path and left `filename` unused.
    subprocess.run([
        "ffmpeg", "-ss", "20", "-i",
        f"{self.config['fcreplay_dir']}/finished/{filename}",
        "-vframes:v", "1",
        f"{self.config['fcreplay_dir']}/tmp/thumbnail.jpg"
    ])
    self.update_status(status.THUMBNAIL_CREATED)
    Logging().info("Finished making thumbnail")
def failed(self, *args, **kwargs):
    """Decorator wrapper: run the wrapped func; on any exception mark the
    replay as failed, optionally destroy the cloud instance, and exit.

    Fix: corrected the 'Excption' typo in the error message and dropped the
    unused exception binding.
    """
    try:
        return func(self, *args, **kwargs)
    except Exception:
        # Log the full traceback before tearing everything down.
        trace_back = sys.exc_info()[2]
        Logging().error(
            f"Exception: {str(traceback.format_tb(trace_back))}, shutting down"
        )
        Logging().info(f"Setting {self.replay.id} to failed")
        self.db.update_failed_replay(challenge_id=self.replay.id)
        self.update_status(status.FAILED)
        if self.config['gcloud_destroy_on_fail']:
            Gcloud().destroy_fcreplay(failed=True)
        sys.exit(1)
def main(self):
    """The main loop for processing one or more replays

    Repeatedly pulls the next replay and runs the full pipeline on it:
    record -> move -> encode -> describe -> thumbnail -> upload -> cleanup.
    When no replay is available, sleeps and (on gcloud) destroys the
    instance. In debug mode the loop exits after a single iteration.
    """
    self.create_dirs()
    self.clean()
    if self.debug:
        Logging().debug(self.config)

    # If this is google cloud, and the 'destroying' file exists, remove it
    if self.gcloud and os.path.exists('/tmp/destroying'):
        os.remove('/tmp/destroying')

    while True:
        # Replay() selects the next replay to process from the database;
        # replay.replay is None when the queue is empty.
        replay = Replay()
        if replay.replay is not None:
            replay.add_job()
            replay.record()
            replay.move()
            replay.encode()
            replay.set_description()
            replay.create_thumbnail()
            # Uploads are independently toggled by config.
            if self.config['upload_to_ia']:
                replay.upload_to_ia()
            if self.config['upload_to_yt']:
                replay.upload_to_yt()
            if self.config['remove_generated_files']:
                replay.remove_generated_files()
            replay.remove_job()
            replay.db.update_created_replay(challenge_id=replay.replay.id)
            replay.set_created()
        else:
            # Queue empty: wait, and on gcloud shut the instance down
            # instead of idling.
            Logging().info(
                "No more replays. Waiting for replay submission")
            time.sleep(5)
            if self.gcloud:
                Gcloud().destroy_fcreplay()
                sys.exit(0)

        # Debug mode processes at most one iteration.
        if self.debug:
            sys.exit(0)
def get_data(self, query):
    """POST *query* to the fightcade API and return the raw response.

    Raises IOError on a 500 response so an outer retry can re-attempt.
    """
    r = requests.post("https://www.fightcade.com/api/", json=query)
    if r.status_code != 500:
        return r
    Logging().error("500 Code, trying up to 3 times")
    raise IOError("Unable to get data")
def update_status(self, status):
    """Update the replay status

    Persists the status both to the local status file (used for crash
    recovery) and to the database.
    """
    Logging().info(f"Set status to {status}")
    status_line = f"{self.replay.id} {status}"
    with open('/tmp/fcreplay_status', 'w') as f:
        f.write(status_line)
    self.db.update_status(challenge_id=self.replay.id, status=status)
def get_replay(self, url, player_requested=False):
    """Get a single replay

    Args:
        url (String): Link to replay
        player_requested (bool, optional): Whether a player asked for this
            replay. Defaults to False.
    """
    # Validate url, this could probably be done better
    pattern = re.compile(
        '^https://replay.fightcade.com/fbneo/.*/[0-9]*-[0-9]*$')
    if not pattern.match(url):
        return ('INVALID_URL')

    # Parse url: .../<emulator>/<game>/<challenge_id>
    emulator, game, challenge_id = url.split('/')[3:6]
    Logging().debug(
        f"Parsed url: emulator: {emulator}, game: {game}, challenge_id: {challenge_id}"
    )

    if game not in self.config['supported_games']:
        return ('UNSUPPORTED_GAME')

    # Get play replays
    query = {"req": "searchquarks", "quarkid": challenge_id}
    r = self.get_data(query)

    # Look for replay in results:
    for result in r.json()['results']['results']:
        if result['quarkid'] == challenge_id:
            return self.add_replay(replay=result,
                                   emulator=emulator,
                                   game=game,
                                   player_replay=player_requested)
    return False
def video_status(request):
    """Mark completed replays as processed once their video is on archive.org."""
    Logging().info("Check status for completed videos")

    # Get all replays that are completed, where video_processed is false.
    # A replay has an embedded video once archive.org has generated a
    # thumbnail for it, so probe for the thumbnail file.
    for replay in db.get_unprocessed_replays():
        Logging().info(f"Checking: {replay.id}")
        r = requests.get(
            f"https://archive.org/download/{replay.id.replace('@', '-')}/__ia_thumb.jpg"
        )
        Logging().info(f"ID: {replay.id}, Status: {r.status_code}")
        if r.status_code == 200:
            db.set_replay_processed(challenge_id=replay.id)

    return json.dumps({"status": True})
def start_fcadefbneo(self, fcadefbneo_path=None, fc_challenge_id=None, game_name=None):
    """Launch fcadefbneo under wine to stream and record the challenge.

    Blocks until the emulator process exits (this is run in a thread by
    the caller).

    Fix: dropped the unused `fbneo_rc` local — no caller inspected it.
    """
    Logging().info(
        f"/usr/bin/wine {fcadefbneo_path}/fcadefbneo.exe quark:stream,{game_name},{fc_challenge_id}.2,7100 -q"
    )
    subprocess.run([
        '/usr/bin/wine',
        f'{fcadefbneo_path}/fcadefbneo.exe',
        f'quark:stream,{game_name},{fc_challenge_id}.2,7100',
        '-q'
    ])
def __init__(self):
    """Create the SQL engine and session factory from configuration.

    Fix: re-raise with bare `raise` to preserve the original traceback
    (instead of `raise e`), and collapse the echo-flag if/else.
    """
    config = Config().config
    # Echo SQL statements when the configured log level includes DEBUG.
    sql_echo = 'DEBUG' in config['loglevel']

    # Create Engine
    try:
        Logging().info(
            f"Creating DB Instance with: {config['sql_baseurl']}")
        self.engine = create_engine(config['sql_baseurl'], echo=sql_echo)
        Base.metadata.create_all(self.engine)
    except Exception as e:
        Logging().error(
            f"Unable to connect to {config['sql_baseurl']}: {e}")
        raise

    self.Session = sessionmaker(bind=self.engine)
def get_replay(self):
    """Get a replay from the database

    Player-requested replays take priority when configured; otherwise pick
    either a random replay or the oldest queued replay.
    """
    Logging().info('Getting replay from database')

    if self.config['player_replay_first']:
        queued = self.db.get_oldest_player_replay()
        if queued is not None:
            Logging().info('Found player replay to encode')
            return queued
        Logging().info('No more player replays')

    if self.config['random_replay']:
        Logging().info('Getting random replay')
        return self.db.get_random_replay()

    Logging().info('Getting oldest replay')
    return self.db.get_oldest_replay()
def set_description(self):
    """Set the description of the video

    Builds "(loc) p1 vs (loc) p2 - date" plus the replay id, optionally
    appends a configured file's contents, and stores it in the database.

    Returns:
        Boolean: Success or failure
    """
    Logging().info("Creating description")
    self.description_text = (
        f"({self.replay.p1_loc}) {self.replay.p1} vs "
        f"({self.replay.p2_loc}) {self.replay.p2} - {self.replay.date_replay}"
        f"\nFightcade replay id: {self.replay.id}")

    # Read the append file: config entry is [enabled_flag, path].
    append_cfg = self.config['description_append_file']
    if append_cfg[0] is True:
        # Check if file exists:
        if not os.path.exists(append_cfg[1]):
            Logging().error(
                f"Description append file {append_cfg[1]} doesn't exist"
            )
            return False
        with open(append_cfg[1], 'r') as description_append:
            self.description_text += "\n" + description_append.read()

    self.update_status(status.DESCRIPTION_CREATED)
    Logging().info("Finished creating description")

    # Add description to database
    Logging().info('Adding description to database')
    self.db.add_description(challenge_id=self.replay.id,
                            description=self.description_text)
    Logging().debug(
        f"Description Text is: {self.description_text.encode('unicode-escape')}"
    )
    return True
def encode(self):
    """Concatenate the recorded avi chunks into a single mkv with mencoder.

    Raises:
        subprocess.CalledProcessError: when mencoder exits non-zero.
    """
    Logging().info("Encoding file")

    # fbneo writes numbered avi chunks whose name embeds a hex counter
    # (name_<hex>.avi); order them by that counter so they are
    # concatenated in recording order.
    finished_dir = f"{self.config['fcreplay_dir']}/finished"
    chunk_order = {
        name: int(name.split('_')[1].split('.')[0], 16)
        for name in os.listdir(finished_dir)
    }
    avi_files = [
        f"{finished_dir}/" + name
        for name, _ in sorted(chunk_order.items(), key=lambda item: item[1])
    ]

    # I can't stress enough how much you should not try and mess with the encoding settings!
    # 1. ffmpeg will not handle files generated by fbneo
    # 2. x264 for whatever reason inserts audio delay
    mencoder_options = [
        'mencoder', '-oac', 'mp3lame', '-lameopts', 'vbr=3',
        '-ovc', 'lavc', '-lavcopts', 'vcodec=mpeg4:vbitrate=4000',
        '-vf', 'flip,scale=800:600,dsize=4/3',
        *avi_files,
        '-of', 'lavf',
        '-o', f"{finished_dir}/{self.replay.id}.mkv"
    ]

    Logging().info(f"Running mencoder with: {' '.join(mencoder_options)}")
    mencoder_rc = subprocess.run(mencoder_options, capture_output=True)
    try:
        mencoder_rc.check_returncode()
    except subprocess.CalledProcessError as e:
        Logging().error(
            f"Unable to process avi files. Return code: {e.returncode}, stdout: {mencoder_rc.stdout}, stderr: {mencoder_rc.stderr}"
        )
        raise e
def upload_to_ia(self):
    """Upload to internet archive

    Sometimes it will return a 403, even though the file doesn't
    already exist. So we decorate the function with the @retry decorator
    to try again in a little bit. Max of 3 tries

    Fixes:
      - title: added the missing space around 'vs' (implicit string
        concatenation previously produced 'vs(').
      - date_short: slice the first 10 chars (YYYY-MM-DD); the original
        `[10]` took only the single character at index 10.
      - upload path: use the replay's video file instead of a placeholder.
    """
    self.update_status(status.UPLOADING_TO_IA)
    title = f"{self.config['supported_games'][self.replay.game]['game_name']}: ({self.replay.p1_loc}) {self.replay.p1} vs " \
        f"({self.replay.p2_loc}) {self.replay.p2} - {self.replay.date_replay}"
    filename = f"{self.replay.id}.mkv"
    date_short = str(self.replay.date_replay)[:10]

    # Make identifier for Archive.org
    ident = str(self.replay.id).replace("@", "-")
    fc_video = get_item(ident)

    metadata = {
        'title': title,
        'mediatype': self.config['ia_settings']['mediatype'],
        'collection': self.config['ia_settings']['collection'],
        'date': date_short,
        'description': self.description_text,
        'subject': self.config['ia_settings']['subject'],
        'creator': self.config['ia_settings']['creator'],
        'language': self.config['ia_settings']['language'],
        'licenseurl': self.config['ia_settings']['license_url']
    }

    Logging().info("Starting upload to archive.org")
    fc_video.upload(f"{self.config['fcreplay_dir']}/finished/{filename}",
                    metadata=metadata,
                    verbose=True)
    self.update_status(status.UPLOADED_TO_IA)
    Logging().info("Finished upload to archive.org")
def get_current_job_remaining():
    """Return the seconds left for the current job, clamped at 0.

    Fix: use timedelta.total_seconds() — the original used `.seconds`,
    which discards whole days and under-reports long running times.
    """
    db = Database()
    job = db.get_current_job()

    current_time = datetime.datetime.utcnow()
    start_time = job.start_time
    length = job.length

    running_time = int((current_time - start_time).total_seconds())
    time_left = length - running_time

    Logging().info(
        f"Current job status: running_time: {running_time}, time_left: {time_left}"
    )

    # Time left is less than 0, probably uploading or doing something
    return max(time_left, 0)
def get_game_replays(self, game):
    """Get game replays

    Args:
        game (String): Gameid
    """
    if game not in self.config['supported_games']:
        return ('UNSUPPORTED_GAME')

    query = {'req': 'searchquarks', 'gameid': game}

    r = self.get_data(query)
    for i in r.json()['results']['results']:
        if i['emulator'] == 'fbneo' and i['live'] is False:
            # Bug fix: 'emulator' was misspelled 'emaultor' both as the
            # keyword argument and as the result key (would raise at
            # runtime); matches the spelling used by get_ranked_replays.
            status = self.add_replay(replay=i,
                                     emulator=i['emulator'],
                                     game=game,
                                     player_replay=False)
            if status != 'ADDED':
                Logging().info(f'Not adding game, Status: {status}')
    return ("ADDED")
def get_ranked_replays(self, game, username=None, pages=None):
    """Get ranked replays

    Args:
        game (String): Gameid
        username (String, optional): Player profile name. Defaults to None.
        pages (int, optional): Number of result pages to fetch. A single
            page (offset 0) is fetched when None. Defaults to None.
    """
    if game not in self.config['supported_games']:
        return ('UNSUPPORTED_GAME')

    query = {"req": "searchquarks", "best": True, "gameid": game}
    if username is not None:
        query['username'] = username

    # A missing page count means a single fetch at offset 0.
    page_range = range(1) if pages is None else range(pages)

    replays = []
    for page in page_range:
        query['offset'] = page
        r = self.get_data(query)
        replays += r.json()['results']['results']

    for quark in replays:
        if quark['emulator'] == 'fbneo' and quark['live'] is False:
            status = self.add_replay(replay=quark,
                                     emulator=quark['emulator'],
                                     game=game,
                                     player_replay=False)
            if status != 'ADDED':
                Logging().info(f'Not adding game, Status: {status}')
    return ("ADDED")
def record(self):
    """Start recording a replay

    Returns:
        Boolean: True when the capture succeeded.

    Raises:
        TimeoutError: when the recorder reports FailTimeout.
        ValueError: for any other non-Pass recorder status.

    Fixes: the failure log's second literal was not an f-string (it
    printed '{record_status}' verbatim with no space after the comma),
    and the unknown-error message used '$'-style interpolation.
    """
    Logging().info(
        f"Starting capture with {self.replay.id} and {self.replay.length}")
    time_min = int(self.replay.length / 60)
    Logging().info(f"Capture will take {time_min} minutes")
    self.update_status(status.RECORDING)

    # Start a recording and store the recording status
    Logging().debug(f"""Starting record.main with arguments:
        fc_challange_id={self.replay.id},
        fc_time={self.replay.length},
        kill_time={self.config['record_timeout']},
        fcadefbneo_path={self.config['fcadefbneo_path']},
        fcreplay_path={self.config['fcreplay_dir']},
        game_name={self.replay.game}""")
    record_status = Record().main(
        fc_challange_id=self.replay.id,
        fc_time=self.replay.length,
        kill_time=self.config['record_timeout'],
        fcadefbneo_path=self.config['fcadefbneo_path'],
        fcreplay_path=self.config['fcreplay_dir'],
        game_name=self.replay.game)

    # Check recording status
    if record_status != "Pass":
        Logging().error(f"Recording failed on {self.replay.id}, "
                        f"Status: {record_status}, exiting.")
        if record_status == "FailTimeout":
            raise TimeoutError
        else:
            Logging().error(f"Unknown error: {record_status}, exiting")
            raise ValueError

    Logging().info("Capture finished")
    self.update_status(status.RECORDED)
    return True
def main(self, fc_challange_id=None, fc_time=None, kill_time=None,
         fcadefbneo_path=None, fcreplay_path=None, game_name=None):
    """Record a fightcade replay by driving fcadefbneo under wine.

    Args:
        fc_challange_id: Challenge (quark) id to stream.
        fc_time: Expected replay length in seconds.
        kill_time: Seconds of grace before declaring a timeout.
        fcadefbneo_path: Path to the fcadefbneo install.
        fcreplay_path: Path to the fcreplay working directory.
        game_name: Rom/game name to launch.

    Returns:
        String: "Pass" on success, "FailTimeout" on timeout.

    Fix: the kill-timeout check compared running_time against
    (running_time + kill_time), which is always false, so a hung
    recording could never time out. It now fires once the recording
    runs kill_time seconds past the expected replay length.
    """
    Logging().info('Starting pulseaudio')
    subprocess.run(['pulseaudio', '--daemon'])

    # Get start time
    begin_time = datetime.datetime.now()

    # Make sure 'started.inf' is missing
    if os.path.exists(f"{fcadefbneo_path}/fightcade/started.inf"):
        os.remove(f"{fcadefbneo_path}/fightcade/started.inf")

    # Start ggpofbneo
    Logging().info("Starting fcadefbneo thread")
    Logging().debug(
        f"Arguments: {fcadefbneo_path}, {fc_challange_id}, {game_name}")
    ggpo_thread = threading.Thread(
        target=self.start_fcadefbneo,
        args=[fcadefbneo_path, fc_challange_id, game_name])
    ggpo_thread.start()
    Logging().info("Started ggpofbneo")

    # Check to see if fcadefbneo has started playing
    Logging().info('Checking to see if replay has started')
    while True:
        running_time = (datetime.datetime.now() - begin_time).seconds
        if os.path.exists(f"{fcadefbneo_path}/fightcade/started.inf"):
            Logging().info(
                'First frame displayed. Looking for recording dialog')
            if self.find_record_dialog():
                break

        # Timeout reached, exiting
        if running_time > kill_time:
            Logging().info('Match never started, exiting')
            self.cleanup_tasks()
            return "FailTimeout"
        time.sleep(0.1)

    begin_time = datetime.datetime.now()
    minute_count = -1
    while True:
        running_time = (datetime.datetime.now() - begin_time).seconds

        # Log what minute we are on
        if (running_time % 60) == 0 and int(running_time / 60) != minute_count:
            Logging().info(
                f'Minute: {int(running_time/60)} of {int(fc_time/60)}')
            minute_count = int(running_time / 60)

        # Finished recording video
        if running_time > fc_time:
            self._stop_recording()
            # Sleep for 2 seconds here in case there is some sort of delay writing file
            time.sleep(2)
            self.cleanup_tasks()
            return "Pass"

        # Kill timeout reached (see Fix note in the docstring).
        if running_time > (fc_time + kill_time):
            return "FailTimeout"

        time.sleep(0.2)

def _stop_recording(self):
    """Manually stop the recording: move the mouse into the fcadefbneo
    window, press alt to open the menu, step down seven entries and
    press enter."""
    pyautogui.moveTo(700, 384)
    time.sleep(0.05)
    pyautogui.press('alt')
    for _ in range(7):
        time.sleep(0.05)
        pyautogui.press('down')
    time.sleep(0.05)
    pyautogui.keyDown('enter')
    time.sleep(0.05)
    pyautogui.keyUp('enter')
def get_replay_status(challenge_id):
    """Look up and return the status string of a single replay."""
    replay = Database().get_single_replay(challenge_id=challenge_id)
    Logging().info(f"Current job STATUS is: {replay.status}")
    return (replay.status)
def get_top_weekly(request):
    """Cloud-function entry point: queue the top weekly replays, log the result."""
    result = Getreplay().get_top_weekly()
    Logging().info(result)
def check_environment(request):
    """Cloud-function entry point: dump the process environment to the log."""
    environment = os.environ
    Logging().info(environment)
def get_current_job_details():
    """Return the full database row for the replay currently being processed."""
    current_id = get_current_job_id()
    db = Database()
    replay = db.get_single_replay(challenge_id=current_id)
    Logging().info(f"Current job rowdata is: {replay}")
    return (replay)
def add_replay(self, replay, emulator, game, player_replay=True):
    """Add a fightcade replay to the processing queue.

    Args:
        replay (dict): Raw quark result from the fightcade API.
        emulator (String): Emulator name.
        game (String): Gameid.
        player_replay (bool, optional): True when a player requested this
            replay. Defaults to True.

    Returns:
        String: 'ADDED', 'TOO_SHORT', 'ALREADY_EXISTS' or 'MARKED_PLAYER'
        (None for a non-player replay outside the length limits).

    Fix: the rank check tested `'rank' in replay['players']` — membership
    in a *list* of player dicts, which is always False — so ranks were
    effectively never read. Ranks are now looked up per player, with
    missing or null ranks normalised to '0' as before.
    """
    challenge_id = replay['quarkid']
    p1_loc = replay['players'][0]['country']
    p2_loc = replay['players'][1]['country']
    p1 = replay['players'][0]['name']
    p2 = replay['players'][1]['name']
    date_replay = datetime.datetime.fromtimestamp(replay['date'] // 1000)
    length = replay['duration']
    created = False
    failed = False
    status = 'ADDED'
    date_added = datetime.datetime.utcnow()
    player_requested = player_replay

    def _rank_of(player):
        # Missing or null rank becomes the string '0'.
        rank = player.get('rank')
        return '0' if rank is None else rank

    p1_rank = _rank_of(replay['players'][0])
    p2_rank = _rank_of(replay['players'][1])

    # Insert into database
    Logging().info(f"Looking for {challenge_id}")

    # Check if replay exists
    data = self.db.get_single_replay(challenge_id=challenge_id)
    if data is None:
        # Limit the length of videos
        if length > int(self.config['min_replay_length']) and length < int(
                self.config['max_replay_length']):
            Logging().info(f"Adding {challenge_id} to queue")
            self.db.add_replay(challenge_id=challenge_id,
                               p1_loc=p1_loc,
                               p2_loc=p2_loc,
                               p1_rank=p1_rank,
                               p2_rank=p2_rank,
                               p1=p1,
                               p2=p2,
                               date_replay=date_replay,
                               length=length,
                               created=created,
                               failed=failed,
                               status=status,
                               date_added=date_added,
                               player_requested=player_requested,
                               game=game,
                               emulator=emulator,
                               video_processed=False)
            return ('ADDED')
        else:
            Logging().info(f"{challenge_id} is only {length} not adding")
            if player_replay:
                return ('TOO_SHORT')
    else:
        Logging().info(f"{challenge_id} already exists")
        if player_replay:
            # Check if the returned replay is a player replay
            if data.player_requested:
                return ('ALREADY_EXISTS')
            else:
                # Update DB to mark returned replay as player replay
                self.db.update_player_requested(challenge_id=challenge_id)
                return ('MARKED_PLAYER')
        return ('ALREADY_EXISTS')
def destroy_fcreplay(self, failed=False):
    """Destroy the current compute engine.

    Checks for the existence of /tmp/destroying. If it exists then don't
    try and destroy fcreplay.

    Args:
        failed (bool, optional): Updates the replay to failed. Defaults to False.

    Returns:
        The cloud-function HTTP status code, or False when this host is
        not an fcreplay image instance.

    Fix: corrected 'retruned' typo in the response log message.
    """
    # Create destroying file; exit quietly if another destroy is running.
    try:
        Path('/tmp/destroying').touch(0o644, exist_ok=False)
    except FileExistsError:
        # File already exists, not running
        sys.exit(0)

    Logging().info("Starting destroy_fcreplay")
    RECEIVING_FUNCTION = 'destroy_fcreplay_instance'
    HOSTNAME = socket.gethostname()

    if 'fcreplay-image-' not in HOSTNAME:
        Logging().info(f"Not destroying {HOSTNAME}")
        return (False)

    # Only retry if failed is false, by default this is false, but sometimes recording
    # fails. So we don't want to try and re-record them until we work out why they
    # have failed.
    if failed is False:
        try:
            with open('/tmp/fcreplay_status', 'r') as f:
                line = f.readline()
                local_replay_id = line.split()[0].strip()
                local_replay_status = line.split()[1].strip()
                if local_replay_status in [
                        'UPLOADING_TO_IA', 'UPLOADING_TO_YOUTUBE',
                        'UPLOADED_TO_IA', 'UPLOADED_TO_YOUTUBE'
                ]:
                    Logging().error(
                        f"Not able to safely recover replay {local_replay_id}")
                elif local_replay_status not in [
                        'FINISHED', 'REMOVED_GENERATED_FILES'
                ]:
                    # Replay was in the middle of processing, going to set replay to be re-recorded
                    db = Database()
                    db.rerecord_replay(challenge_id=local_replay_id)
        except FileNotFoundError:
            Logging().error('/tmp/fcreplay_status not found')

    function_url = f'https://{self.REGION}-{self.PROJECT_ID}.cloudfunctions.net/{RECEIVING_FUNCTION}'
    metadata_server_url = \
        f"http://metadata/computeMetadata/v1/instance/service-accounts/{self.config['gcloud_compute_service_account']}/identity?audience="
    token_full_url = metadata_server_url + function_url
    token_headers = {'Metadata-Flavor': 'Google'}

    # Fetch the token
    token_response = requests.get(token_full_url, headers=token_headers)
    jwt = token_response.text

    # Provide the token in the request to the receiving function
    function_headers = {'Authorization': f'bearer {jwt}'}
    function_response = requests.post(function_url,
                                      headers=function_headers,
                                      json={'instance_name': HOSTNAME})
    Logging().info(
        f"destroy_fcreplay returned: {function_response.status_code}")
    status = function_response.status_code

    if self.config['gcloud_shutdown_instance']:
        subprocess.run(['shutdown', 'now', '-h'])

    return (status)
def get_current_job_id():
    """Return the challenge id of the job currently being processed."""
    current_job = Database().get_current_job()
    Logging().info(f"Current job ID is: {current_job.challenge_id}")
    return (current_job.challenge_id)