Exemplo n.º 1
0
def fcreplay_running(request):
    """Cloud-function endpoint: report whether the instance cap is reached.

    Counts every compute instance whose name contains the fcreplay prefix,
    regardless of state (non-RUNNING instances still count toward the cap).

    Returns:
        str: JSON '{"status": true}' when instance_count >= the configured
        maximum, '{"status": false}' otherwise.
    """
    logging.info("Checking if there are instances running")
    instance_prefix = "fcreplay-image-"
    compute = googleapiclient.discovery.build('compute', 'v1')
    result = compute.instances().list(project=config['gcloud_project'],
                                      zone=config['gcloud_zone']).execute()

    instance_count = 0
    for instance in result['items']:
        if instance_prefix not in instance['name']:
            continue
        if instance['status'] == "RUNNING":
            logging.info(f"{instance['name']} instance running adding to count")
        else:
            # Bug fix: the original logged the bare prefix instead of the
            # actual instance name here.
            logging.info(
                f"{instance['name']} status is {instance['status']}, adding to count"
            )
        # Both branches count toward the cap, so increment once.
        instance_count += 1

    logging.info(
        f"There are {instance_count}/{config['gcloud_instance_max']} running")
    return (json.dumps(
        {'status': instance_count >= config['gcloud_instance_max']}))
Exemplo n.º 2
0
def get_top_weekly():
    """Queue the top weekly replays for recording.

    Fetches three pages (15 results each) of 'best' quarks since the start
    of the current week and hands each supported one to add_replay.

    Returns:
        str: Always "ADDED".
    """
    today = datetime.datetime.today()
    start_week = today - timedelta(days=today.weekday())
    start_week_ms = int(start_week.timestamp() * 1000)
    query = {'req': 'searchquarks', 'best': True, 'since': start_week_ms}

    replays = []
    for page in range(3):
        # The search API pages in blocks of 15 results.
        query['offset'] = page * 15
        r = get_data(query)
        replays += r.json()['results']['results']

    for i in replays:
        # Bug fix: the original tested the literal string 'gameid' against
        # supported_games (always the same answer) and then used 'pass'
        # instead of 'continue', so unsupported games were still added.
        if i['gameid'] not in config['supported_games']:
            logging.info(
                f"Game {i['gameid']} not supported for replay {i['quarkid']}")
            continue
        status = add_replay(replay=i,
                            emulator=i['emulator'],
                            game=i['gameid'],
                            player_replay=False)
        if status != 'ADDED':
            logging.info(f"Not adding replay {i['quarkid']}, Status: {status}")

    return ("ADDED")
Exemplo n.º 3
0
 def update_status(self, status):
     """Update the replay status.

     Writes "<replay id> <status>" to /tmp/fcreplay_status (this file is
     read back during crash recovery by destroy_fcreplay) and mirrors the
     same status into the database.

     Args:
         status (str): New status value, e.g. 'RECORDING'.
     """
     logging.info(f"Set status to {status}")
     with open('/tmp/fcreplay_status', 'w') as f:
         f.write(f"{self.replay.id} {status}")
     self.db.update_status(challenge_id=self.replay.id, status=status)
Exemplo n.º 4
0
    def remove_generated_files(self):
        """Remove generated files.

        Generated files are the encoded video and the thumbnail.

        Bug fix: the video path contained a literal placeholder string
        instead of the computed filename, so the video was never removed
        (and the remove raised FileNotFoundError).
        """
        logging.info("Removing old files")
        filename = f"{self.replay.id}.mkv"
        os.remove(f"{self.config['fcreplay_dir']}/finished/{filename}")
        os.remove(f"{self.config['fcreplay_dir']}/tmp/thumbnail.jpg")

        self.update_status("REMOVED_GENERATED_FILES")
        logging.info("Finished removing files")
Exemplo n.º 5
0
def launch_fcreplay(request):
    """Launch a new preemptible fcreplay compute instance.

    Skips the launch when an instance is already running; otherwise
    inserts a VM built from the fcreplay image and waits for the insert
    operation to finish.

    Returns:
        str: JSON '{"status": true}' on launch, '{"status": false}' when
        an instance already exists.
    """
    logging.info("Running: launch_fcreplay")

    # Bail out early if an instance is already up
    if json.loads(fcreplay_running(None))['status']:
        return (json.dumps({"status": False}))

    # Unique name for the new VM
    vm_name = 'fcreplay-image-' + str(uuid.uuid1())

    compute = googleapiclient.discovery.build('compute', 'v1')

    machine_type = f"zones/{config['gcloud_zone']}/machineTypes/custom-6-5632"
    nat_access_config = {
        "type": "ONE_TO_ONE_NAT",
        "name": "External NAT",
        "setPublicPtr": False,
        "networkTier": "STANDARD"
    }
    instance_body = {
        'name': vm_name,
        'machineType': machine_type,
        "networkInterfaces": [{
            "network": "global/networks/default",
            "accessConfigs": [nat_access_config]
        }],
        'disks': [{
            "boot": True,
            "initializeParams": {
                "sourceImage": "global/images/fcreplay-image"
            },
            "autoDelete": True
        }],
        'scheduling': {'preemptible': True},
        "serviceAccounts": [{
            "email": config['gcloud_compute_service_account'],
            "scopes": ["https://www.googleapis.com/auth/cloud-platform"]
        }]
    }

    insert_op = compute.instances().insert(project=config['gcloud_project'],
                                           zone=config['gcloud_zone'],
                                           body=instance_body).execute()

    wait_for_operation(compute, config['gcloud_project'],
                       config['gcloud_zone'], insert_op['name'])
    return (json.dumps({"status": True}))
Exemplo n.º 6
0
def wait_for_operation(compute, project, zone, operation):
    """Poll a zone operation once a second until it reports DONE.

    Args:
        compute: Compute API client.
        project (str): GCP project id.
        zone (str): GCP zone.
        operation (str): Operation name to poll.

    Returns:
        dict: The finished operation resource.

    Raises:
        Exception: when the finished operation carries an 'error' payload.
    """
    logging.info('Waiting for operation to finish...')
    while True:
        result = compute.zoneOperations().get(project=project,
                                              zone=zone,
                                              operation=operation).execute()
        if result['status'] != 'DONE':
            time.sleep(1)
            continue
        logging.info("done.")
        if 'error' in result:
            raise Exception(result['error'])
        return result
Exemplo n.º 7
0
    def create_thumbnail(self):
        """Create a thumbnail JPEG from the finished video.

        Grabs a single frame 20 seconds into the video with ffmpeg.

        Bug fix: the ffmpeg input path contained a literal placeholder
        string instead of the computed video filename.
        """
        logging.info("Making thumbnail")
        filename = f"{self.replay.id}.mkv"
        subprocess.run([
            "ffmpeg", "-ss", "20", "-i",
            f"{self.config['fcreplay_dir']}/finished/{filename}", "-vframes:v",
            "1", f"{self.config['fcreplay_dir']}/tmp/thumbnail.jpg"
        ])

        self.update_status('THUMBNAIL_CREATED')
        logging.info("Finished making thumbnail")
Exemplo n.º 8
0
        def failed(self, *args, **kwargs):
            """Run the wrapped method, marking the replay FAILED on any error.

            On exception: logs the traceback, flags the replay as failed in
            the database, optionally destroys the cloud instance, and exits
            with status 1.
            """
            try:
                return func(self, *args, **kwargs)
            except Exception:
                trace_back = sys.exc_info()[2]
                # Typo fix: message previously read "Excption"
                logging.error(
                    f"Exception: {str(traceback.format_tb(trace_back))},  shutting down"
                )
                logging.info(f"Setting {self.replay.id} to failed")
                self.db.update_failed_replay(challenge_id=self.replay.id)
                self.update_status("FAILED")

                if self.config['gcloud_destroy_on_fail']:
                    destroy_fcreplay(failed=True)
                sys.exit(1)
Exemplo n.º 9
0
def main(Debug, Gcloud):
    """The main loop for processing one or more replays

    Args:
        Debug (bool): Exit after one loop
        Gcloud (bool): Cloud shutdown after processing
    """
    # On google cloud, clear any stale 'destroying' marker left by a
    # previous run before starting
    if Gcloud and os.path.exists('/tmp/destroying'):
        os.remove('/tmp/destroying')

    while True:
        replay = Replay()
        if replay.replay is None:
            logging.info("No more replays. Waiting for replay submission")
            time.sleep(5)
        else:
            # Full pipeline: record, move, encode, describe, thumbnail
            replay.add_job()
            replay.record()
            replay.move()
            replay.encode()
            replay.set_description()
            replay.create_thumbnail()

            # Optional upload / cleanup stages, driven by config
            if config['upload_to_ia']:
                replay.upload_to_ia()
            if config['upload_to_yt']:
                replay.upload_to_yt()
            if config['remove_generated_files']:
                replay.remove_generated_files()

            replay.remove_job()
            replay.db.update_created_replay(challenge_id=replay.replay.id)
            replay.set_created()

        if Gcloud:
            destroy_fcreplay()
            sys.exit(0)
        if Debug:
            sys.exit(0)
Exemplo n.º 10
0
def destroy_stopped_instances(request):
    """Destroy the first stopped (TERMINATED) fcreplay instance found.

    Only acts when 'gcloud_destroy_when_stopped' is enabled in config.

    Returns:
        str: JSON '{"status": true}' when an instance was destroyed,
        '{"status": false}' otherwise.
    """
    logging.info("Checking if there are instances stopped")
    fcreplay_prefix = "fcreplay-image-"
    compute = googleapiclient.discovery.build('compute', 'v1')
    result = compute.instances().list(project=config['gcloud_project'],
                                      zone=config['gcloud_zone']).execute()

    for instance in result['items']:
        if fcreplay_prefix not in instance['name']:
            continue
        if instance['status'] != "TERMINATED":
            continue
        if not config['gcloud_destroy_when_stopped']:
            continue
        logging.info(f"Destoying {instance['name']}")
        destroy_fcreplay_instance(instance_name=instance['name'])
        return (json.dumps({'status': True}))

    return (json.dumps({'status': False}))
Exemplo n.º 11
0
    def get_replay(self):
        """Fetch the next replay to process from the database.

        Player-requested replays take priority when enabled; otherwise a
        random replay or the oldest queued replay is returned depending on
        configuration.

        Returns:
            The replay row, or None when nothing is queued.
        """
        logging.info('Getting replay from database')

        if self.config['player_replay']:
            player_replay = self.db.get_oldest_player_replay()
            if player_replay is not None:
                logging.info('Found player replay to encode')
                return player_replay
            logging.info('No more player replays')

        if not self.config['random_replay']:
            logging.info('Getting oldest replay')
            return self.db.get_oldest_replay()

        logging.info('Getting random replay')
        return self.db.get_random_replay()
Exemplo n.º 12
0
def destroy_fcreplay(failed=False):
    """Destroy the current compute engine instance via a cloud function.

    Checks for the existence of /tmp/destroying. If it exists then
    don't try and destroy fcreplay (the marker acts as a lock so only one
    destroy attempt runs).

    Args:
        failed (bool, optional): Updates the replay to failed. Defaults to False.

    Returns:
        bool or int: False when not running on an fcreplay instance,
        otherwise the HTTP status code returned by the destroy function.
        Exits the process early when the lock file already exists.
    """
    # Create destroying file (exist_ok=False makes this an atomic lock;
    # a second caller hits FileExistsError and exits)
    try:
        Path('/tmp/destroying').touch(0o644, exist_ok=False)
    except FileExistsError:
        # File already exists, not running
        sys.exit(0)

    logging.info("Starting destroy_fcreplay")
    RECEIVING_FUNCTION = 'destroy_fcreplay_instance'
    HOSTNAME = socket.gethostname()

    # Safety check: only hosts built from the fcreplay image may be destroyed
    if 'fcreplay-image-' not in HOSTNAME:
        logging.info(f"Not destroying {HOSTNAME}")
        return(False)

    # Only retry if failed is false, by default this is false, but sometimes recording
    # fails. So we don't want to try and re-record them until we work out why they
    # have failed.
    if failed is False:
        try:
            # /tmp/fcreplay_status holds "<replay id> <status>" written by
            # update_status
            with open('/tmp/fcreplay_status', 'r') as f:
                line = f.readline()
                local_replay_id = line.split()[0].strip()
                local_replay_status = line.split()[1].strip()

            if local_replay_status in ['UPLOADING_TO_IA', 'UPLOADING_TO_YOUTUBE', 'UPLOADED_TO_IA', 'UPLOADED_TO_YOUTUBE']:
                # Mid-upload: re-recording could duplicate uploads
                logging.error(f"Not able to safely recover replay {local_replay_id}")
            elif local_replay_status not in ['FINISHED', 'REMOVED_GENERATED_FILES']:
                # Replay was in the middle of processing, going to set replay to be re-recorded
                db = Database()
                db.rerecord_replay(challenge_id=local_replay_id)
        except FileNotFoundError:
            logging.error('/tmp/fcreplay_status not found')

    # Obtain an identity token from the GCE metadata server and invoke the
    # destroy cloud function with it.
    # NOTE(review): REGION and PROJECT_ID are module globals not visible in
    # this chunk — confirm they are defined at import time.
    function_url = f'https://{REGION}-{PROJECT_ID}.cloudfunctions.net/{RECEIVING_FUNCTION}'
    metadata_server_url = \
        f"http://metadata/computeMetadata/v1/instance/service-accounts/{config['gcloud_compute_service_account']}/identity?audience="
    token_full_url = metadata_server_url + function_url
    token_headers = {'Metadata-Flavor': 'Google'}

    # Fetch the token
    token_response = requests.get(token_full_url, headers=token_headers)
    jwt = token_response.text

    # Provide the token in the request to the receiving function
    function_headers = {'Authorization': f'bearer {jwt}'}
    function_response = requests.post(function_url, headers=function_headers, json={'instance_name': HOSTNAME})

    logging.info(f"destroy_fcreplay retruned: {function_response.status_code}")
    status = function_response.status_code
    return(status)
Exemplo n.º 13
0
def get_current_job_remaining():
    """Return the seconds left to complete the current job.

    Returns:
        int: Remaining seconds, or 0 when the job is past its length
        (probably uploading or doing post-processing).
    """
    db = Database()
    # Kept for its logging side effect (logs the current job id); the
    # return value itself is not needed here.
    get_current_job_id()

    job = db.get_current_job()
    current_time = datetime.datetime.utcnow()

    # Bug fix: timedelta.seconds wraps at 24 hours; total_seconds() is the
    # true elapsed time.
    running_time = int((current_time - job.start_time).total_seconds())
    time_left = job.length - running_time

    logging.info(
        f"Current job status: running_time: {running_time}, time_left: {time_left}"
    )

    # Negative time left means the job has exceeded its nominal length.
    return max(0, time_left)
Exemplo n.º 14
0
def get_game_replays(game):
    """Queue recent non-live fbneo replays for a game.

    Args:
        game (String): Gameid

    Returns:
        str: 'UNSUPPORTED_GAME' for unknown games, otherwise 'ADDED'.
    """
    if game not in config['supported_games']:
        return ('UNSUPPORTED_GAME')

    query = {'req': 'searchquarks', 'gameid': game}

    r = get_data(query)

    for i in r.json()['results']['results']:
        if i['emulator'] == 'fbneo' and i['live'] is False:
            # Typo fix: was emaultor=i['emaultor'], which passed an
            # unexpected keyword to add_replay and read a missing key.
            status = add_replay(replay=i,
                                emulator=i['emulator'],
                                game=game,
                                player_replay=False)
            if status != 'ADDED':
                logging.info(f'Not adding game, Status: {status}')

    return ("ADDED")
Exemplo n.º 15
0
    def encode(self):
        """Encode the recorded AVI chunks into a single MKV with mencoder.

        Chunk files are expected to be named with a hex sequence number
        between '_' and '.' (e.g. <id>_0000000a.avi); they are sorted by
        that number so the video is assembled in order.

        Raises:
            subprocess.CalledProcessError: when mencoder exits non-zero.
        """
        logging.info("Encoding file")
        finished_dir = f"{self.config['fcreplay_dir']}/finished"

        # Sort chunk files numerically by the hex counter in the filename
        sorted_avi_names = sorted(
            os.listdir(finished_dir),
            key=lambda name: int(name.split('_')[1].split('.')[0], 16))
        avi_files = [f"{finished_dir}/" + name for name in sorted_avi_names]

        # Build the command once so the logged command and the executed
        # command cannot drift apart (previously the log said preset=fast
        # while the actual run used preset=slow).
        mencoder_cmd = [
            'mencoder', '-oac', 'mp3lame', '-lameopts', 'abr:br=128', '-ovc',
            'x264', '-x264encopts', 'preset=slow:crf=23:subq=1:threads=8',
            '-vf', 'flip,scale=800:600,dsize=4/3', *avi_files, '-o',
            f"{finished_dir}/{self.replay.id}.mkv"
        ]
        logging.info("Running mencoder with:" + " ".join(mencoder_cmd))

        mencoder_rc = subprocess.run(mencoder_cmd, capture_output=True)

        try:
            mencoder_rc.check_returncode()
        except subprocess.CalledProcessError as e:
            logging.error(
                f"Unable to process avi files. Return code: {e.returncode}, stdout: {mencoder_rc.stdout}, stderr: {mencoder_rc.stderr}"
            )
            raise e
Exemplo n.º 16
0
    def upload_to_ia(self):
        """Upload to internet archive

        Sometimes it will return a 403, even though the file doesn't already
        exist. So we decorate the function with the @retry decorator to try
        again in a little bit. Max of 3 tries
        """
        self.update_status('UPLOADING_TO_IA')
        title = f"{self.config['supported_games'][self.replay.game]['game_name']}: ({self.replay.p1_loc}) {self.replay.p1} vs" \
                f"({self.replay.p2_loc}) {self.replay.p2} - {self.replay.date_replay}"
        filename = f"{self.replay.id}.mkv"
        # Bug fix: was str(...)[10], which produced a single character;
        # take the first 10 characters (the date portion) instead.
        date_short = str(self.replay.date_replay)[:10]

        # Make identifier for Archive.org
        ident = str(self.replay.id).replace("@", "-")
        fc_video = get_item(ident)

        metadata = {
            'title': title,
            'mediatype': self.config['ia_settings']['mediatype'],
            'collection': self.config['ia_settings']['collection'],
            'date': date_short,
            'description': self.description_text,
            'subject': self.config['ia_settings']['subject'],
            'creator': self.config['ia_settings']['creator'],
            'language': self.config['ia_settings']['language'],
            'licenseurl': self.config['ia_settings']['license_url']
        }

        logging.info("Starting upload to archive.org")
        # Bug fix: the upload path contained a literal placeholder string;
        # use the computed video filename.
        fc_video.upload(f"{self.config['fcreplay_dir']}/finished/{filename}",
                        metadata=metadata,
                        verbose=True)

        self.update_status('UPLOADED_TO_IA')
        logging.info("Finished upload to archive.org")
Exemplo n.º 17
0
def get_ranked_replays(game, username=None, pages=None):
    """Get ranked replays

    Args:
        game (String): Gameid
        username (String, optional): Player profile name. Defaults to None.
        pages (int, optional): Number of result pages to fetch; a single
            page when None. Defaults to None.

    Returns:
        str: 'UNSUPPORTED_GAME' for unknown games, otherwise 'ADDED'.
    """
    if game not in config['supported_games']:
        return ('UNSUPPORTED_GAME')

    query = {"req": "searchquarks", "best": True, "gameid": game}

    if username is not None:
        query['username'] = username

    replays = []
    for page in range(pages if pages is not None else 1):
        # NOTE(review): other fetchers step the offset by 15 per page (see
        # get_top_weekly); stepping by 1 here likely re-fetches mostly
        # overlapping results — confirm against the quark search API.
        query['offset'] = page
        r = get_data(query)
        replays += r.json()['results']['results']

    for i in replays:
        if i['emulator'] == 'fbneo' and i['live'] is False:
            status = add_replay(replay=i,
                                emulator=i['emulator'],
                                game=game,
                                player_replay=False)
            if status != 'ADDED':
                logging.info(f'Not adding game, Status: {status}')

    return ("ADDED")
Exemplo n.º 18
0
def check_for_replay(request):
    """Launch a recording instance when a replay is queued.

    Stopped instances are cleaned up first; when one was destroyed this
    round, no new instance is launched.

    Returns:
        str: JSON '{"status": true}' when a launch was triggered,
        '{"status": false}' otherwise.
    """
    if json.loads(destroy_stopped_instances(True))['status']:
        return json.dumps({'status': False})

    logging.info("Looking for replay")
    # Player-requested replays take priority over the general queue
    for label, fetch in (("player replay", db.get_oldest_player_replay),
                         ("replay", db.get_oldest_replay)):
        if fetch() is not None:
            logging.info(f"Found {label}")
            launch_fcreplay(None)
            return json.dumps({"status": True})

    logging.info("No replays")
    return json.dumps({"status": False})
Exemplo n.º 19
0
def video_status(request):
    """Mark replays whose archive.org video has finished processing.

    A replay counts as processed once its item thumbnail exists on
    archive.org (HTTP 200 on __ia_thumb.jpg).

    Returns:
        str: JSON '{"status": true}'.
    """
    logging.info("Check status for completed videos")

    # Completed replays still flagged with video_processed == false
    for replay in db.get_unprocessed_replays():
        logging.info(f"Checking: {replay.id}")
        item_ident = replay.id.replace('@', '-')
        # The thumbnail only appears after archive.org finishes processing
        r = requests.get(
            f"https://archive.org/download/{item_ident}/__ia_thumb.jpg"
        )

        logging.info(f"ID: {replay.id}, Status: {r.status_code}")
        if r.status_code == 200:
            db.set_replay_processed(challenge_id=replay.id)

    return json.dumps({"status": True})
Exemplo n.º 20
0
    def record(self):
        """Start recording a replay.

        Returns:
            bool: True when the capture succeeded.

        Raises:
            TimeoutError: when the recording hit the kill timeout.
            ValueError: for any other recording failure.
        """
        logging.info(
            f"Starting capture with {self.replay.id} and {self.replay.length}")
        time_min = int(self.replay.length / 60)
        logging.info(f"Capture will take {time_min} minutes")

        self.update_status('RECORDING')

        # Star a recording store recording status
        logging.debug(f"""Starting record.main with argumens:
            fc_challange_id={self.replay.id},
            fc_time={self.replay.length},
            kill_time={self.config['record_timeout']},
            fcadefbneo_path={self.config['fcadefbneo_path']},
            fcreplay_path={self.config['fcreplay_dir']},
            game_name={self.replay.game}""")
        record_status = fc_record.main(
            fc_challange_id=self.replay.id,
            fc_time=self.replay.length,
            kill_time=self.config['record_timeout'],
            fcadefbneo_path=self.config['fcadefbneo_path'],
            fcreplay_path=self.config['fcreplay_dir'],
            game_name=self.replay.game)

        # Check recording status
        if record_status != "Pass":
            # Bug fix: the second line was missing the f prefix, so the
            # literal text "{record_status}" was logged.
            logging.error(f"Recording failed on {self.replay.id},"
                          f"Status: \"{record_status}\", exiting.")

            if record_status == "FailTimeout":
                raise TimeoutError
            # Bug fix: removed the stray '$' from the log message.
            logging.error(f"Unknown error: {record_status}, exiting")
            raise ValueError

        logging.info("Capture finished")
        self.update_status('RECORDED')

        return True
Exemplo n.º 21
0
    def set_description(self):
        """Build and store the video description.

        Composes the player/location/date header, optionally appends the
        contents of a configured file, then writes the description to the
        database.

        Returns:
            Boolean: True on success, False when the configured append
            file is missing.
        """
        logging.info("Creating description")

        self.description_text = (
            f"({self.replay.p1_loc}) {self.replay.p1} vs "
            f"({self.replay.p2_loc}) {self.replay.p2} - {self.replay.date_replay}"
            f"\nFightcade replay id: {self.replay.id}")

        # Optionally append extra text from a configured file
        append_cfg = self.config['description_append_file']
        if append_cfg[0] is True:
            append_path = append_cfg[1]
            if not os.path.exists(append_path):
                logging.error(
                    f"Description append file {append_path} doesn't exist"
                )
                return False
            with open(append_path, 'r') as description_append:
                self.description_text += "\n" + description_append.read()

        self.update_status('DESCRIPTION_CREATED')
        logging.info("Finished creating description")

        # Add description to database
        logging.info('Adding description to database')
        self.db.add_description(challenge_id=self.replay.id,
                                description=self.description_text)

        logging.debug(
            f"Description Text is: {self.description_text.encode('unicode-escape')}"
        )
        return True
Exemplo n.º 22
0
def get_current_job_details():
    """Return the full replay row for the job currently being processed."""
    challenge_id = get_current_job_id()
    replay_row = Database().get_single_replay(challenge_id=challenge_id)
    logging.info(f"Current job rowdata is: {replay_row}")
    return replay_row
Exemplo n.º 23
0
def get_replay_status(challenge_id):
    """Return the status column for a single replay."""
    replay_row = Database().get_single_replay(challenge_id=challenge_id)
    logging.info(f"Current job STATUS is: {replay_row.status}")
    return replay_row.status
Exemplo n.º 24
0
            logging.info("No more replays. Waiting for replay submission")
            time.sleep(5)

        if Gcloud:
            destroy_fcreplay()
            sys.exit(0)

        if Debug:
            sys.exit(0)


def console():
    """Command-line entry point: parse flags and run the main loop."""
    arg_parser = argparse.ArgumentParser(description='FCReplay - Video Catpure')
    arg_parser.add_argument(
        '--debug', action='store_true', help='Exits after a single loop')
    arg_parser.add_argument(
        '--gcloud', action='store_true', help='Enabled google cloud functions')
    options = arg_parser.parse_args()
    main(options.debug, options.gcloud)


# Loop and choose a random replay every time
if __name__ == "__main__":
    console()

# NOTE(review): this log line is at module level, so it also fires on
# import, not just after the queue finishes — confirm that is intended.
logging.info("Finished processing queue")
Exemplo n.º 25
0
def check_environment(request):
    """Cloud-function endpoint: dump the process environment to the log."""
    logging.info(os.environ)
Exemplo n.º 26
0
def destroy_vm(compute, project, zone, instance_name):
    """Delete a compute instance and block until the delete completes."""
    logging.info(f"Destroying: {instance_name}")
    delete_op = compute.instances().delete(project=project,
                                           zone=zone,
                                           instance=instance_name).execute()
    wait_for_operation(compute, project, zone, delete_op['name'])
Exemplo n.º 27
0
def destroy_fcreplay_instance(request=None, instance_name=None):
    """Stop and delete an fcreplay compute instance.

    The instance name comes either from the JSON request body
    ('instance_name' key) or from the keyword argument. Only instances
    whose name contains 'fcreplay-image-' are ever touched.

    Returns:
        str: JSON '{"status": true}' when the instance was destroyed,
        '{"status": false}' otherwise.
    """
    request_json = request.get_json(silent=True) if request is not None else None

    logging.info(f"request_json: {request_json}")
    logging.info(f"instance_name: {instance_name}")

    name_in_json = request_json is not None and 'instance_name' in request_json
    if not name_in_json and instance_name is None:
        logging.info('No instance_name found')
        return json.dumps({"status": False})

    if request_json is not None:
        logging.info("Setting instance name from json")
        instance_name = request_json['instance_name']

    # Safety check: never touch instances outside the fcreplay image family
    if 'fcreplay-image-' not in instance_name:
        logging.info(f"Not deleting {instance_name}")
        return json.dumps({"status": False})

    logging.info(f"Deleting {instance_name} compute instance")

    compute = googleapiclient.discovery.build('compute', 'v1')
    stop_op = compute.instances().stop(project=config['gcloud_project'],
                                       zone=config['gcloud_zone'],
                                       instance=instance_name).execute()

    wait_for_operation(compute, config['gcloud_project'],
                       config['gcloud_zone'], stop_op['name'])

    destroy_vm(compute, config['gcloud_project'], config['gcloud_zone'],
               instance_name)
    return json.dumps({"status": True})
Exemplo n.º 28
0
def get_current_job_id():
    """Return the challenge id of the job currently being processed."""
    current_job = Database().get_current_job()
    logging.info(f"Current job ID is: {current_job.challenge_id}")
    return current_job.challenge_id
Exemplo n.º 29
0
def get_top_weekly(request):
    """Cloud-function endpoint: queue the weekly top replays, log the result."""
    result = getreplay.get_top_weekly()
    logging.info(result)
Exemplo n.º 30
0
def add_replay(replay, emulator, game, player_replay=True):
    """Add a Fightcade replay to the recording queue.

    Args:
        replay (dict): Quark result from the Fightcade API.
        emulator (str): Emulator name.
        game (str): Gameid.
        player_replay (bool, optional): Whether a player requested it.

    Returns:
        str or None: 'ADDED', 'TOO_SHORT' (player replays only),
        'ALREADY_EXISTS' or 'MARKED_PLAYER'; None when a non-player
        replay falls outside the allowed length.
    """
    challenge_id = replay['quarkid']
    p1_loc = replay['players'][0]['country']
    p2_loc = replay['players'][1]['country']
    p1 = replay['players'][0]['name']
    p2 = replay['players'][1]['name']
    date_replay = datetime.datetime.fromtimestamp(replay['date'] // 1000)
    length = replay['duration']
    created = False
    failed = False
    status = 'ADDED'
    date_added = datetime.datetime.utcnow()
    player_requested = player_replay

    # Bug fix: the first check tested the players *list* ('rank' in
    # replay['players']) instead of player 0's dict; .get() also guards
    # against one player missing the key entirely.
    if 'rank' in replay['players'][0] or 'rank' in replay['players'][1]:
        p1_rank = replay['players'][0].get('rank')
        if p1_rank is None:
            p1_rank = '0'
        p2_rank = replay['players'][1].get('rank')
        if p2_rank is None:
            p2_rank = '0'
    else:
        p1_rank = '0'
        p2_rank = '0'

    # Insert into database
    logging.info(f"Looking for {challenge_id}")

    # Check if replay exists
    data = db.get_single_replay(challenge_id=challenge_id)
    if data is None:
        # Limit the length of videos
        if length > int(config['min_replay_length']) and length < int(
                config['max_replay_length']):
            logging.info(f"Adding {challenge_id} to queue")
            db.add_replay(challenge_id=challenge_id,
                          p1_loc=p1_loc,
                          p2_loc=p2_loc,
                          p1_rank=p1_rank,
                          p2_rank=p2_rank,
                          p1=p1,
                          p2=p2,
                          date_replay=date_replay,
                          length=length,
                          created=created,
                          failed=failed,
                          status=status,
                          date_added=date_added,
                          player_requested=player_requested,
                          game=game,
                          emulator=emulator,
                          video_processed=False)
            return ('ADDED')
        else:
            logging.info(f"{challenge_id} is only {length} not adding")
            if player_replay:
                return ('TOO_SHORT')
    else:
        logging.info(f"{challenge_id} already exists")
        if player_replay:
            # Check if the returned replay is a player replay
            if data.player_requested:
                return ('ALREADY_EXISTS')
            else:
                # Update DB to mark returned replay as player replay
                db.update_player_requested(challenge_id=challenge_id)
                return ('MARKED_PLAYER')
        return ('ALREADY_EXISTS')