Example #1
0
def backup_media_to_s3(firebase_db):
    """Back up all un-backed-up event media from firebase to S3.

    For every event missing a 'backup_link': download its source media,
    upload it to S3 under the generated filename, delete the local copy,
    and record the S3 key on the event.  Events whose media download
    returns HTTP 403 are deleted from firebase (the media is permanently
    forbidden).

    :param firebase_db: client exposing get_events_that_need_backup_iter(),
        update(event) and delete(event_id).
    """
    forbidden_events = []

    # get all events in firebase that do not have a 'backup_link'
    for event in firebase_db.get_events_that_need_backup_iter():
        event_id = event['id']
        src_url = event['source_url']
        media_type = event['type']

        try:
            filename, filepath = file_utils.download_file_from_url(src_url, media_type, event_id)

            s3_utils.upload(filepath, key=filename)

            # delete local file
            file_utils.delete_file(filepath)

            # update firebase with 'backup_link'
            event['backup_link'] = filename
            firebase_db.update(event)

        except HTTPError as e:
            if e.code == 403:
                # very infrequently, a 403 is returned from the download link
                # so this event is useless, delete it from firebase
                logger.warning('This event has media that is forbidden. Deleting event {}'.format(event))

                firebase_db.delete(event['id'])
                forbidden_events.append(event)

                logger.warning('Deleted event {}'.format(event))
            else:
                # previously non-403 HTTP errors were silently swallowed;
                # log them so failed backups are visible, but keep going so
                # one bad event does not abort the whole run
                logger.warning('Failed to back up media for event {}: {}'.format(event_id, e))

    if forbidden_events:
        logger.warning('Deleted {} events from firebase: {}'.format(len(forbidden_events), forbidden_events))
    logger.info('finished backup all media')
Example #2
0
 def __init__(self):
     """Initialize the image validator: work queue, docker client, config, result lists."""
     self.queue = Queue.Queue()
     self.docker_client = client.Client()
     # environment variable overrides the default config file location
     config_path = os.environ.get(SearchAndValidate.CONFIG_ENV_NAME) or SearchAndValidate.CONFIG_FILE_PATH
     # context manager so the config file handle is closed
     # (the original left the open() file object dangling)
     with open(config_path) as config_file:
         self.config = yaml.safe_load(config_file)
     self.pulled_images = []
     self.failed_images = []
     # clear any stale results file from a previous run
     file_utils.delete_file()
Example #3
0
 def _save_result(self):
     """Write pull results to ./results.txt, replacing any previous results file.

     Failed images are listed first, then successfully pulled images.
     """
     file_utils.delete_file()
     with open('./results.txt', mode='w') as out_file:
         # file.write() instead of the Python-2-only ``print >>file``
         # syntax so this module also parses under Python 3; output
         # (text plus trailing newline per print) is unchanged
         if len(self.failed_images) > 0:
             out_file.write('Following images could not be pulled\n')
             out_file.write('\n'.join(self.failed_images) + '\n')
         if len(self.pulled_images) > 0:
             out_file.write('\nFollowing images have been pulled\n')
             out_file.write('\n'.join(self.pulled_images) + '\n')
Example #4
0
def predict(path):
    """Handle a prediction request for the media file at *path*.

    Downloads the file from the requesting host's public server on port
    3001, runs the model on it, and returns the JSON-encoded prediction
    with HTTP status 200.  The downloaded file is always removed, even
    when prediction fails.
    """
    print('Got prediction request.')
    remote_addr = request.remote_addr
    filename = download_file('http://' + remote_addr + ':3001/public/' + path)
    try:
        prediction = model.predict_by_path('uploads/' + filename)
        # .item() converts numpy scalars to native Python numbers so the
        # dict is JSON-serializable
        prediction['exterior'] = prediction['exterior'].item()
        prediction['interior'] = prediction['interior'].item()
        prediction = json.dumps(prediction)
    finally:
        # previously the downloaded file leaked whenever prediction raised
        delete_file('uploads/' + filename)
    print('Sending prediction response.')
    return prediction, 200
Example #5
0
 def flush_old_records(self):
     '''deletes old runs, associated backup files and orders (deleting runs delete cascade associated orders)'''
     old_runs = self._get_old_runs()
     # pre-bind so the except handler's log line cannot raise NameError
     # when the failure happens before the loop body ever assigned `run`
     run = None
     try:
         for run in old_runs:
             orders_in_run = self.session.query(Order).filter_by(
                 run_obj=run).all()
             logging.info(
                 f'Deleting {len(orders_in_run)} orders associated with old {run} and backup file: {run.fpath}'
             )
             delete_file(run.fpath)
             self.session.delete(run)
         self.session.commit()
     except Exception as e:
         logging.warning(
             f'Unexpected err while flushing old records from db inside flush_old_records. Err: {e}. Last recorded run {run}'
         )
def ingest():
    """Fetch the NSL archive, convert both XML payloads to JSON, upload, and clean up."""
    # pull the source archive and unpack it into the download directory
    file_utils.download_and_extract(
            config.NSL_ARCHIVE_URL,
            config.NSL_DOWNLOAD_LOCATION)

    # parse both XML files before converting either
    ssi_tree = open_and_objectify(os.path.join(config.NSL_DOWNLOAD_LOCATION, "SEnsl_ssi.xml"))
    other_tree = open_and_objectify(os.path.join(config.NSL_DOWNLOAD_LOCATION, "SEnsl_other.xml"))

    ssi_dict = NslConverter().convert(ssi_tree)
    other_dict = NslOtherConverter().convert(other_tree)

    # write out the converted data plus the shared dictionaries
    dump_nsl(ssi_dict)
    dump_nsl_other(other_dict)
    dump_dictionaries()

    # package the JSON output, upload it, then remove all local artifacts
    file_utils.cleanup(config.NSL_DOWNLOAD_LOCATION)
    file_utils.zipdir(config.NSL_JSON_OUTPUT_PATH, config.NSL_ARCHIVE_NAME)
    file_utils.cleanup(config.NSL_JSON_OUTPUT_PATH)
    file_utils.upload_file(config.NSL_DIFF_UPDATE_URL, config.NSL_ARCHIVE_NAME)
    file_utils.delete_file(config.NSL_ARCHIVE_NAME)
Example #7
0
import image_check_exceptions

__author__ = 'A.P. Rajshekhar'

import validate_images
import argparse
from image_check_exceptions import ImageCheckException
import file_utils

def parse_arg():
    """Return the environment given via --env, defaulting to 'ci'."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--env", help="the environment against which to run search")
    parsed = parser.parse_args()
    # fall back to the CI environment when --env was not supplied
    return parsed.env if parsed.env else 'ci'

if __name__ == '__main__':
    check_image = validate_images.SearchAndValidate()
    try:
        check_image.start_check(parse_arg())
    # `except ImageCheckException, e` was Python-2-only syntax and the
    # bound name was never used; `except ImageCheckException:` is valid
    # on both Python 2 and 3
    except ImageCheckException:
        # image-check failures are meaningful build failures: propagate
        raise
    except Exception:
        # anything else is unrelated to image pulls (a bare `except:`
        # here also trapped SystemExit/KeyboardInterrupt); reset the
        # results file and leave a note for whoever inspects the build
        file_utils.delete_file()
        file_utils.write("Build has thrown exception not related to pull of image. Please check.")

Example #8
0
 def delete_old_files(self):
     '''addresses potential double loading of same LP sheets to Excel problem,
     deletes csv files from Helper Files dir before new run'''
     # remove both csv files left over from the previous run
     for stale_csv in (self.lp_filename, self.lp_tracked_filename):
         delete_file(stale_csv)
Example #9
0
# NOTE(review): hard-coded demo inputs — a podcast episode URL carrying an
# expiring signed token (likely stale by now) and a fixed S3 bucket/filename.
origin_url = 'https://hwcdn.libsyn.com/p/f/f/7/ff7f1a6e683c7dbb/37-_Go_East_Young_Man.mp3?c_id=1453698&cs_id=1453698&expiration=1590267009&hwt=216c6584f6ef2a42be776e773cc7d40b'

bucket = 'test-text-to-speech'
filename = 'bronze-age-collapse.mp3'

# fetch the source audio into the local working directory
# print('Downloading file...')
file_utils.download_file_from_url(origin_url, filename)
# print('...done')

# NOTE(review): the bucket name is passed as a literal here instead of the
# `bucket` variable defined above — keep them in sync if either changes
print('Uploading to s3...')
response = s3_utils.upload_file(filename, 'test-text-to-speech')
print(response)
print('...done')

# the S3 copy is now canonical; drop the local download
print('Deleting local copy...')
file_utils.delete_file(filename)
print('...done')

# unique job name so repeated runs do not collide in the transcription service
job_name = 'job-%s' % uuid.uuid1()
print(job_name)

# public-style S3 object URL handed to the transcription job as its input
object_url = "https://%s.s3.amazonaws.com/%s" % (bucket, filename)
print(object_url)

print('Starting transcription job...')
response = sound_stuff.start_transcription_job(job_name, object_url, 'mp3')
print(response)
print('...done')

# block until the transcription job completes
print('Waiting on transcription task...')
sound_stuff.wait_for_transaction_job(job_name)
Example #10
0
# effect chain: dynamic-range compression (settings from m_settings.compand,
# defined elsewhere) followed by level normalization
normalize_fx = (
    AudioEffectsChain()
    .compand(**m_settings.compand)
    .normalize()
)


# perform audio effects on infile and save results to effected files
# output is forced to 48 kHz mono
normalized_file = file_utils.get_test_outfile_path()
normalize_fx(infile, normalized_file, sample_out=48000, channels_out=1)


# STEP 3: Isolate speech and make non-speech frames silent
isolated_speech_file = isolate_speech(normalized_file)
# the normalized intermediate is no longer needed once speech is isolated
file_utils.delete_file(normalized_file)


# STEP 4: eq the signal
# all filter parameters come from eq_settings (defined elsewhere in the project)
eq_fx = (
    AudioEffectsChain()
    .highpass(**eq_settings.highpass)
    .lowshelf(**eq_settings.lowShelf)
    .equalizer(**eq_settings.lowFrequency)
    .equalizer(**eq_settings.midFrequency)
    .highshelf(**eq_settings.highShelf)
    # .reverb(**m_settings.reverb)
)
eq_file = file_utils.get_test_outfile_path()
eq_fx(isolated_speech_file, eq_file, sample_out=48000, channels_out=1)