def regenerated_original_images(galaxy_name, run_id, galaxy_id, s3Helper, connection):
    """
    Regenerate the original images for a galaxy from its stored fits file.

    :param galaxy_name: the galaxy name
    :param run_id: the run the galaxy belongs to
    :param galaxy_id: the galaxy database id
    :param s3Helper: helper used for S3 access
    :param connection: the database connection
    :return: if we succeed
    """
    # Locate the fits file in the files bucket
    bucket = s3Helper.get_bucket(get_files_bucket())
    galaxy_file_name = get_galaxy_file_name(galaxy_name, run_id, galaxy_id)
    key = bucket.get_key('{0}/{0}.fits'.format(galaxy_name))
    if key is None:
        LOG.error('The fits file does not seem to exists')
        return False

    # Pull the fits data down to a temporary file
    path_name = get_temp_file('fits')
    key.get_contents_to_filename(path_name)

    # Rebuild the images, always removing the temporary file afterwards
    try:
        FitsImage(connection).build_image(path_name, galaxy_file_name, galaxy_id, get_galaxy_image_bucket())
        return True
    except Exception:
        LOG.exception('Major error')
        return False
    finally:
        os.remove(path_name)
# ---- Example 2 ----
def migrate_image_files(connection, image_bucket_name, file_bucket_name, s3helper):
    """
    Migrate the galaxy image files found on the local disk into S3.

    Files are only migrated when their galaxy (name + version) still
    exists in the database.  Fits files go to the file bucket; png images
    go to the image bucket under a key derived from the file suffix.

    :param connection: the database connection
    :param image_bucket_name: bucket that receives the png images
    :param file_bucket_name: bucket that receives the fits files
    :param s3helper: helper used for S3 access
    """
    # Ordered (suffix, key builder) pairs for the png images.  The order
    # matters: '_tn_colour_1.png' must be tested before '_colour_1.png'
    # because it ends with the same text.
    suffix_key_builders = [
        ('_tn_colour_1.png', lambda gk: get_thumbnail_colour_image_key(gk, 1)),
        ('_colour_1.png', lambda gk: get_colour_image_key(gk, 1)),
        ('_colour_2.png', lambda gk: get_colour_image_key(gk, 2)),
        ('_colour_3.png', lambda gk: get_colour_image_key(gk, 3)),
        ('_colour_4.png', lambda gk: get_colour_image_key(gk, 4)),
        ('_mu.png', lambda gk: get_build_png_name(gk, 'mu')),
        ('_m.png', lambda gk: get_build_png_name(gk, 'm')),
        ('_ldust.png', lambda gk: get_build_png_name(gk, 'ldust')),
        ('_sfr.png', lambda gk: get_build_png_name(gk, 'sfr')),
    ]

    for file_name in glob.glob('/home/ec2-user/galaxyImages/*/*'):
        (name, version, extension) = get_name_version(file_name)

        # Only migrate the file if the original galaxy is still in the database
        galaxy = connection.execute(select([GALAXY]).where(and_(GALAXY.c.name == name, GALAXY.c.version_number == version))).first()
        if galaxy is None:
            continue

        if extension == '.fits':
            add_file_to_bucket1(file_bucket_name, get_key_fits(galaxy[GALAXY.c.name], galaxy[GALAXY.c.run_id], galaxy[GALAXY.c.galaxy_id]), file_name, s3helper)
        else:
            galaxy_key = get_galaxy_file_name(galaxy[GALAXY.c.name], galaxy[GALAXY.c.run_id], galaxy[GALAXY.c.galaxy_id])
            # Upload under the first suffix that matches (if any)
            for suffix, build_key in suffix_key_builders:
                if file_name.endswith(suffix):
                    add_file_to_bucket1(image_bucket_name, build_key(galaxy_key), file_name, s3helper)
                    break
# ---- Example 3 ----
def get_hdf5_from_s3(galaxy, directory):
    """
    Fetch the HDF5 file for a galaxy from S3 into the given directory.

    When the file is archived in glacier a restore request is issued
    (or, if a restore is already running, we simply log and wait).

    :param galaxy: the galaxy row (GALAXY columns)
    :param directory: directory to download the file into
    """
    bucket_name = get_saved_files_bucket()
    key = get_key_hdf5(galaxy[GALAXY.c.name], galaxy[GALAXY.c.run_id],
                       galaxy[GALAXY.c.galaxy_id])
    s3_helper = S3Helper()

    if not s3_helper.file_exists(bucket_name, key):
        LOG.info('The key {0} in bucket {1} does not exist'.format(
            key, bucket_name))
        return

    if not s3_helper.file_archived(bucket_name, key):
        # The file is immediately available - download it
        LOG.info('Galaxy {0} ({1}) is available in s3'.format(
            galaxy[GALAXY.c.name], galaxy[GALAXY.c.run_id]))
        galaxy_file_name = get_galaxy_file_name(galaxy[GALAXY.c.name],
                                                galaxy[GALAXY.c.run_id],
                                                galaxy[GALAXY.c.galaxy_id])
        s3_helper.get_file_from_bucket(
            bucket_name=bucket_name,
            key_name=key,
            file_name=os.path.join(directory, galaxy_file_name) + '.hdf5')
    elif s3_helper.file_restoring(bucket_name, key):
        # A glacier restore is already running - nothing to do but wait
        LOG.info(
            'Galaxy {0} ({1}) is still restoring from glacier'.format(
                galaxy[GALAXY.c.name], galaxy[GALAXY.c.run_id]))
    else:
        # Archived and not yet restoring - request a restore
        LOG.info('Making request for archived galaxy {0} ({1})'.format(
            galaxy[GALAXY.c.name], galaxy[GALAXY.c.run_id]))
        s3_helper.restore_archived_file(bucket_name, key, days=10)
# ---- Example 4 ----
def get_hdf5_from_s3(galaxy, directory):
    """
    Download the galaxy's HDF5 file from S3, requesting a glacier restore
    first when the file is archived.

    :param galaxy: the galaxy row (GALAXY columns)
    :param directory: directory the file is written into
    """
    bucket_name = get_saved_files_bucket()
    key = get_key_hdf5(galaxy[GALAXY.c.name], galaxy[GALAXY.c.run_id], galaxy[GALAXY.c.galaxy_id])
    helper = S3Helper()

    if not helper.file_exists(bucket_name, key):
        LOG.info('The key {0} in bucket {1} does not exist'.format(key, bucket_name))
        return

    if helper.file_archived(bucket_name, key):
        # The file lives in glacier: either wait for a running restore or start one
        if helper.file_restoring(bucket_name, key):
            LOG.info(
                'Galaxy {0} ({1}) is still restoring from glacier'.format(
                    galaxy[GALAXY.c.name],
                    galaxy[GALAXY.c.run_id]
                )
            )
        else:
            LOG.info('Making request for archived galaxy {0} ({1})'.format(galaxy[GALAXY.c.name], galaxy[GALAXY.c.run_id]))
            helper.restore_archived_file(bucket_name, key, days=10)
        return

    # Not archived - it can be downloaded straight away
    LOG.info('Galaxy {0} ({1}) is available in s3'.format(galaxy[GALAXY.c.name], galaxy[GALAXY.c.run_id]))
    target = os.path.join(
        directory,
        get_galaxy_file_name(galaxy[GALAXY.c.name], galaxy[GALAXY.c.run_id], galaxy[GALAXY.c.galaxy_id])) + '.hdf5'
    helper.get_file_from_bucket(bucket_name=bucket_name, key_name=key, file_name=target)
def regenerated_original_images(galaxy_name, run_id, galaxy_id, s3_helper,
                                connection):
    """
    Rebuild the original images for a galaxy from the saved fits file.

    :param galaxy_name: the galaxy name
    :param run_id: the run id
    :param galaxy_id: the galaxy database id
    :param s3_helper: helper used for S3 access
    :param connection: the database connection
    :return: if we succeed
    """
    # Locate the fits file in the saved files bucket
    bucket = s3_helper.get_bucket(get_saved_files_bucket())
    fits_key = bucket.get_key('{0}/{0}.fits'.format(galaxy_name))
    if fits_key is None:
        LOG.error('The fits file does not seem to exists')
        return False

    # Download it to a temporary file
    path_name = get_temp_file('fits')
    fits_key.get_contents_to_filename(path_name)

    all_ok = False
    try:
        # Regenerate the images from the fits data
        fits_image = FitsImage(connection)
        fits_image.build_image(path_name,
                               get_galaxy_file_name(galaxy_name, run_id, galaxy_id),
                               galaxy_id,
                               get_galaxy_image_bucket())
        all_ok = True
    except Exception:
        LOG.exception('Major error')
    finally:
        # Always clean up the temporary file
        os.remove(path_name)
    return all_ok
# ---- Example 6 ----
def delete_galaxy(connection, galaxy_ids):
    """
    Delete the computed data for each galaxy and mark it as DELETED.

    For every galaxy id the pixel results of all its areas are removed
    from the database, its sed files are deleted from the files bucket,
    and the galaxy row is flagged with the DELETED status.  Each galaxy
    is processed inside its own transaction.

    Bug fixes against the original version:
    * the "was deleted" log line and the status update no longer run when
      the galaxy was not found;
    * an exception no longer leaves the current transaction open.

    :param connection: the database connection to use
    :param galaxy_ids: iterable of galaxy ids (string or int)
    """
    transaction = None
    try:
        for galaxy_id_str in galaxy_ids:
            transaction = connection.begin()
            galaxy_id1 = int(galaxy_id_str)
            galaxy = connection.execute(
                select([GALAXY
                        ]).where(GALAXY.c.galaxy_id == galaxy_id1)).first()
            if galaxy is None:
                # Nothing to delete - don't log success or touch the status
                LOG.info('Error: Galaxy with galaxy_id of %d was not found',
                         galaxy_id1)
                transaction.rollback()
                continue

            LOG.info('Deleting Galaxy with galaxy_id of %d - %s',
                     galaxy_id1, galaxy[GALAXY.c.name])
            # Count the areas up front so progress can be reported below
            area_count = connection.execute(
                select([func.count(AREA.c.area_id)
                        ]).where(AREA.c.galaxy_id == galaxy[
                            GALAXY.c.galaxy_id])).first()[0]
            counter = 1

            # Remove the pixel results area by area
            for area_id1 in connection.execute(
                    select(
                        [AREA.c.area_id]).where(AREA.c.galaxy_id == galaxy[
                            GALAXY.c.galaxy_id]).order_by(AREA.c.area_id)):
                LOG.info("Deleting galaxy {0} area {1}. {2} of {3}".format(
                    galaxy_id_str, area_id1[0], counter, area_count))
                connection.execute(PIXEL_RESULT.delete().where(
                    PIXEL_RESULT.c.area_id == area_id1[0]))

                # Give the rest of the world a chance to access the database
                time.sleep(0.1)
                counter += 1

            # Now empty the bucket
            s3helper = S3Helper()
            bucket = s3helper.get_bucket(get_files_bucket())
            galaxy_file_name = get_galaxy_file_name(
                galaxy[GALAXY.c.name], galaxy[GALAXY.c.run_id],
                galaxy[GALAXY.c.galaxy_id])
            for key in bucket.list(
                    prefix='{0}/sed/'.format(galaxy_file_name)):
                # Skip the folder placeholder keys; the folder is removed below
                if key.key.endswith('/'):
                    continue

                bucket.delete_key(key)

            # Now the folder itself
            key = Key(bucket)
            key.key = '{0}/sed/'.format(galaxy_file_name)
            bucket.delete_key(key)

            LOG.info('Galaxy with galaxy_id of %d was deleted', galaxy_id1)
            connection.execute(
                GALAXY.update().where(GALAXY.c.galaxy_id == galaxy_id1).values(
                    status_id=DELETED, status_time=datetime.datetime.now()))
            transaction.commit()

    except Exception:
        LOG.exception('Major error')
        # Don't leave a half-finished transaction dangling
        if transaction is not None and transaction.is_active:
            transaction.rollback()
# ---- Example 7 ----
def remove_galaxy_images_folder(old_name, run_id, galaxy_id, bucket):
    """
    Delete the images folder for a galaxy from the bucket.

    :param old_name: the galaxy's (old) name
    :param run_id: the run id
    :param galaxy_id: the galaxy id
    :param bucket: the S3 bucket holding the images
    :return:
    """
    folder_key = get_galaxy_file_name(old_name, run_id, galaxy_id) + '/'
    remove_folder(bucket, folder_key)
# ---- Example 8 ----
def remove_galaxy_images_folder(old_name, run_id, galaxy_id, bucket):
    """
    Remove the galaxy's image folder from the given bucket.

    :param old_name: the galaxy's (old) name
    :param run_id: the run id
    :param galaxy_id: the galaxy id
    :param bucket: the S3 bucket holding the images
    :return:
    """
    remove_folder(
        bucket,
        '{0}/'.format(get_galaxy_file_name(old_name, run_id, galaxy_id)))
# ---- Example 9 ----
def build_image_key(galaxy_name, run_id, galaxy_id, file_name):
    """
    Build the S3 key for a galaxy image file.

    :param galaxy_name: the galaxy name
    :param run_id: the run id
    :param galaxy_id: the galaxy id
    :param file_name: the image file name
    :return: the key '<galaxy folder>/<file name>'
    """
    galaxy_folder = get_galaxy_file_name(galaxy_name, run_id, galaxy_id)
    return '{0}/{1}'.format(galaxy_folder, file_name)
# ---- Example 10 ----
def build_file_key(galaxy_name, run_id, galaxy_id, extension):
    """
    Build the S3 key for a galaxy file with the given extension.

    :param galaxy_name: the galaxy name
    :param run_id: the run id
    :param galaxy_id: the galaxy id
    :param extension: the file extension (without the dot)
    :return: the key '<name>/<name>.<extension>'
    """
    galaxy_file_name = get_galaxy_file_name(galaxy_name, run_id, galaxy_id)
    return '{0}/{0}.{1}'.format(galaxy_file_name, extension)
# ---- Example 11 ----
def delete_galaxy(connection, galaxy_ids):
    """
    Delete the stored results for each galaxy and mark it as DELETED.

    For every galaxy id the pixel results of all its areas and its FITS
    headers are removed from the database, and its sed files are deleted
    from the sed files bucket.  Each galaxy is handled in its own
    transaction, which is rolled back (and the process exits) whenever a
    shutdown has been requested.

    :param connection: the database connection to use
    :param galaxy_ids: iterable of galaxy ids to delete
    """
    for galaxy_id in galaxy_ids:
        transaction = connection.begin()
        galaxy = connection.execute(select([GALAXY]).where(GALAXY.c.galaxy_id == galaxy_id)).first()
        if galaxy is None:
            LOG.info('Error: Galaxy with galaxy_id of %d was not found', galaxy_id)
        else:
            LOG.info('Deleting Galaxy with galaxy_id of %d - %s', galaxy_id, galaxy[GALAXY.c.name])
            # Count the areas first so the loop below can report progress
            area_count = connection.execute(select([func.count(AREA.c.area_id)]).where(AREA.c.galaxy_id == galaxy[GALAXY.c.galaxy_id])).first()[0]
            counter = 1

            # Remove the pixel results one area at a time
            for area_id1 in connection.execute(select([AREA.c.area_id]).where(AREA.c.galaxy_id == galaxy[GALAXY.c.galaxy_id]).order_by(AREA.c.area_id)):
                LOG.info("Deleting galaxy {0} area {1}. {2} of {3}".format(galaxy_id, area_id1[0], counter, area_count))
                connection.execute(PIXEL_RESULT.delete().where(PIXEL_RESULT.c.area_id == area_id1[0]))

                # Give the rest of the world a chance to access the database
                time.sleep(0.1)
                counter += 1

                if shutdown() is True:
                    transaction.rollback()
                    raise SystemExit

            LOG.info("Deleting FITS headers for galaxy {0}".format(galaxy_id))
            connection.execute(FITS_HEADER.delete().where(FITS_HEADER.c.galaxy_id == galaxy[GALAXY.c.galaxy_id]))

            # Now empty the bucket of the sed files
            s3helper = S3Helper()
            bucket = s3helper.get_bucket(get_sed_files_bucket())
            galaxy_file_name = get_galaxy_file_name(galaxy[GALAXY.c.name], galaxy[GALAXY.c.run_id], galaxy[GALAXY.c.galaxy_id])
            for key in bucket.list(prefix='{0}/'.format(galaxy_file_name)):
                # Ignore the key
                if key.key.endswith('/'):
                    continue

                bucket.delete_key(key)

                if shutdown() is True:
                    transaction.rollback()
                    raise SystemExit

            # Now the folder
            key = Key(bucket)
            key.key = '{0}/'.format(galaxy_file_name)
            bucket.delete_key(key)

        # NOTE(review): this log line and the status update also run when the
        # galaxy was not found above - confirm that is intended
        LOG.info('Galaxy with galaxy_id of %d was deleted', galaxy_id)
        connection.execute(GALAXY.update().where(GALAXY.c.galaxy_id == galaxy_id).values(status_id=DELETED, status_time=datetime.datetime.now()))

        if shutdown() is True:
            transaction.rollback()
            raise SystemExit

        transaction.commit()
# ---- Example 12 ----
def build_image_key(galaxy_name, run_id, galaxy_id, file_name):
    """
    Build the key for a galaxy image stored in S3.

    :param galaxy_name: the galaxy name
    :param run_id: the run id
    :param galaxy_id: the galaxy id
    :param file_name: the image file name
    :return: the key '<galaxy folder>/<file name>'
    """
    return '/'.join(
        [get_galaxy_file_name(galaxy_name, run_id, galaxy_id), file_name])
# ---- Example 13 ----
def build_file_key(galaxy_name, run_id, galaxy_id, extension):
    """
    Build the key for a galaxy file stored in S3.

    :param galaxy_name: the galaxy name
    :param run_id: the run id
    :param galaxy_id: the galaxy id
    :param extension: the file extension (without the dot)
    :return: the key '<name>/<name>.<extension>'
    """
    name = get_galaxy_file_name(galaxy_name, run_id, galaxy_id)
    return '{0}/{1}.{2}'.format(name, name, extension)
# ---- Example 14 ----
def data_string(connection, user, galaxies):
    """
    Build the docmosis request payload for a detailed user report.

    The payload is hand-assembled JSON-like text describing every galaxy
    the user has worked on, including base64 encoded images pulled from
    the galaxy image bucket.

    :param connection: database connection used to look up images and labels
    :param user: the user the report is for
    :param galaxies: the galaxies the user has worked on
    :return: the payload as a single string
    """
    s3_connection = get_s3_connection()
    bucket = get_bucket(s3_connection, get_galaxy_image_bucket())

    hasParam = 1
    # Header of the docmosis request
    parts = [
        '{\n',
        '"accessKey":"' + DOCMOSIS_KEY + '",\n',
        '"templateName":"' + DOCMOSIS_TEMPLATE + '",\n',
        '"outputName":"DetailedUserReport.pdf",\n',
        '"storeTo":"mailto:' + user.email + '",\n',
        '"mailSubject":"theSkyNet POGS - Detailed User Report",\n',
        '"data":{\n',
        '"user":"******",\n',
        '"date":"' + str(datetime.date.today()) + '",\n',
        '"galaxy":[\n',
    ]
    # One entry per galaxy the user has worked on
    for galaxy in galaxies:
        galaxy_key = get_galaxy_file_name(galaxy.name, galaxy.run_id, galaxy.galaxy_id)
        parts.append('{\n')
        parts.append('"galid":"' + galaxy.name + ' (version ' + str(galaxy.version_number) + ')",\n')
        # The four filter images the user contributed to, then their labels
        for index in range(1, 5):
            parts.append('"pic{0}":"image:base64:'.format(index) + user_galaxy_image(bucket, galaxy_key, connection, user.id, galaxy.galaxy_id, index) + '",\n')
        for index in range(1, 5):
            parts.append('"pic{0}_label":"'.format(index) + galaxy_filter_label(connection, galaxy.galaxy_id, index) + '",\n')
        # Only if there is parameter images
        if hasParam:
            parts.append('"add":"true",\n')
            for index, parameter in enumerate(['mu', 'm', 'ldust', 'sfr'], start=5):
                parts.append('"pic{0}":"image:base64:'.format(index) + galaxy_parameter_image(bucket, galaxy_key, parameter) + '",\n')
        parts.append('"gatype":"' + galaxy.galaxy_type + '",\n')
        parts.append('"gars":"' + str(galaxy.redshift) + '",\n')
        parts.append('"gades":"' + galaxy.design + '",\n')
        parts.append('"gara_eqj2000":"' + str(galaxy.ra_eqj2000) + '",\n')
        parts.append('"gadec_eqj2000":"' + str(galaxy.dec_eqj2000) + '",\n')
        parts.append('"gara_eqb1950":"' + str(galaxy.ra_eqb1950) + '",\n')
        parts.append('"gadec_eqb1950":"' + str(galaxy.dec_eqb1950) + '",\n')
        parts.append('},\n')
    parts.append(']\n')
    parts.append('}\n')
    parts.append('}\n')

    return ''.join(parts)
# ---- Example 15 ----
def remove_files_with_key(bucket, galaxy_name, run_id, galaxy_id):
    """
    Delete every file stored under the galaxy's key prefix, then the
    folder key itself.

    :param bucket: the S3 bucket to clean
    :param galaxy_name: the galaxy name
    :param run_id: the run id
    :param galaxy_id: the galaxy id
    """
    full_key_name = get_galaxy_file_name(galaxy_name, run_id, galaxy_id) + '/'
    for key in bucket.list(prefix=full_key_name):
        # Folder placeholder keys are removed separately below
        if not key.key.endswith('/'):
            bucket.delete_key(key)

    # Finally remove the folder key itself
    folder_key = Key(bucket)
    folder_key.key = full_key_name
    bucket.delete_key(folder_key)
# ---- Example 16 ----
def remove_files_with_key(bucket, galaxy_name, run_id, galaxy_id):
    """
    Remove all S3 objects under the galaxy's folder, then the folder entry.

    :param bucket: the S3 bucket to clean
    :param galaxy_name: the galaxy name
    :param run_id: the run id
    :param galaxy_id: the galaxy id
    """
    prefix = '{0}/'.format(get_galaxy_file_name(galaxy_name, run_id, galaxy_id))
    for entry in bucket.list(prefix=prefix):
        if entry.key.endswith('/'):
            # Skip folder markers; the folder itself is removed last
            continue
        bucket.delete_key(entry)

    # Remove the folder marker
    folder = Key(bucket)
    folder.key = prefix
    bucket.delete_key(folder)
def image_files_exist(galaxy_name, run_id, galaxy_id, s3_helper):
    """
    Check whether every expected image file for the galaxy exists in S3.

    :param galaxy_name: the galaxy name
    :param run_id: the run id
    :param galaxy_id: the galaxy id
    :param s3_helper: helper used for S3 access
    :return: True when all image files are present, False otherwise
    """
    bucket = s3_helper.get_bucket(get_galaxy_image_bucket())
    galaxy_file_name = get_galaxy_file_name(galaxy_name, run_id, galaxy_id)
    # all() short-circuits on the first missing key, like the original loop
    return all(
        bucket.get_key('{0}/{1}'.format(galaxy_file_name, image_file)) is not None
        for image_file in IMAGE_FILES)
def image_files_exist(galaxy_name, run_id, galaxy_id, s3Helper):
    """
    Return True only if every image file for the galaxy is in the bucket.

    :param galaxy_name: the galaxy name
    :param run_id: the run id
    :param galaxy_id: the galaxy id
    :param s3Helper: helper used for S3 access
    :return: True when all image files are present, False otherwise
    """
    bucket = s3Helper.get_bucket(get_galaxy_image_bucket())
    folder = get_galaxy_file_name(galaxy_name, run_id, galaxy_id)
    for image_file in IMAGE_FILES:
        if bucket.get_key('{0}/{1}'.format(folder, image_file)) is None:
            # One missing file is enough to fail the check
            return False

    # if we get here we found them all
    return True
# ---- Example 19 ----
def delete_galaxy(connection, galaxy_ids):
    """
    Delete the pixel results and sed files for each galaxy and mark the
    galaxy row as DELETED.

    Each galaxy is processed in its own transaction; any error aborts the
    whole run and is logged.

    :param connection: the database connection to use
    :param galaxy_ids: iterable of galaxy ids (string or int)
    """
    try:
        for galaxy_id_str in galaxy_ids:
            transaction = connection.begin()
            galaxy_id1 = int(galaxy_id_str)
            galaxy = connection.execute(select([GALAXY]).where(GALAXY.c.galaxy_id == galaxy_id1)).first()
            if galaxy is None:
                LOG.info('Error: Galaxy with galaxy_id of %d was not found', galaxy_id1)
            else:
                LOG.info('Deleting Galaxy with galaxy_id of %d - %s', galaxy_id1, galaxy[GALAXY.c.name])
                # Count the areas up front so the loop below can report progress
                area_count = connection.execute(select([func.count(AREA.c.area_id)]).where(AREA.c.galaxy_id == galaxy[GALAXY.c.galaxy_id])).first()[0]
                counter = 1

                # Remove the pixel results one area at a time
                for area_id1 in connection.execute(select([AREA.c.area_id]).where(AREA.c.galaxy_id == galaxy[GALAXY.c.galaxy_id]).order_by(AREA.c.area_id)):
                    LOG.info("Deleting galaxy {0} area {1}. {2} of {3}".format(galaxy_id_str, area_id1[0], counter, area_count))
                    connection.execute(PIXEL_RESULT.delete().where(PIXEL_RESULT.c.area_id == area_id1[0]))

                    # Give the rest of the world a chance to access the database
                    time.sleep(0.1)
                    counter += 1

                # Now empty the bucket
                s3helper = S3Helper()
                bucket = s3helper.get_bucket(get_files_bucket())
                galaxy_file_name = get_galaxy_file_name(galaxy[GALAXY.c.name], galaxy[GALAXY.c.run_id], galaxy[GALAXY.c.galaxy_id])
                for key in bucket.list(prefix='{0}/sed/'.format(galaxy_file_name)):
                    # Ignore the key
                    if key.key.endswith('/'):
                        continue

                    bucket.delete_key(key)

                # Now the folder
                key = Key(bucket)
                key.key = '{0}/sed/'.format(galaxy_file_name)
                bucket.delete_key(key)

            # NOTE(review): this log line and the status update also run when
            # the galaxy was not found above - confirm that is intended
            LOG.info('Galaxy with galaxy_id of %d was deleted', galaxy_id1)
            connection.execute(GALAXY.update().where(GALAXY.c.galaxy_id == galaxy_id1).values(status_id=DELETED, status_time=datetime.datetime.now()))
            transaction.commit()

    except Exception:
        LOG.exception('Major error')
# ---- Example 20 ----
        galaxy_ids = args['galaxy_id']

    for galaxy_id_str in galaxy_ids:
        start_time = time.time()
        area_count = 0
        pixel_count = 0

        galaxy_id1 = int(galaxy_id_str)
        galaxy = connection.execute(select([GALAXY]).where(GALAXY.c.galaxy_id == galaxy_id1)).first()
        if galaxy is None:
            LOG.info('Error: Galaxy with galaxy_id of %d was not found', galaxy_id1)
        else:
            LOG.info('Archiving Galaxy with galaxy_id of %d - %s', galaxy_id1, galaxy[GALAXY.c.name])

            # Copy the galaxy details
            galaxy_file_name = get_galaxy_file_name(galaxy[GALAXY.c.name], galaxy[GALAXY.c.run_id], galaxy[GALAXY.c.galaxy_id])
            filename = os.path.join(OUTPUT_DIRECTORY, '{0}.hdf5'.format(galaxy_file_name))

            h5_file = h5py.File(filename, 'w')

            # Build the groups
            galaxy_group = h5_file.create_group('galaxy')
            area_group = galaxy_group.create_group('area')
            pixel_group = galaxy_group.create_group('pixel')

            # Write the galaxy data
            galaxy_group.attrs['galaxy_id'] = galaxy[GALAXY.c.galaxy_id]
            galaxy_group.attrs['run_id'] = galaxy[GALAXY.c.run_id]
            galaxy_group.attrs['name'] = galaxy[GALAXY.c.name]
            galaxy_group.attrs['dimension_x'] = galaxy[GALAXY.c.dimension_x]
            galaxy_group.attrs['dimension_y'] = galaxy[GALAXY.c.dimension_y]
# ---- Example 21 ----
def build_png_image_ami():
    """
    Build the PNG images for every galaxy whose areas have been updated
    since the last image build and upload them to the galaxy image bucket.

    For each galaxy the mu, m, ldust and sfr pixel values are loaded into
    an array, scaled with an asinh stretch and rendered with the FIRE
    colour map.  The galaxy's image_time and pixel counts are updated as
    each galaxy is processed.

    Bug fixes against the original version:
    * the two pixel-query conditions are combined with and_() - the
      Python 'and' operator silently dropped the x > -1 filter;
    * the bare 'except:' (which also swallowed SystemExit and
      KeyboardInterrupt) is narrowed to 'except Exception:';
    * galaxy_count is initialised before the try block so the final log
      statement cannot hit an unbound name after an early failure.

    :return:
    """
    # First check the galaxy exists in the database
    engine = create_engine(DB_LOGIN)
    connection = engine.connect()
    galaxy_count = 0
    try:
        # Galaxies with at least one area updated after the last image build
        query = select([GALAXY]).distinct().where(and_(AREA.c.galaxy_id == GALAXY.c.galaxy_id, AREA.c.update_time >= GALAXY.c.image_time))

        s3helper = S3Helper()
        bucket_name = get_galaxy_image_bucket()

        # Start the shutdown signal poller to check when this instance must close
        start_poll()
        galaxy_list = []

        for galaxy in connection.execute(query):
            galaxy_list.append(galaxy)

        total_galaxies = len(galaxy_list)
        processed_galaxies = 0
        processed_print_point = 50

        for galaxy in galaxy_list:

            if processed_galaxies == processed_print_point:
                LOG.info('{0} out of {1} galaxies processed'.format(processed_galaxies, total_galaxies))
                processed_print_point += 50

            processed_galaxies += 1

            LOG.info('Working on galaxy %s', galaxy[GALAXY.c.name])
            # One layer per PNG image name (mu, m, ldust, sfr); NaN = no data
            array = numpy.empty((galaxy[GALAXY.c.dimension_y], galaxy[GALAXY.c.dimension_x], len(PNG_IMAGE_NAMES)), dtype=float)
            array.fill(numpy.NaN)

            # Return the rows
            pixel_count = 0
            pixels_processed = 0
            for row in connection.execute(select([PIXEL_RESULT]).where(and_(PIXEL_RESULT.c.galaxy_id == galaxy[GALAXY.c.galaxy_id], PIXEL_RESULT.c.x > -1))):
                row__x = row[PIXEL_RESULT.c.x]
                row__y = row[PIXEL_RESULT.c.y]
                pixel_count += 1
                if row[PIXEL_RESULT.c.workunit_id] is not None:
                    pixels_processed += 1

                    # Defend against bad values
                    if row[PIXEL_RESULT.c.mu] is not None:
                        array[row__y, row__x, 0] = row[PIXEL_RESULT.c.mu]
                    if row[PIXEL_RESULT.c.m] is not None:
                        array[row__y, row__x, 1] = row[PIXEL_RESULT.c.m]
                    if row[PIXEL_RESULT.c.ldust] is not None:
                        array[row__y, row__x, 2] = row[PIXEL_RESULT.c.ldust]
                    if row[PIXEL_RESULT.c.sfr] is not None:
                        # the SFR is a log
                        array[row__y, row__x, 3] = math.pow(10, row[PIXEL_RESULT.c.sfr])

            connection.execute(GALAXY.update()
                               .where(GALAXY.c.galaxy_id == galaxy[GALAXY.c.galaxy_id])
                               .values(image_time=datetime.datetime.now(), pixel_count=pixel_count, pixels_processed=pixels_processed))
            galaxy_count += 1

            # Now write the files
            black_rgb = (0, 0, 0)
            for name in PNG_IMAGE_NAMES:
                value = 0
                height = galaxy[GALAXY.c.dimension_y]
                width = galaxy[GALAXY.c.dimension_x]
                idx = 0
                if name == 'mu':
                    idx = 0
                elif name == 'm':
                    idx = 1
                elif name == 'ldust':
                    idx = 2
                elif name == 'sfr':
                    idx = 3

                # Collect the valid (positive, non-NaN) values to derive the stretch
                values = []
                for x in range(0, width - 1):
                    for y in range(0, height - 1):
                        value = array[y, x, idx]
                        if not math.isnan(value) and value > 0:
                            values.append(value)

                values.sort()
                # Clip at the top 0.5% so a few bright pixels don't wash out the image
                if len(values) > 1000:
                    top_count = int(len(values) * 0.005)
                    top_value = values[len(values) - top_count]
                elif len(values) > 0:
                    top_value = values[len(values) - 1]
                else:
                    top_value = 1
                if len(values) > 1:
                    median_value = values[int(len(values) / 2)]
                elif len(values) > 0:
                    median_value = values[0]
                else:
                    median_value = 1

                # asinh stretch scaled so the top value maps to 255
                sigma = 1 / median_value
                multiplier = 255.0 / math.asinh(top_value * sigma)

                image = Image.new("RGB", (width, height), black_rgb)
                for x in range(0, width - 1):
                    for y in range(0, height - 1):
                        value = array[y, x, idx]
                        if not math.isnan(value) and value > 0:
                            value = int(math.asinh(value * sigma) * multiplier)
                            if value > 255:
                                value = 255
                            red = FIRE_R[value]
                            green = FIRE_G[value]
                            blue = FIRE_B[value]
                            # y axis is flipped so the image is the right way up
                            image.putpixel((x, height - y - 1), (red, green, blue))

                file_name = '{0}/image.png'.format(POGS_TMP)
                image.save(file_name)
                s3helper.add_file_to_bucket(bucket_name,
                                            get_build_png_name(get_galaxy_file_name(galaxy[GALAXY.c.name], galaxy[GALAXY.c.run_id], galaxy[GALAXY.c.galaxy_id]),
                                                               name),
                                            file_name)
            if shutdown() is True:
                LOG.info('Spot Instance Terminate Notice received, build_png_image is shutting down')
                break

    except Exception:
        LOG.exception('An exception occurred.')

    finally:
        connection.close()

    LOG.info('Built images for %d galaxies', galaxy_count)
# ---- Example 22 ----
def delete_galaxy(connection, galaxy_ids):
    """
    Delete the stored results for each galaxy and mark it as DELETED.

    For every galaxy id the pixel results of all its areas and its FITS
    headers are removed from the database, and its sed files are deleted
    from the sed files bucket.  Each galaxy is handled in its own
    transaction, which is rolled back (and the process exits) whenever a
    shutdown has been requested.

    :param connection: the database connection to use
    :param galaxy_ids: iterable of galaxy ids to delete
    """
    for galaxy_id in galaxy_ids:
        transaction = connection.begin()
        galaxy = connection.execute(
            select([GALAXY]).where(GALAXY.c.galaxy_id == galaxy_id)).first()
        if galaxy is None:
            LOG.info('Error: Galaxy with galaxy_id of %d was not found',
                     galaxy_id)
        else:
            LOG.info('Deleting Galaxy with galaxy_id of %d - %s', galaxy_id,
                     galaxy[GALAXY.c.name])
            # Count the areas first so the loop below can report progress
            area_count = connection.execute(
                select([func.count(AREA.c.area_id)]).where(
                    AREA.c.galaxy_id == galaxy[GALAXY.c.galaxy_id])).first()[0]
            counter = 1

            # Remove the pixel results one area at a time
            for area_id1 in connection.execute(
                    select([AREA.c.area_id]).where(AREA.c.galaxy_id == galaxy[
                        GALAXY.c.galaxy_id]).order_by(AREA.c.area_id)):
                LOG.info("Deleting galaxy {0} area {1}. {2} of {3}".format(
                    galaxy_id, area_id1[0], counter, area_count))
                connection.execute(PIXEL_RESULT.delete().where(
                    PIXEL_RESULT.c.area_id == area_id1[0]))

                # Give the rest of the world a chance to access the database
                time.sleep(0.1)
                counter += 1

                if shutdown() is True:
                    transaction.rollback()
                    raise SystemExit

            LOG.info("Deleting FITS headers for galaxy {0}".format(galaxy_id))
            connection.execute(FITS_HEADER.delete().where(
                FITS_HEADER.c.galaxy_id == galaxy[GALAXY.c.galaxy_id]))

            # Now empty the bucket of the sed files
            s3helper = S3Helper()
            bucket = s3helper.get_bucket(get_sed_files_bucket())
            galaxy_file_name = get_galaxy_file_name(galaxy[GALAXY.c.name],
                                                    galaxy[GALAXY.c.run_id],
                                                    galaxy[GALAXY.c.galaxy_id])
            for key in bucket.list(prefix='{0}/'.format(galaxy_file_name)):
                # Ignore the key
                if key.key.endswith('/'):
                    continue

                bucket.delete_key(key)

                if shutdown() is True:
                    transaction.rollback()
                    raise SystemExit

            # Now the folder
            key = Key(bucket)
            key.key = '{0}/'.format(galaxy_file_name)
            bucket.delete_key(key)

        # NOTE(review): this log line and the status update also run when the
        # galaxy was not found above - confirm that is intended
        LOG.info('Galaxy with galaxy_id of %d was deleted', galaxy_id)
        connection.execute(
            GALAXY.update().where(GALAXY.c.galaxy_id == galaxy_id).values(
                status_id=DELETED, status_time=datetime.datetime.now()))

        if shutdown() is True:
            transaction.rollback()
            raise SystemExit

        transaction.commit()
def archive_to_hdf5(connection):
    """
    Archive data to an HDF5 file.

    For every galaxy in the PROCESSED state, write the galaxy attributes,
    FITS headers, image filters, areas and pixel values into a per-galaxy
    HDF5 file, move the file into the 'to_store' directory and mark the
    galaxy as ARCHIVED in the database.

    :param connection: the database connection
    :return: None
    """
    # Load the parameter name map (name -> parameter_name_id)
    map_parameter_name = {}
    for parameter_name in connection.execute(select([PARAMETER_NAME])):
        map_parameter_name[parameter_name[PARAMETER_NAME.c.name]] = parameter_name[PARAMETER_NAME.c.parameter_name_id]

    # Collect the galaxy ids first so the result cursor is not held open
    # during the long-running archiving work below
    galaxy_ids = []
    for galaxy in connection.execute(select([GALAXY]).where(GALAXY.c.status_id == PROCESSED).order_by(GALAXY.c.galaxy_id)):
        galaxy_ids.append(galaxy[GALAXY.c.galaxy_id])

    for galaxy_id_str in galaxy_ids:
        start_time = time.time()

        galaxy_id1 = int(galaxy_id_str)
        galaxy = connection.execute(select([GALAXY]).where(GALAXY.c.galaxy_id == galaxy_id1)).first()
        if galaxy is None:
            LOG.info('Error: Galaxy with galaxy_id of %d was not found', galaxy_id1)
        else:
            LOG.info('Archiving Galaxy with galaxy_id of %d - %s', galaxy_id1, galaxy[GALAXY.c.name])

            # Copy the galaxy details
            galaxy_file_name = get_galaxy_file_name(galaxy[GALAXY.c.name], galaxy[GALAXY.c.run_id], galaxy[GALAXY.c.galaxy_id])
            filename = os.path.join(HDF5_OUTPUT_DIRECTORY, '{0}.hdf5'.format(galaxy_file_name))

            h5_file = h5py.File(filename, 'w')

            # Bug fix: close the HDF5 file even if one of the store_* calls
            # below raises, so we don't leak the handle and leave a locked,
            # half-written file on disk
            try:
                # Build the groups
                galaxy_group = h5_file.create_group('galaxy')
                area_group = galaxy_group.create_group('area')
                pixel_group = galaxy_group.create_group('pixel')

                # Write the galaxy data
                galaxy_group.attrs['galaxy_id'] = galaxy[GALAXY.c.galaxy_id]
                galaxy_group.attrs['run_id'] = galaxy[GALAXY.c.run_id]
                galaxy_group.attrs['name'] = galaxy[GALAXY.c.name]
                galaxy_group.attrs['dimension_x'] = galaxy[GALAXY.c.dimension_x]
                galaxy_group.attrs['dimension_y'] = galaxy[GALAXY.c.dimension_y]
                galaxy_group.attrs['dimension_z'] = galaxy[GALAXY.c.dimension_z]
                galaxy_group.attrs['redshift'] = float(galaxy[GALAXY.c.redshift])
                galaxy_group.attrs['create_time'] = str(galaxy[GALAXY.c.create_time])
                galaxy_group.attrs['image_time'] = str(galaxy[GALAXY.c.image_time])
                galaxy_group.attrs['galaxy_type'] = galaxy[GALAXY.c.galaxy_type]
                galaxy_group.attrs['ra_cent'] = galaxy[GALAXY.c.ra_cent]
                galaxy_group.attrs['dec_cent'] = galaxy[GALAXY.c.dec_cent]
                galaxy_group.attrs['sigma'] = float(galaxy[GALAXY.c.sigma])
                galaxy_group.attrs['pixel_count'] = galaxy[GALAXY.c.pixel_count]
                galaxy_group.attrs['pixels_processed'] = galaxy[GALAXY.c.pixels_processed]
                galaxy_group.attrs['output_format'] = OUTPUT_FORMAT_1_03

                galaxy_id_aws = galaxy[GALAXY.c.galaxy_id]

                # Store the data associated with the galaxy
                store_fits_header(connection, galaxy_id_aws, galaxy_group)
                store_image_filters(connection, galaxy_id_aws, galaxy_group)

                # Store the data associated with the areas
                area_count = store_area(connection, galaxy_id_aws, area_group)
                store_area_user(connection, galaxy_id_aws, area_group)
                h5_file.flush()

                # Store the values associated with a pixel
                pixel_count = store_pixels(connection,
                                           galaxy_file_name,
                                           pixel_group,
                                           galaxy[GALAXY.c.dimension_x],
                                           galaxy[GALAXY.c.dimension_y],
                                           galaxy[GALAXY.c.dimension_z],
                                           area_count,
                                           galaxy[GALAXY.c.galaxy_id],
                                           map_parameter_name)

                # Flush the HDF5 data to disk
                h5_file.flush()
            finally:
                h5_file.close()

            # Move the file
            to_store = os.path.join(HDF5_OUTPUT_DIRECTORY, 'to_store')
            LOG.info('Moving the file %s to %s', filename, to_store)
            if not os.path.exists(to_store):
                os.makedirs(to_store)

            # Sometimes the file can exist so remove it
            old_filename = os.path.join(to_store, '{0}.hdf5'.format(galaxy_file_name))
            LOG.info('Checking for old file %s', old_filename)
            if os.path.exists(old_filename):
                LOG.info('Removing old file %s', old_filename)
                os.remove(old_filename)

            shutil.move(filename, to_store)

            connection.execute(GALAXY.update().where(GALAXY.c.galaxy_id == galaxy_id1).values(status_id=ARCHIVED, status_time=datetime.datetime.now()))

            end_time = time.time()
            LOG.info('Galaxy with galaxy_id of %d was archived.', galaxy_id1)
            LOG.info('Copied %d areas %d pixels.', area_count, pixel_count)
            total_time = end_time - start_time
            LOG.info('Total time %d mins %.1f secs', int(total_time / 60), total_time % 60)
示例#24
0
    def process_file(self, registration):
        """
        Process a registration: load the FITS file, create the galaxy record,
        break the galaxy into work units, build the images and upload the
        FITS files to S3.

        :param registration: a row from the REGISTER table
        :return: tuple of (work_units_added, pixel_count); (0, 0) on failure
        """
        self._filename = registration[REGISTER.c.filename]
        self._galaxy_name = registration[REGISTER.c.galaxy_name]
        self._galaxy_type = registration[REGISTER.c.galaxy_type]
        self._priority = registration[REGISTER.c.priority]
        self._redshift = registration[REGISTER.c.redshift]
        self._run_id = registration[REGISTER.c.run_id]
        self._sigma = registration[REGISTER.c.sigma]
        self._sigma_filename = registration[REGISTER.c.sigma_filename]

        # Have we files that we can use for this?
        self._rounded_redshift = self._get_rounded_redshift()
        if self._rounded_redshift is None:
            LOG.error('No models matching the redshift of %.4f', self._redshift)
            # Bug fix: return a (work_units, pixels) pair like every other
            # exit path, so callers that unpack the result don't blow up
            return 0, 0

        self._hdu_list = pyfits.open(self._filename, memmap=True)
        self._layer_count = len(self._hdu_list)

        # Do we need to open and sort the S/N Ratio file
        if self._sigma_filename is not None:
            self._sigma = 0.0
            self._signal_noise_hdu = pyfits.open(self._sigma_filename, memmap=True)
            if self._layer_count != len(self._signal_noise_hdu):
                LOG.error('The layer counts do not match %d vs %d', self._layer_count, len(self._signal_noise_hdu))
                return 0, 0
        else:
            self._sigma = float(self._sigma)

        # Image dimensions come from the first HDU: data is (y, x)
        self._end_y = self._hdu_list[0].data.shape[0]
        self._end_x = self._hdu_list[0].data.shape[1]

        LOG.info("Image dimensions: %(x)d x %(y)d x %(z)d => %(pix).2f Mpixels" % {'x': self._end_x, 'y': self._end_y, 'z': self._layer_count, 'pix': self._end_x * self._end_y / 1000000.0})

        # Get the flops estimate amd cobblestone factor
        run = self._connection.execute(select([RUN]).where(RUN.c.run_id == self._run_id)).first()
        self._fpops_est_per_pixel = run[RUN.c.fpops_est]
        self._cobblestone_scaling_factor = run[RUN.c.cobblestone_factor]

        # Create and save the object
        datetime_now = datetime.now()
        result = self._connection.execute(GALAXY.insert().values(name=self._galaxy_name,
                                                                 dimension_x=self._end_x,
                                                                 dimension_y=self._end_y,
                                                                 dimension_z=self._layer_count,
                                                                 redshift=self._redshift,
                                                                 sigma=self._sigma,
                                                                 create_time=datetime_now,
                                                                 image_time=datetime_now,
                                                                 galaxy_type=self._galaxy_type,
                                                                 ra_cent=0,
                                                                 dec_cent=0,
                                                                 pixel_count=0,
                                                                 pixels_processed=0,
                                                                 run_id=self._run_id))
        self._galaxy_id = result.inserted_primary_key[0]
        LOG.info("Writing %s to database", self._galaxy_name)

        # Store the fits header
        self._store_fits_header()

        # Get the filters we're using for this run and sort the layers
        self._get_filters_sort_layers()

        # Build the template file we need if necessary
        self._build_template_file()

        # Copy the filter and model files we need
        self._copy_important_files()

        # Now break up the galaxy into chunks
        self._break_up_galaxy()
        self._connection.execute(GALAXY.update().where(GALAXY.c.galaxy_id == self._galaxy_id).values(pixel_count=self._pixel_count))

        LOG.info('Building the images')
        galaxy_file_name = get_galaxy_file_name(self._galaxy_name, self._run_id, self._galaxy_id)
        s3helper = S3Helper()
        image = FitsImage(self._connection)
        image.build_image(self._filename, galaxy_file_name, self._galaxy_id, get_galaxy_image_bucket())

        # Copy the fits file to S3 - renamed to make it unique
        bucket_name = get_files_bucket()
        s3helper.add_file_to_bucket(bucket_name, get_key_fits(self._galaxy_name, self._run_id, self._galaxy_id), self._filename)
        if self._sigma_filename is not None:
            s3helper.add_file_to_bucket(bucket_name, get_key_sigma_fits(self._galaxy_name, self._run_id, self._galaxy_id), self._sigma_filename)

        return self._work_units_added, self._pixel_count
示例#25
0
            median_value = values[int(len(values) / 2)]
        elif len(values) > 0:
            median_value = values[0]
        else:
            median_value = 1

        sigma = 1 / median_value
        multiplier = 255.0 / math.asinh(top_value * sigma)

        image = Image.new("RGB", (width, height), blackRGB)
        for x in range(0, width - 1):
            for y in range(0, height - 1):
                value = array[y, x, idx]
                if not math.isnan(value) and value > 0:
                    value = int(math.asinh(value * sigma) * multiplier)
                    if value > 255:
                        value = 255
                    red = FIRE_R[value]
                    green = FIRE_G[value]
                    blue = FIRE_B[value]
                    image.putpixel((x, height - y - 1), (red, green, blue))

        file_name = '{0}/image.png'.format(POGS_TMP)
        image.save(file_name)
        s3helper.add_file_to_bucket(bucket_name,
                                    get_build_png_name(get_galaxy_file_name(galaxy[GALAXY.c.name], galaxy[GALAXY.c.run_id], galaxy[GALAXY.c.galaxy_id]),
                                                       name),
                                    file_name)

LOG.info('Built images for %d galaxies', galaxy_count)
示例#26
0
def generate_files(connection, hdf5_request_galaxy_ids, email, features, layers, pixel_types):
    """
    Get the FITS files for this request.

    First checks which of the requested galaxies are available in S3 (and
    requests Glacier restores for archived ones).  If enough galaxies are
    available (GALAXY_EMAIL_THRESHOLD), the HDF5 files are converted to FITS,
    zipped, uploaded, and a link is emailed to the user.

    :param connection: the database connection
    :param hdf5_request_galaxy_ids: the HDF5_REQUEST_GALAXY rows to process
    :param email: the email address to send the result links to
    :param features: the features to extract
    :param layers: the layers to extract
    :param pixel_types: the pixel types to extract
    :return: None
    """
    uuid_string = str(uuid.uuid4())
    results = []
    available_galaxies = []
    s3_helper = S3Helper()
    bucket_name = get_saved_files_bucket()

    # Check whether all the requested galaxies are available or not.
    for hdf5_request_galaxy in hdf5_request_galaxy_ids:
        galaxy = connection.execute(select([GALAXY]).where(GALAXY.c.galaxy_id == hdf5_request_galaxy.galaxy_id)).first()
        hdf5_request_galaxy = connection.execute(select([HDF5_REQUEST_GALAXY])
                                                 .where(HDF5_REQUEST_GALAXY.c.hdf5_request_galaxy_id == hdf5_request_galaxy.hdf5_request_galaxy_id)).first()
        state = hdf5_request_galaxy.state

        # Bug fix: use != rather than 'is not' - identity comparison with an
        # int relies on CPython's small-int caching and is not guaranteed
        if state != 0:
            LOG.info('Skipping {0}, state is {1}'.format(galaxy[GALAXY.c.name], state))
            continue  # Skip

        key = get_key_hdf5(galaxy[GALAXY.c.name], galaxy[GALAXY.c.run_id], galaxy[GALAXY.c.galaxy_id])

        if s3_helper.file_exists(bucket_name, key):
            if s3_helper.file_archived(bucket_name, key):
                # file is archived
                if s3_helper.file_restoring(bucket_name, key):
                    # if file is restoring, just need to wait for it
                    LOG.info('Galaxy {0} is still restoring from glacier'.format(galaxy[GALAXY.c.name]))
                else:
                    # if file is not restoring, need to request.
                    file_size = s3_helper.file_size(bucket_name, key)

                    if restore_file_size_check(connection, bucket_name, file_size):
                        # We're good to restore
                        LOG.info('Making request for archived galaxy {0}'.format(galaxy[GALAXY.c.name]))
                        s3_helper.restore_archived_file(bucket_name, key)

                        connection.execute(HDF5_REQUEST_GALAXY_SIZE.insert(),
                                           hdf5_request_galaxy_id=hdf5_request_galaxy['hdf5_request_galaxy_id'],
                                           size=file_size,
                                           request_time=seconds_since_epoch(datetime.now()))
                    else:
                        # Don't restore or we risk spending a lot of money
                        LOG.info('Daily galaxy restore size hit. Cannot request archived galaxy.')
            else:
                # file is not archived
                LOG.info('Galaxy {0} is available in s3'.format(galaxy[GALAXY.c.name]))
                available_galaxies.append(hdf5_request_galaxy)
        else:
            LOG.error('Galaxy {0} does not exist on s3 or glacier!'.format(galaxy[GALAXY.c.name]))

    total_request_galaxies = len(hdf5_request_galaxy_ids)
    LOG.info('Need to have {0} galaxies available ({1} currently available)'.format(total_request_galaxies * GALAXY_EMAIL_THRESHOLD, len(available_galaxies)))
    if len(available_galaxies) >= total_request_galaxies * GALAXY_EMAIL_THRESHOLD:  # Only proceed if more than the threshold of galaxies are available
        LOG.info('{0}/{1} (> {2}%) galaxies are available. Email will be sent'.format(
            len(available_galaxies),
            total_request_galaxies,
            GALAXY_EMAIL_THRESHOLD * 100)
        )
        remaining_galaxies = total_request_galaxies - len(available_galaxies)

        for hdf5_request_galaxy in available_galaxies:
            result = HDF5ToFitsResult()
            results.append(result)
            connection.execute(HDF5_REQUEST_GALAXY.update().where(HDF5_REQUEST_GALAXY.c.hdf5_request_galaxy_id == hdf5_request_galaxy.hdf5_request_galaxy_id).values(state=1))
            # noinspection PyBroadException
            try:
                galaxy = connection.execute(select([GALAXY]).where(GALAXY.c.galaxy_id == hdf5_request_galaxy.galaxy_id)).first()
                result.galaxy_name = galaxy[GALAXY.c.name]
                LOG.info('Processing {0} ({1}) for {2}'.format(galaxy[GALAXY.c.name], galaxy[GALAXY.c.galaxy_id], email))

                # make sure the galaxy is available
                if galaxy[GALAXY.c.status_id] == STORED or galaxy[GALAXY.c.status_id] == DELETED:
                    output_dir = tempfile.mkdtemp()
                    try:
                        s3_helper = S3Helper()
                        LOG.info('Getting HDF5 file to {0}'.format(output_dir))
                        tmp_file = get_hdf5_file(s3_helper, output_dir, galaxy[GALAXY.c.name], galaxy[GALAXY.c.run_id], galaxy[GALAXY.c.galaxy_id])
                        LOG.info('File stored in {0}'.format(tmp_file))

                        # We have the file
                        if os.path.isfile(tmp_file):
                            int_flux_output = os.path.join(output_dir, 'intflux')
                            rad_output = os.path.join(output_dir, 'rad')

                            if not os.path.exists(int_flux_output):
                                os.mkdir(int_flux_output)

                            if not os.path.exists(rad_output):
                                os.mkdir(rad_output)

                            file_names = process_hdf5_file(
                                tmp_file,
                                galaxy[GALAXY.c.name],
                                galaxy[GALAXY.c.galaxy_id],
                                pixel_types,
                                features,
                                result,
                                layers,
                                output_dir,
                                rad_output,
                                int_flux_output,
                            )

                            url = zip_files(
                                s3_helper,
                                get_galaxy_file_name(galaxy[GALAXY.c.name], galaxy[GALAXY.c.run_id], galaxy[GALAXY.c.galaxy_id]),
                                uuid_string,
                                file_names,
                                output_dir
                            )

                            connection.execute(
                                HDF5_REQUEST_GALAXY.update().
                                where(HDF5_REQUEST_GALAXY.c.hdf5_request_galaxy_id == hdf5_request_galaxy.hdf5_request_galaxy_id).
                                values(state=2, link=url, link_expires_at=datetime.now() + timedelta(days=10)))

                            result.error = None
                            result.link = url

                    except S3ResponseError as e:  # Handling for a strange s3 error
                        LOG.error('Error retrieving galaxy {0} from s3. Retrying next run'.format(galaxy[GALAXY.c.name]))
                        LOG.error('{0}'.format(str(e)))
                        key = get_key_hdf5(galaxy[GALAXY.c.name], galaxy[GALAXY.c.run_id], galaxy[GALAXY.c.galaxy_id])
                        LOG.info('Key: {0}'.format(key))
                        LOG.info('Exists: {0}'.format(s3_helper.file_exists(bucket_name, key)))
                        result.error = traceback.format_exc()
                        remaining_galaxies += 1
                    finally:
                        # Delete the temp files now we're done
                        shutil.rmtree(output_dir)

                else:
                    connection.execute(HDF5_REQUEST_GALAXY.update().
                                       where(HDF5_REQUEST_GALAXY.c.hdf5_request_galaxy_id == hdf5_request_galaxy.hdf5_request_galaxy_id).
                                       values(state=3))
                    result.error = 'Cannot process {0} ({1}) as the HDF5 file has not been generated'.format(galaxy[GALAXY.c.name], galaxy[GALAXY.c.galaxy_id])
                    LOG.info(result.error)
            # Bug fix: a bare 'except:' would also swallow SystemExit and
            # KeyboardInterrupt; catch Exception and log the traceback
            except Exception:
                LOG.exception('Major error')
                result.error = traceback.format_exc()
                connection.execute(HDF5_REQUEST_GALAXY.update().
                                   where(HDF5_REQUEST_GALAXY.c.hdf5_request_galaxy_id == hdf5_request_galaxy.hdf5_request_galaxy_id).
                                   values(state=3))

        send_email(email, results, features, layers, pixel_types, remaining_galaxies)
def generate_files(connection, hdf5_request_galaxy_ids, email, features, layers):
    """
    Get the FITS files for this request.

    For each requested galaxy, download the HDF5 file from S3, build a FITS
    image per feature/layer combination, zip and upload the results, and
    record the download link; then email the links to the user.

    :param connection: the database connection
    :param hdf5_request_galaxy_ids: the HDF5_REQUEST_GALAXY rows to process
    :param email: the email address to send the result links to
    :param features: the features to extract
    :param layers: the layers to extract
    :return: None
    """
    uuid_string = str(uuid.uuid4())
    results = []
    for hdf5_request_galaxy in hdf5_request_galaxy_ids:
        result = HDF5ToFitsResult()
        results.append(result)
        connection.execute(HDF5_REQUEST_GALAXY.update().where(HDF5_REQUEST_GALAXY.c.hdf5_request_galaxy_id == hdf5_request_galaxy.hdf5_request_galaxy_id).values(state=1))
        try:
            galaxy = connection.execute(select([GALAXY]).where(GALAXY.c.galaxy_id == hdf5_request_galaxy.galaxy_id)).first()
            result.galaxy_name = galaxy[GALAXY.c.name]
            LOG.info('Processing {0} ({1}) for {2}'.format(galaxy[GALAXY.c.name], galaxy[GALAXY.c.galaxy_id], email))

            # make sure the galaxy is available
            if galaxy[GALAXY.c.status_id] == STORED or galaxy[GALAXY.c.status_id] == DELETED:
                output_dir = tempfile.mkdtemp()
                try:
                    s3_helper = S3Helper()
                    LOG.info('Getting HDF5 file to {0}'.format(output_dir))
                    tmp_file = get_hdf5_file(s3_helper, output_dir, galaxy[GALAXY.c.name], galaxy[GALAXY.c.run_id], galaxy[GALAXY.c.galaxy_id])
                    LOG.info('File stored in {0}'.format(tmp_file))

                    # We have the file
                    if os.path.isfile(tmp_file):
                        h5_file = h5py.File(tmp_file, 'r')
                        # Bug fix: close the HDF5 file even if building one
                        # of the FITS images raises, so the handle isn't
                        # leaked before the temp directory is removed
                        try:
                            galaxy_group = h5_file['galaxy']
                            pixel_group = galaxy_group['pixel']

                            file_names = []
                            for feature in features:
                                for layer in layers:
                                    LOG.info('Processing {0} - {1}'.format(feature, layer))
                                    file_names.append(build_fits_image(feature, layer, output_dir, galaxy_group, pixel_group, galaxy[GALAXY.c.name]))
                        finally:
                            h5_file.close()
                        url = zip_files(s3_helper, get_galaxy_file_name(galaxy[GALAXY.c.name], galaxy[GALAXY.c.run_id], galaxy[GALAXY.c.galaxy_id]), uuid_string, file_names, output_dir)
                        connection.execute(HDF5_REQUEST_GALAXY.update().
                                           where(HDF5_REQUEST_GALAXY.c.hdf5_request_galaxy_id == hdf5_request_galaxy.hdf5_request_galaxy_id).
                                           values(state=2, link=url, link_expires_at=datetime.now() + timedelta(days=10)))
                        result.error = None
                        result.link = url
                finally:
                    # Delete the temp files now we're done
                    shutil.rmtree(output_dir)

            else:
                connection.execute(HDF5_REQUEST_GALAXY.update().
                                   where(HDF5_REQUEST_GALAXY.c.hdf5_request_galaxy_id == hdf5_request_galaxy.hdf5_request_galaxy_id).
                                   values(state=3))
                result.error = 'Cannot process {0} ({1}) as the HDF5 file has not been generated'.format(galaxy[GALAXY.c.name], galaxy[GALAXY.c.galaxy_id])
                LOG.info(result.error)
        # Bug fix: a bare 'except:' would also swallow SystemExit and
        # KeyboardInterrupt; catch Exception and log the traceback
        except Exception:
            LOG.exception('Major error')
            result.error = traceback.format_exc()
            connection.execute(HDF5_REQUEST_GALAXY.update().
                               where(HDF5_REQUEST_GALAXY.c.hdf5_request_galaxy_id == hdf5_request_galaxy.hdf5_request_galaxy_id).
                               values(state=3))

    send_email(email, results, features, layers)
示例#28
0
def data_string(connection, user, galaxies):
    """
    Build the Docmosis request payload for a detailed user report.

    The payload asks Docmosis to render the DetailedUserReport template and
    mail the resulting PDF to the user; it embeds the four filter images,
    their labels, and (optionally) the parameter images for every galaxy
    the user has worked on.

    :param connection: the database connection
    :param user: the user the report is for
    :param galaxies: the galaxies the user has worked on
    :return: the payload as a single string
    """
    s3_connection = get_s3_connection()
    bucket = get_bucket(s3_connection, get_galaxy_image_bucket())

    has_param = 1
    # Accumulate the payload fragments and join them once at the end
    parts = [
        '{\n',
        '"accessKey":"{0}",\n'.format(DOCMOSIS_KEY),
        '"templateName":"{0}",\n'.format(DOCMOSIS_TEMPLATE),
        '"outputName":"DetailedUserReport.pdf",\n',
        '"storeTo":"mailto:{0}",\n'.format(user.email),
        '"mailSubject":"theSkyNet POGS - Detailed User Report",\n',
        '"data":{\n',
        '"user":"******",\n',
        '"date":"{0}",\n'.format(datetime.date.today()),
        '"galaxy":[\n',
    ]
    # Loop through galaxies user has worked on.
    for galaxy in galaxies:
        galaxy_key = get_galaxy_file_name(galaxy.name, galaxy.run_id,
                                          galaxy.galaxy_id)
        parts.append('{\n')
        parts.append('"galid":"{0} (version {1})",\n'.format(
            galaxy.name, galaxy.version_number))
        # The four filter images the user contributed to
        for image_number in range(1, 5):
            parts.append('"pic{0}":"image:base64:{1}",\n'.format(
                image_number,
                user_galaxy_image(bucket, galaxy_key, connection, user.id,
                                  galaxy.galaxy_id, image_number)))
        # ... and their filter labels
        for image_number in range(1, 5):
            parts.append('"pic{0}_label":"{1}",\n'.format(
                image_number,
                galaxy_filter_label(connection, galaxy.galaxy_id,
                                    image_number)))
        # Only if there is parameter images
        if has_param:
            parts.append('"add":"true",\n')
            for image_number, parameter in ((5, 'mu'), (6, 'm'),
                                            (7, 'ldust'), (8, 'sfr')):
                parts.append('"pic{0}":"image:base64:{1}",\n'.format(
                    image_number,
                    galaxy_parameter_image(bucket, galaxy_key, parameter)))
        parts.append('"gatype":"{0}",\n'.format(galaxy.galaxy_type))
        parts.append('"gars":"{0}",\n'.format(galaxy.redshift))
        parts.append('"gades":"{0}",\n'.format(galaxy.design))
        parts.append('"gara_eqj2000":"{0}",\n'.format(galaxy.ra_eqj2000))
        parts.append('"gadec_eqj2000":"{0}",\n'.format(galaxy.dec_eqj2000))
        parts.append('"gara_eqb1950":"{0}",\n'.format(galaxy.ra_eqb1950))
        parts.append('"gadec_eqb1950":"{0}",\n'.format(galaxy.dec_eqb1950))
        parts.append('},\n')
    parts.append(']\n')
    parts.append('}\n')
    parts.append('}\n')

    return ''.join(parts)
示例#29
0
def generate_files(connection, hdf5_request_galaxy_ids, email, features,
                   layers, pixel_types):
    """
    Get the FITS files for this request.

    For each requested galaxy, check whether its HDF5 file is available in
    S3 (requesting a Glacier restore when it is archived and the daily
    restore budget allows it).  Once enough galaxies are available, convert
    each HDF5 file to FITS, zip and upload the results, record a download
    link, and email the requester.

    :param connection: the database connection
    :param hdf5_request_galaxy_ids: the HDF5-request galaxy rows to process
    :param email: the email address to notify with download links
    :param features: the features to extract from each HDF5 file
    :param layers: the layers to extract from each HDF5 file
    :param pixel_types: the pixel types to extract from each HDF5 file
    :return: None
    """
    uuid_string = str(uuid.uuid4())
    results = []
    available_galaxies = []
    s3_helper = S3Helper()
    bucket_name = get_saved_files_bucket()

    # Check whether all the requested galaxies are available or not.
    for hdf5_request_galaxy in hdf5_request_galaxy_ids:
        galaxy = connection.execute(
            select([GALAXY]).where(
                GALAXY.c.galaxy_id == hdf5_request_galaxy.galaxy_id)).first()
        # Re-read the request row to get its current state.
        hdf5_request_galaxy = connection.execute(
            select([
                HDF5_REQUEST_GALAXY
            ]).where(HDF5_REQUEST_GALAXY.c.hdf5_request_galaxy_id ==
                     hdf5_request_galaxy.hdf5_request_galaxy_id)).first()
        state = hdf5_request_galaxy.state

        # Only rows in state 0 are pending.  Use '!=' here, not 'is not':
        # 'is not' compares object identity and only happens to work for
        # small ints in CPython (and raises SyntaxWarning on 3.8+).
        if state != 0:
            LOG.info('Skipping {0}, state is {1}'.format(
                galaxy[GALAXY.c.name], state))
            continue  # Skip

        key = get_key_hdf5(galaxy[GALAXY.c.name], galaxy[GALAXY.c.run_id],
                           galaxy[GALAXY.c.galaxy_id])

        if s3_helper.file_exists(bucket_name, key):
            if s3_helper.file_archived(bucket_name, key):
                # file is archived in Glacier
                if s3_helper.file_restoring(bucket_name, key):
                    # if file is restoring, just need to wait for it
                    LOG.info(
                        'Galaxy {0} is still restoring from glacier'.format(
                            galaxy[GALAXY.c.name]))
                else:
                    # if file is not restoring, need to request.
                    file_size = s3_helper.file_size(bucket_name, key)

                    if restore_file_size_check(connection, bucket_name,
                                               file_size):
                        # We're good to restore
                        LOG.info(
                            'Making request for archived galaxy {0}'.format(
                                galaxy[GALAXY.c.name]))
                        s3_helper.restore_archived_file(bucket_name, key)

                        # Record the restore against today's budget.
                        connection.execute(
                            HDF5_REQUEST_GALAXY_SIZE.insert(),
                            hdf5_request_galaxy_id=hdf5_request_galaxy[
                                'hdf5_request_galaxy_id'],
                            size=file_size,
                            request_time=seconds_since_epoch(datetime.now()))
                    else:
                        # Don't restore or we risk spending a lot of money
                        LOG.info(
                            'Daily galaxy restore size hit. Cannot request archived galaxy.'
                        )
            else:
                # file is not archived
                LOG.info('Galaxy {0} is available in s3'.format(
                    galaxy[GALAXY.c.name]))
                available_galaxies.append(hdf5_request_galaxy)
        else:
            LOG.error('Galaxy {0} does not exist on s3 or glacier!'.format(
                galaxy[GALAXY.c.name]))

    total_request_galaxies = len(hdf5_request_galaxy_ids)
    LOG.info(
        'Need to have {0} galaxies available ({1} currently available)'.format(
            total_request_galaxies * GALAXY_EMAIL_THRESHOLD,
            len(available_galaxies)))
    if len(
            available_galaxies
    ) >= total_request_galaxies * GALAXY_EMAIL_THRESHOLD:  # Only proceed if more than the threshold of galaxies are available
        LOG.info('{0}/{1} (> {2}%) galaxies are available. Email will be sent'.
                 format(len(available_galaxies), total_request_galaxies,
                        GALAXY_EMAIL_THRESHOLD * 100))
        remaining_galaxies = total_request_galaxies - len(available_galaxies)

        for hdf5_request_galaxy in available_galaxies:
            result = HDF5ToFitsResult()
            results.append(result)
            # Mark the row as being processed (state 1).
            connection.execute(HDF5_REQUEST_GALAXY.update().where(
                HDF5_REQUEST_GALAXY.c.hdf5_request_galaxy_id ==
                hdf5_request_galaxy.hdf5_request_galaxy_id).values(state=1))
            # noinspection PyBroadException
            try:
                galaxy = connection.execute(
                    select([GALAXY
                            ]).where(GALAXY.c.galaxy_id ==
                                     hdf5_request_galaxy.galaxy_id)).first()
                result.galaxy_name = galaxy[GALAXY.c.name]
                LOG.info('Processing {0} ({1}) for {2}'.format(
                    galaxy[GALAXY.c.name], galaxy[GALAXY.c.galaxy_id], email))

                # make sure the galaxy is available
                if galaxy[GALAXY.c.status_id] == STORED or galaxy[
                        GALAXY.c.status_id] == DELETED:
                    output_dir = tempfile.mkdtemp()
                    try:
                        s3_helper = S3Helper()
                        LOG.info('Getting HDF5 file to {0}'.format(output_dir))
                        tmp_file = get_hdf5_file(s3_helper, output_dir,
                                                 galaxy[GALAXY.c.name],
                                                 galaxy[GALAXY.c.run_id],
                                                 galaxy[GALAXY.c.galaxy_id])
                        LOG.info('File stored in {0}'.format(tmp_file))

                        # We have the file
                        if os.path.isfile(tmp_file):
                            int_flux_output = os.path.join(
                                output_dir, 'intflux')
                            rad_output = os.path.join(output_dir, 'rad')

                            if not os.path.exists(int_flux_output):
                                os.mkdir(int_flux_output)

                            if not os.path.exists(rad_output):
                                os.mkdir(rad_output)

                            file_names = process_hdf5_file(
                                tmp_file,
                                galaxy[GALAXY.c.name],
                                galaxy[GALAXY.c.galaxy_id],
                                pixel_types,
                                features,
                                result,
                                layers,
                                output_dir,
                                rad_output,
                                int_flux_output,
                            )

                            url = zip_files(
                                s3_helper,
                                get_galaxy_file_name(
                                    galaxy[GALAXY.c.name],
                                    galaxy[GALAXY.c.run_id],
                                    galaxy[GALAXY.c.galaxy_id]), uuid_string,
                                file_names, output_dir)

                            # Done (state 2); the link expires after 10 days.
                            connection.execute(HDF5_REQUEST_GALAXY.update(
                            ).where(
                                HDF5_REQUEST_GALAXY.c.hdf5_request_galaxy_id ==
                                hdf5_request_galaxy.hdf5_request_galaxy_id
                            ).values(state=2,
                                     link=url,
                                     link_expires_at=datetime.now() +
                                     timedelta(days=10)))

                            result.error = None
                            result.link = url

                    except S3ResponseError as e:  # Handling for a strange s3 error
                        LOG.error(
                            'Error retrieving galaxy {0} from s3. Retrying next run'
                            .format(galaxy[GALAXY.c.name]))
                        LOG.error('{0}'.format(str(e)))
                        key = get_key_hdf5(galaxy[GALAXY.c.name],
                                           galaxy[GALAXY.c.run_id],
                                           galaxy[GALAXY.c.galaxy_id])
                        LOG.info('Key: {0}'.format(key))
                        LOG.info('Exists: {0}'.format(
                            s3_helper.file_exists(bucket_name, key)))
                        result.error = traceback.format_exc()
                        remaining_galaxies += 1
                    finally:
                        # Delete the temp files now we're done
                        shutil.rmtree(output_dir)

                else:
                    # HDF5 not generated yet -- mark as failed (state 3).
                    connection.execute(HDF5_REQUEST_GALAXY.update().where(
                        HDF5_REQUEST_GALAXY.c.hdf5_request_galaxy_id ==
                        hdf5_request_galaxy.hdf5_request_galaxy_id).values(
                            state=3))
                    result.error = 'Cannot process {0} ({1}) as the HDF5 file has not been generated'.format(
                        galaxy[GALAXY.c.name], galaxy[GALAXY.c.galaxy_id])
                    LOG.info(result.error)
            except Exception:
                # Catch-all boundary: record the failure (state 3) and keep
                # processing the remaining galaxies.  LOG.exception keeps the
                # traceback (matches the style used elsewhere in this file).
                LOG.exception('Major error')
                result.error = traceback.format_exc()
                connection.execute(HDF5_REQUEST_GALAXY.update().where(
                    HDF5_REQUEST_GALAXY.c.hdf5_request_galaxy_id ==
                    hdf5_request_galaxy.hdf5_request_galaxy_id).values(
                        state=3))

        send_email(email, results, features, layers, pixel_types,
                   remaining_galaxies)
# --- Example #30 ---
        else:
            median_value = 1

        sigma = 1 / median_value
        multiplier = 255.0 / math.asinh(top_value * sigma)

        image = Image.new("RGB", (width, height), blackRGB)
        for x in range(0, width - 1):
            for y in range(0, height - 1):
                value = array[y, x, idx]
                if not math.isnan(value) and value > 0:
                    value = int(math.asinh(value * sigma) * multiplier)
                    if value > 255:
                        value = 255
                    red = FIRE_R[value]
                    green = FIRE_G[value]
                    blue = FIRE_B[value]
                    image.putpixel((x, height - y - 1), (red, green, blue))

        file_name = '{0}/image.png'.format(POGS_TMP)
        image.save(file_name)
        s3helper.add_file_to_bucket(
            bucket_name,
            get_build_png_name(
                get_galaxy_file_name(galaxy[GALAXY.c.name],
                                     galaxy[GALAXY.c.run_id],
                                     galaxy[GALAXY.c.galaxy_id]), name),
            file_name)

LOG.info('Built images for %d galaxies', galaxy_count)