Example #1
def test_log_to_file(tmpdir, level):

    local_path = tmpdir.join('test.log')
    log_file = local_path.open('wb')
    log_path = str(local_path.realpath())
    orig_level = log.level

    try:
        if level is not None:
            log.setLevel(level)

        with log.log_to_file(log_path):
            log.error("Error message")
            log.warning("Warning message")
            log.info("Information message")
            log.debug("Debug message")

        log_file.close()
    finally:
        log.setLevel(orig_level)

    log_file = local_path.open('rb')
    log_entries = log_file.readlines()
    log_file.close()

    if level is None:
        # The log level *should* be set to whatever it was in the config
        level = conf.log_level

    # Check list length
    if level == 'DEBUG':
        assert len(log_entries) == 4
    elif level == 'INFO':
        assert len(log_entries) == 3
    elif level == 'WARN':
        assert len(log_entries) == 2
    elif level == 'ERROR':
        assert len(log_entries) == 1

    # Check list content

    assert eval(log_entries[0].strip())[-3:] == ('astropy.tests.test_logger',
                                                 'ERROR', 'Error message')

    if len(log_entries) >= 2:
        assert eval(
            log_entries[1].strip())[-3:] == ('astropy.tests.test_logger',
                                             'WARNING', 'Warning message')

    if len(log_entries) >= 3:
        assert eval(
            log_entries[2].strip())[-3:] == ('astropy.tests.test_logger',
                                             'INFO', 'Information message')

    if len(log_entries) >= 4:
        assert eval(
            log_entries[3].strip())[-3:] == ('astropy.tests.test_logger',
                                             'DEBUG', 'Debug message')
Example #2
def concatenate_one(strip,
                    logfile=os.path.join(constants.LOGDIR, 'concatenation.log')):
    with log.log_to_file(logfile):
        # Strips are defined by the start longitude
        log.info('Concatenating L={0}'.format(strip))
        for mode in ['light', 'full']:
            for part in ['a', 'b']:
                concat = Concatenator(strip, part, mode)
                concat.run()
    return strip
Example #3
def test_log_to_file(tmpdir, level):

    local_path = tmpdir.join('test.log')
    log_file = local_path.open('wb')
    log_path = str(local_path.realpath())
    orig_level = log.level

    try:
        if level is not None:
            log.setLevel(level)

        with log.log_to_file(log_path):
            log.error("Error message")
            log.warning("Warning message")
            log.info("Information message")
            log.debug("Debug message")

        log_file.close()
    finally:
        log.setLevel(orig_level)

    log_file = local_path.open('rb')
    log_entries = log_file.readlines()
    log_file.close()

    if level is None:
        # The log level *should* be set to whatever it was in the config
        level = conf.log_level

    # Check list length
    if level == 'DEBUG':
        assert len(log_entries) == 4
    elif level == 'INFO':
        assert len(log_entries) == 3
    elif level == 'WARN':
        assert len(log_entries) == 2
    elif level == 'ERROR':
        assert len(log_entries) == 1

    # Check list content

    assert eval(log_entries[0].strip())[-3:] == (
        'astropy.tests.test_logger', 'ERROR', 'Error message')

    if len(log_entries) >= 2:
        assert eval(log_entries[1].strip())[-3:] == (
            'astropy.tests.test_logger', 'WARNING', 'Warning message')

    if len(log_entries) >= 3:
        assert eval(log_entries[2].strip())[-3:] == (
            'astropy.tests.test_logger', 'INFO', 'Information message')

    if len(log_entries) >= 4:
        assert eval(log_entries[3].strip())[-3:] == (
            'astropy.tests.test_logger', 'DEBUG', 'Debug message')
Example #4
def calibrate_one(filename):
    """Applies the re-calibration to a single bandmerged field catalogue."""
    with log.log_to_file(os.path.join(constants.LOGDIR,
                                      'apply_calibration.log')):
        try:
            ca = CalibrationApplicator()
            ca.run(filename)
        except Exception as e:
            log.error('%s: *UNEXPECTED EXCEPTION*: calibrate_one: %s' 
                      % (filename, e))
        return filename
Example #5
def prepare_one(run):
    with log.log_to_file(os.path.join(constants.LOGDIR, 'images.log')):
        result = []
        for ccd in constants.EXTENSIONS:
            try:
                img = SurveyImage(run, ccd)
                img.save()
                result.append(img.get_metadata())
                img.fits_orig.close()  # avoid memory leak
            except Exception as e:
                log.error(str(run)+': '+util.get_pid()+': '+str(e))
        return result
Example #6
def seam_one(strip,
             logfile=os.path.join(constants.LOGDIR, 'seaming.log')):
    """Seams the fields in a given longitude strip."""
    with log.log_to_file(logfile):
        # Strips are defined by the start longitude of a 10 deg-wide strip
        assert(strip in np.arange(25, 215+1, constants.STRIPWIDTH))
        log.info('{0}: strip{1}: START'.format(str(datetime.datetime.now())[0:19],
                                               strip))
        # Intialize caching dictionary
        CACHE[strip] = {}

        # Which are our boundaries?
        # Note: we must allow FIELD_MAXDIST for border overlaps!
        lon1 = strip - constants.FIELD_MAXDIST
        lon2 = strip + constants.STRIPWIDTH + constants.FIELD_MAXDIST

        cond_strip = (constants.IPHASQC_COND_RELEASE
                      & (IPHASQC['l'] >= lon1)
                      & (IPHASQC['l'] < lon2))
        n_fields = cond_strip.sum()  # How many fields are in our strip?
        n_processed = 0

        # Seam fields; do the best-seeing fields first!
        for idx in np.argsort(IPHASQC['seeing_max']):
            if cond_strip[idx]:
                n_processed += 1
                log.info('{0}: strip{1}: {2}/{3}: seaming {4}'.format(
                                            str(datetime.datetime.now())[0:19],
                                            strip,
                                            n_processed,
                                            n_fields,
                                            IPHASQC['id'][idx]))
                log.info('{0}: strip{1}: cached {2:.2f} GB'.format(
                                            str(datetime.datetime.now())[0:19],
                                            strip,
                                            sys.getsizeof(CACHE[strip])/(1024**3)))

                try:
                    s = SeamMachine(IPHASQC['id'][idx],
                                    IPHASQC['ra'][idx],
                                    IPHASQC['dec'][idx],
                                    strip)
                    s.run()
                except SeamingException as e:
                    log.error(str(e))
                except Exception as e:
                    pid = socket.gethostname()+'/'+str(os.getpid())
                    log.error('%s strip %s: %s: *UNEXPECTED EXCEPTION*: %s' % (
                                          pid, strip, IPHASQC['id'][idx], e))
Example #7
def test_log_to_file_origin2(tmpdir):

    local_path = tmpdir.join('test.log')
    log_file = local_path.open('wb')
    log_path = str(local_path.realpath())

    with log.log_to_file(log_path, filter_origin='astropy.wcs'):
        log.error("Error message")
        log.warning("Warning message")

    log_file.close()

    log_file = local_path.open('rb')
    log_entries = log_file.readlines()
    log_file.close()

    assert len(log_entries) == 0
Example #8
def test_log_to_file_origin2(tmpdir):

    local_path = tmpdir.join('test.log')
    log_file = local_path.open('wb')
    log_path = str(local_path.realpath())

    with log.log_to_file(log_path, filter_origin='astropy.wcs'):
        log.error("Error message")
        log.warning("Warning message")

    log_file.close()

    log_file = local_path.open('rb')
    log_entries = log_file.readlines()
    log_file.close()

    assert len(log_entries) == 0
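
The origin tests above end with an empty log file because the messages are emitted from the test module itself, so their origin does not match the filter_origin='astropy.wcs' prefix. A minimal standalone sketch of the same mechanism (the file name and messages are illustrative, not taken from any of the projects above):

from astropy import log

with log.log_to_file('wcs_only.log', filter_origin='astropy.wcs'):
    # This call originates from the current script, so its origin does not
    # start with 'astropy.wcs' and nothing is written to wcs_only.log.
    log.warning("emitted from the calling script, filtered out of the file")
    # Only messages logged from inside astropy.wcs (for example while
    # fixing non-standard FITS headers) would be kept.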
Example #9
def test_log_to_file_level(tmpdir):

    local_path = tmpdir.join('test.log')
    log_file = local_path.open('wb')
    log_path = str(local_path.realpath())

    with log.log_to_file(log_path, filter_level='ERROR'):
        log.error("Error message")
        log.warning("Warning message")

    log_file.close()

    log_file = local_path.open('rb')
    log_entries = log_file.readlines()
    log_file.close()

    assert len(log_entries) == 1
    assert eval(log_entries[0].strip())[-2:] == ('ERROR', 'Error message')
Example #10
def test_log_to_file_encoding(tmpdir, encoding):

    local_path = tmpdir.join('test.log')
    log_path = str(local_path.realpath())

    orig_encoding = conf.log_file_encoding

    conf.log_file_encoding = encoding

    with log.log_to_file(log_path):
        for handler in log.handlers:
            if isinstance(handler, logging.FileHandler):
                if encoding:
                    assert handler.stream.encoding == encoding
                else:
                    assert handler.stream.encoding == locale.getpreferredencoding()

    conf.log_file_encoding = orig_encoding
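
Since log_file_encoding is a regular astropy configuration item, the manual save-and-restore of orig_encoding in the test above can also be written with the config system's set_temp context manager, which restores the previous value automatically. A brief sketch under that assumption (file name illustrative):

from astropy import log
from astropy.logger import conf

with conf.set_temp('log_file_encoding', 'utf-8'):
    with log.log_to_file('example_utf8.log'):
        log.info("written through a UTF-8 encoded file handler")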
Example #11
def test_log_to_file_level(tmpdir):

    local_path = tmpdir.join('test.log')
    log_file = local_path.open('wb')
    log_path = str(local_path.realpath())

    with log.log_to_file(log_path, filter_level='ERROR'):
        log.error("Error message")
        log.warning("Warning message")

    log_file.close()

    log_file = local_path.open('rb')
    log_entries = log_file.readlines()
    log_file.close()

    assert len(log_entries) == 1
    assert eval(log_entries[0].strip())[-2:] == (
        'ERROR', 'Error message')
Example #12
def bandmerge_one(fieldid):
    """Band-merge a single field """
    with log.log_to_file(os.path.join(constants.LOGDIR, 'bandmerging.log')):
        engine = socket.gethostname()+'/'+str(os.getpid())
        log.info('Starting {0} on {1}'.format(fieldid, engine))

        # Which index does the field have in the QC table?
        idx = np.where(IPHASQC.field('id') == fieldid)[0]
        if len(idx) < 1:
            raise IPHASException('{}: error identifying runs'.format(fieldid))

        # Carry out the band-merging
        bm = BandMerge(fieldid,
                       IPHASQC.field('qflag')[idx[0]],
                       IPHASQC.field('run_ha')[idx[0]],
                       IPHASQC.field('run_r')[idx[0]],
                       IPHASQC.field('run_i')[idx[0]])
        status = bm.run()

        log.info('Finished {0} on {1} (returned {2})'.format(fieldid,
                                                             engine,
                                                             status))
        return status
Example #13
def offsets_one(run):
    """Returns all offsets for a given reference exposure.

    Parameters
    ----------
    run : integer or string
        Telescope run number.

    Returns
    -------
    offsets : list of dictionaries
        A sequence of dictionaries for each overlapping run. 
        Each dictionary contains the fields run1/run2/offset/std/n.
    """
    with log.log_to_file(os.path.join(constants.LOGDIR, 'offsets.log')):
        try:
            log.info('{0}: Computing offsets for {1}'.format(util.get_pid(), run))
            om = OffsetMachine(run)
            return om.relative_offsets()
        except Exception as e:
            log.error('{0}: UNEXPECTED EXCEPTION FOR RUN {1}: {2}'.format(util.get_pid(),
                                                                          run,
                                                                          e))
            return [None]
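
Several of the pipeline functions above (concatenate_one, calibrate_one, prepare_one, offsets_one) follow the same pattern: wrap the body of a per-item worker in log.log_to_file so that messages and unexpected exceptions from parallel jobs all end up in one shared log file. A generic sketch of that pattern; process_one, do_work and the /tmp log path are placeholders, not names from the original code:

import os

from astropy import log

LOGDIR = '/tmp'  # placeholder for the pipeline's log directory


def do_work(item):
    return item  # stand-in for the real per-item computation


def process_one(item):
    # All messages from this worker, including failures, land in workers.log
    with log.log_to_file(os.path.join(LOGDIR, 'workers.log')):
        try:
            log.info('Processing {0}'.format(item))
            return do_work(item)
        except Exception as e:
            log.error('{0}: unexpected exception: {1}'.format(item, e))
            return None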
Example #14
def hub_script(timeout=0):
    """
    This main function is executed by the ``samp_hub`` command line tool.
    """

    parser = argparse.ArgumentParser(prog="samp_hub " + __version__)

    parser.add_argument("-k", "--secret", dest="secret", metavar="CODE",
                        help="custom secret code.")

    parser.add_argument("-d", "--addr", dest="addr", metavar="ADDR",
                        help="listening address (or IP).")

    parser.add_argument("-p", "--port", dest="port", metavar="PORT", type=int,
                        help="listening port number.")

    parser.add_argument("-f", "--lockfile", dest="lockfile", metavar="FILE",
                        help="custom lockfile.")

    parser.add_argument("-w", "--no-web-profile", dest="web_profile", action="store_false",
                        help="run the Hub disabling the Web Profile.", default=True)

    parser.add_argument("-P", "--pool-size", dest="pool_size", metavar="SIZE", type=int,
                        help="the socket connections pool size.", default=20)

    timeout_group = parser.add_argument_group("Timeout group",
                                              "Special options to setup hub and client timeouts."
                                              "It contains a set of special options that allows to set up the Hub and "
                                              "clients inactivity timeouts, that is the Hub or client inactivity time "
                                              "interval after which the Hub shuts down or unregisters the client. "
                                              "Notification of samp.hub.disconnect MType is sent to the clients "
                                              "forcibly unregistered for timeout expiration.")

    timeout_group.add_argument("-t", "--timeout", dest="timeout", metavar="SECONDS",
                               help="set the Hub inactivity timeout in SECONDS. By default it "
                               "is set to 0, that is the Hub never expires.", type=int, default=0)

    timeout_group.add_argument("-c", "--client-timeout", dest="client_timeout", metavar="SECONDS",
                               help="set the client inactivity timeout in SECONDS. By default it "
                               "is set to 0, that is the client never expires.", type=int, default=0)

    parser.add_argument_group(timeout_group)

    log_group = parser.add_argument_group("Logging options",
                                          "Additional options which allow to customize the logging output. By "
                                          "default the SAMP Hub uses the standard output and standard error "
                                          "devices to print out INFO level logging messages. Using the options "
                                          "here below it is possible to modify the logging level and also "
                                          "specify the output files where redirect the logging messages.")

    log_group.add_argument("-L", "--log-level", dest="loglevel", metavar="LEVEL",
                           help="set the Hub instance log level (OFF, ERROR, WARNING, INFO, DEBUG).",
                           type=str, choices=["OFF", "ERROR", "WARNING", "INFO", "DEBUG"], default='INFO')

    log_group.add_argument("-O", "--log-output", dest="logout", metavar="FILE",
                           help="set the output file for the log messages.", default="")

    parser.add_argument_group(log_group)

    adv_group = parser.add_argument_group("Advanced group",
                                          "Advanced options addressed to facilitate administrative tasks and "
                                          "allow new non-standard Hub behaviors. In particular the --label "
                                          "options is used to assign a value to hub.label token and is used to "
                                          "assign a name to the Hub instance. "
                                          "The very special --multi option allows to start a Hub in multi-instance mode. "
                                          "Multi-instance mode is a non-standard Hub behavior that enables "
                                          "multiple contemporaneous running Hubs. Multi-instance hubs place "
                                          "their non-standard lock-files within the <home directory>/.samp-1 "
                                          "directory naming them making use of the format: "
                                          "samp-hub-<PID>-<ID>, where PID is the Hub process ID while ID is an "
                                          "internal ID (integer).")

    adv_group.add_argument("-l", "--label", dest="label", metavar="LABEL",
                           help="assign a LABEL to the Hub.", default="")

    adv_group.add_argument("-m", "--multi", dest="mode",
                           help="run the Hub in multi-instance mode generating a custom "
                           "lockfile with a random name.",
                           action="store_const", const='multiple', default='single')

    parser.add_argument_group(adv_group)

    options = parser.parse_args()

    try:

        if options.loglevel in ("OFF", "ERROR", "WARNING", "DEBUG", "INFO"):
            log.setLevel(options.loglevel)

        if options.logout != "":
            context = log.log_to_file(options.logout)
        else:
            class dummy_context:

                def __enter__(self):
                    pass

                def __exit__(self, exc_type, exc_value, traceback):
                    pass
            context = dummy_context()

        with context:

            args = copy.deepcopy(options.__dict__)
            del(args["loglevel"])
            del(args["logout"])

            hub = SAMPHubServer(**args)
            hub.start(False)

            if not timeout:
                while hub.is_running:
                    time.sleep(0.01)
            else:
                time.sleep(timeout)
                hub.stop()

    except KeyboardInterrupt:
        try:
            hub.stop()
        except NameError:
            pass
    except OSError as e:
        print("[SAMP] Error: I/O error({0}): {1}".format(e.errno, e.strerror))
        sys.exit(1)
    except SystemExit:
        pass
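
In the script above, the dummy_context class only provides a no-op context manager when no log file is requested. On Python 3.7+ the same effect could be achieved with contextlib.nullcontext; a small sketch of that substitution (make_log_context is a hypothetical helper, not part of the original script):

import contextlib

from astropy import log


def make_log_context(logout=""):
    """Return a file-logging context, or a no-op one if no file is given."""
    if logout:
        return log.log_to_file(logout)
    return contextlib.nullcontext()


with make_log_context(""):
    log.info("no file given, so this only reaches the default handlers")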
Example #15
from astropy import log
from datetime import datetime
import time
log.setLevel('INFO')
t0 = time.time()

with log.log_to_file("all_analysis_{0}.log".format(datetime.now().isoformat())):

    log.info("Starting make_apex_cubes postprocessing")
    execfile("make_apex_cubes.py")
    do_postprocessing()
    extract_co_subcubes(mergepath=mergepath)

    log.info("Creating pyradex grid.  dt={0}".format(time.time()-t0))
    execfile("pyradex_h2comm_grid.py")

    log.info("Redo dendro.  dt={0}".format(time.time()-t0))
    execfile("redo_dendro.py")
    # execfile("dendro_mask.py")
    # make_dend_303()
    # execfile("dendro_temperature.py")
    # do_dendro_temperatures_both()
    # execfile("dendrotem_plots.py")
    # execfile("make_piecewise_temcube.py")

    log.info("Make ratiotem cubesims.  dt={0}".format(time.time()-t0))
    execfile("make_ratiotem_cubesims.py")
    log.info("Make ratiotem integ.  dt={0}".format(time.time()-t0))
    execfile("make_ratio_integ.py")
    make_ratio_integ()
    make_ratio_max()
Example #16
from meteorflux import db

#######
# MAIN
#######

if __name__ == '__main__':

    # Speed-ups
    autocommit = False
    remove_old = True

    # Check if we have an argument
    if len(sys.argv) < 2:
        raise Exception("Please supply the name of a ZIP file.")
    path = sys.argv[1]

    #log.setLevel('DEBUG')
    with log.log_to_file('ingestion.log'):

        mydb = db.FluxDB(autocommit=autocommit)

        if os.path.isdir(path):
            log.info("%s is a directory, will ingest all *.zip files inside." %
                     path)
            metrec.ingest_dir(path, mydb, remove_old)
        else:
            myzip = metrec.ingest_zip(path, mydb, remove_old)

        mydb.commit()
Example #17
def main(args=None):
    global ang_size
    global image_directory
    global main_reference_image
    global fwhm_input
    global do_conversion
    global do_registration
    global do_convolution
    global do_resampling
    global do_seds
    global do_cleanup
    global kernel_directory
    global im_pixsc
    global rot_angle
    ang_size = ''
    image_directory = ''
    main_reference_image = ''
    fwhm_input = ''
    do_conversion = False
    do_registration = False
    do_convolution = False
    do_resampling = False
    do_seds = False
    do_cleanup = False
    kernel_directory = ''
    im_pixsc = ''

    # note start time for log
    start_time = datetime.now()

    # parse arguments
    if args is not None:
        arglist = args.split()
    else:
        arglist = sys.argv[1:]
    parse_status = parse_command_line(arglist) 
    if parse_status > 0:
        if __name__ == '__main__':
            sys.exit()
        else:
            return

    if (do_cleanup): # cleanup and exit
        cleanup_output_files()
        if __name__ == '__main__':
            sys.exit()
        else:
            return

    # Lists to store information
    global image_data
    global converted_data
    global registered_data
    global convolved_data
    global resampled_data
    global headers
    global filenames
    image_data = []
    converted_data = []
    registered_data = []
    convolved_data = []
    resampled_data = []
    headers = []
    filenames = []

    # if not just cleaning up, make a log file which records input parameters
    logfile_name = 'imagecube_'+ start_time.strftime('%Y-%m-%d_%H%M%S') + '.log'
    with log.log_to_file(logfile_name, filter_origin='imagecube.imagecube'):
        log.info('imagecube started at %s' % start_time.strftime('%Y-%m-%d_%H%M%S'))
        log.info('imagecube called with arguments %s' % arglist)

        # Grab all of the .fits and .fit files in the specified directory
        all_files = glob.glob(image_directory + "/*.fit*")
        # no use doing anything if there aren't any files!
        if len(all_files) == 0:
            warnings.warn('No fits files found in directory %s' % image_directory,
                          AstropyUserWarning)
            if __name__ == '__main__':
                sys.exit()
            else:
                return

        # get images
        for (i, fitsfile) in enumerate(all_files):
            hdulist = fits.open(fitsfile)
            img_extens = find_image_planes(hdulist)
            # NOTETOSELF: right now we are just using the *first* image extension in a file
            #             which is not what we want to do, ultimately.
            header = hdulist[img_extens[0]].header
            image = hdulist[img_extens[0]].data
            # Strip the .fit or .fits extension from the filename so we can append
            # things to it later on
            filename = os.path.splitext(hdulist.filename())[0]
            hdulist.close()
            # check to see if image has reasonable scale & orientation
            # NOTETOSELF: should this really be here? It's not relevant for just flux conversion.
            #             want separate loop over image planes, after finishing file loop
            pixelscale = get_pixel_scale(header)
            fov = pixelscale * float(header['NAXIS1'])
            log.info("Checking %s: is pixel scale (%.2f\") < ang_size (%.2f\") < FOV (%.2f\") ?"
                     % (fitsfile, pixelscale, ang_size, fov))
            if (pixelscale < ang_size < fov):
                try:
                    wavelength = header['WAVELNTH']
                    header['WAVELNTH'] = (wavelength, 'micron')  # add the unit if it's not already there
                    image_data.append(image)
                    headers.append(header)
                    filenames.append(filename)
                except KeyError:
                    warnings.warn('Image %s has no WAVELNTH keyword, will not be used' % filename,
                                  AstropyUserWarning)
            else:
                warnings.warn("Image %s does not meet the above criteria." % filename,
                              AstropyUserWarning)
        # end of loop over files

        # Sort the lists by their WAVELNTH value
        images_with_headers_unsorted = zip(image_data, headers, filenames)
        images_with_headers = sorted(images_with_headers_unsorted,
                                     key=lambda header: header[1]['WAVELNTH'])

        if (do_conversion):
            convert_images(images_with_headers)

        if (do_registration):
            register_images(images_with_headers)

        if (do_convolution):
            convolve_images(images_with_headers)

        if (do_resampling):
            resample_images(images_with_headers, logfile_name)

        if (do_seds):
            output_seds(images_with_headers)

        # all done!
        log.info('All tasks completed.')
        if __name__ == '__main__':
            sys.exit()
        else:
            return
Example #18
def hub_script(timeout=0):
    """
    This main function is executed by the ``samp_hub`` command line tool.
    """

    parser = argparse.ArgumentParser(prog="samp_hub " + __version__)

    parser.add_argument("-k",
                        "--secret",
                        dest="secret",
                        metavar="CODE",
                        help="custom secret code.")

    parser.add_argument("-d",
                        "--addr",
                        dest="addr",
                        metavar="ADDR",
                        help="listening address (or IP).")

    parser.add_argument("-p",
                        "--port",
                        dest="port",
                        metavar="PORT",
                        type=int,
                        help="listening port number.")

    parser.add_argument("-f",
                        "--lockfile",
                        dest="lockfile",
                        metavar="FILE",
                        help="custom lockfile.")

    parser.add_argument("-w",
                        "--no-web-profile",
                        dest="web_profile",
                        action="store_false",
                        help="run the Hub disabling the Web Profile.",
                        default=True)

    parser.add_argument("-P",
                        "--pool-size",
                        dest="pool_size",
                        metavar="SIZE",
                        type=int,
                        help="the socket connections pool size.",
                        default=20)

    timeout_group = parser.add_argument_group(
        "Timeout group", "Special options to setup hub and client timeouts."
        "It contains a set of special options that allows to set up the Hub and "
        "clients inactivity timeouts, that is the Hub or client inactivity time "
        "interval after which the Hub shuts down or unregisters the client. "
        "Notification of samp.hub.disconnect MType is sent to the clients "
        "forcibly unregistered for timeout expiration.")

    timeout_group.add_argument(
        "-t",
        "--timeout",
        dest="timeout",
        metavar="SECONDS",
        help="set the Hub inactivity timeout in SECONDS. By default it "
        "is set to 0, that is the Hub never expires.",
        type=int,
        default=0)

    timeout_group.add_argument(
        "-c",
        "--client-timeout",
        dest="client_timeout",
        metavar="SECONDS",
        help="set the client inactivity timeout in SECONDS. By default it "
        "is set to 0, that is the client never expires.",
        type=int,
        default=0)

    parser.add_argument_group(timeout_group)

    log_group = parser.add_argument_group(
        "Logging options",
        "Additional options which allow to customize the logging output. By "
        "default the SAMP Hub uses the standard output and standard error "
        "devices to print out INFO level logging messages. Using the options "
        "here below it is possible to modify the logging level and also "
        "specify the output files where redirect the logging messages.")

    log_group.add_argument(
        "-L",
        "--log-level",
        dest="loglevel",
        metavar="LEVEL",
        help=
        "set the Hub instance log level (OFF, ERROR, WARNING, INFO, DEBUG).",
        type=str,
        choices=["OFF", "ERROR", "WARNING", "INFO", "DEBUG"],
        default='INFO')

    log_group.add_argument("-O",
                           "--log-output",
                           dest="logout",
                           metavar="FILE",
                           help="set the output file for the log messages.",
                           default="")

    parser.add_argument_group(log_group)

    adv_group = parser.add_argument_group(
        "Advanced group",
        "Advanced options addressed to facilitate administrative tasks and "
        "allow new non-standard Hub behaviors. In particular the --label "
        "options is used to assign a value to hub.label token and is used to "
        "assign a name to the Hub instance. "
        "The very special --multi option allows to start a Hub in multi-instance mode. "
        "Multi-instance mode is a non-standard Hub behavior that enables "
        "multiple contemporaneous running Hubs. Multi-instance hubs place "
        "their non-standard lock-files within the <home directory>/.samp-1 "
        "directory naming them making use of the format: "
        "samp-hub-<PID>-<ID>, where PID is the Hub process ID while ID is an "
        "internal ID (integer).")

    adv_group.add_argument("-l",
                           "--label",
                           dest="label",
                           metavar="LABEL",
                           help="assign a LABEL to the Hub.",
                           default="")

    adv_group.add_argument(
        "-m",
        "--multi",
        dest="mode",
        help="run the Hub in multi-instance mode generating a custom "
        "lockfile with a random name.",
        action="store_const",
        const='multiple',
        default='single')

    parser.add_argument_group(adv_group)

    options = parser.parse_args()

    try:

        if options.loglevel in ("OFF", "ERROR", "WARNING", "DEBUG", "INFO"):
            log.setLevel(options.loglevel)

        if options.logout != "":
            context = log.log_to_file(options.logout)
        else:

            class dummy_context:
                def __enter__(self):
                    pass

                def __exit__(self, exc_type, exc_value, traceback):
                    pass

            context = dummy_context()

        with context:

            args = copy.deepcopy(options.__dict__)
            del (args["loglevel"])
            del (args["logout"])

            hub = SAMPHubServer(**args)
            hub.start(False)

            if not timeout:
                while hub.is_running:
                    time.sleep(0.01)
            else:
                time.sleep(timeout)
                hub.stop()

    except KeyboardInterrupt:
        try:
            hub.stop()
        except NameError:
            pass
    except OSError as e:
        print(f"[SAMP] Error: I/O error({e.errno}): {e.strerror}")
        sys.exit(1)
    except SystemExit:
        pass
Example #19
from meteorflux import metrec
from meteorflux import db

#######
# MAIN
#######

if __name__ == '__main__':

    # Speed-ups
    autocommit = False
    remove_old = True

    # Check if we have an argument
    if len(sys.argv) < 2:
        raise Exception("Please supply the name of a ZIP file.")
    path = sys.argv[1]

    #log.setLevel('DEBUG')
    with log.log_to_file('ingestion.log'):

        mydb = db.FluxDB(autocommit=autocommit)

        if os.path.isdir(path):
            log.info("%s is a directory, will ingest all *.zip files inside." % path)
            metrec.ingest_dir(path, mydb, remove_old)
        else:
            myzip = metrec.ingest_zip(path, mydb, remove_old)

        mydb.commit()
Example #20
                try:
                    result = do_subject(subject_id)
                    agg_res = result.pop('aggregation_result', None)
                    pd.to_pickle(agg_res, 'aggregation_results/{}.pickle.gz'.format(subject_id))
                    pd.to_pickle(result, 'results/{}.pickle.gz'.format(subject_id))
                except Exception as e:
                    log.warning((subject_id, e))
                    q.put(subject_id)
            bar.update(1)

    # for subject_id in tqdm.tqdm(subjects.index, desc='iterating_over_subjects'):
    #     if not overwrite and os.path.exists('results/{}.pickle.gz'.format(subject_id)):
    #         sleep(0.1)
    #         continue
    #     result = do_subject(subject_id)
    #     if result is not None:
    #         agg_res = result.pop('aggregation_result', None)
    #         pd.to_pickle(agg_res, 'aggregation_results/{}.pickle.gz'.format(subject_id))
    #         pd.to_pickle(result, 'results/{}.pickle.gz'.format(subject_id))


if __name__ == '__main__':
    # suppress warnings to keep our beautiful progress bar working
    warnings.simplefilter('ignore', UserWarning)
    log.setLevel('WARN')
    np.seterr(divide='ignore', invalid='ignore')

    os.makedirs('logs', exist_ok=True)
    with log.log_to_file('logs/{}.log'.format(str(datetime.now()).replace(' ', '_'))):
        main()
Example #21
def main(args=None):
    global ang_size
    global image_directory
    global main_reference_image
    global fwhm_input
    global do_conversion
    global do_registration
    global do_convolution
    global do_resampling
    global do_seds
    global do_cleanup
    global kernel_directory
    global im_pixsc  # change variable name
    global rot_angle
    global make_2D
    ang_size = ''
    image_directory = ''
    main_reference_image = ''
    fwhm_input = ''
    do_conversion = False
    do_registration = False
    do_convolution = False
    do_resampling = False
    do_seds = False
    do_cleanup = False
    kernel_directory = ''
    im_pixsc = ''

    make_2D = False

    # note start time for log
    start_time = datetime.now()

    # parse arguments
    if args is not None:
        arglist = args.split(' ')
    else:
        arglist = sys.argv[1:]
    parse_status = parse_command_line(arglist)
    if parse_status > 0:
        if __name__ == '__main__':
            sys.exit()
        else:
            return

    if (do_cleanup):  # cleanup and exit
        cleanup_output_files()
        if __name__ == '__main__':
            sys.exit()
        else:
            return

    # NOTE_FROM_RK : A lot of these contants seem redundant and unused, need to figure out exactly
    #                which ones are used and remove the rest

    # Lists to store information
    global image_data
    global converted_data
    global registered_data
    global convolved_data
    global resampled_data
    global headers
    global filenames

    converted_data = []
    registered_data = []
    convolved_data = []
    resampled_data = []
    filenames = []
    image_data = []
    headers = []

    # append all the images before creating the stack
    hdus = []

    # First HDU in the stack, just to store some information about the stack
    hdr = fits.Header()
    hdr['COMMENT'] = "Image stack created to form the data cube"

    #this is just to allow for a later sort on the HDU list, can be fixed later
    hdr['WAVELNTH'] = 0
    primary_hdu = fits.PrimaryHDU(header=hdr)
    hdus.append(primary_hdu)

    # if not just cleaning up, make a log file which records input parameters
    logfile_name = 'imagecube_' + start_time.strftime(
        '%Y-%m-%d_%H%M%S') + '.log'
    with log.log_to_file(logfile_name, filter_origin='imagecube.imagecube'):
        log.info('imagecube started at %s' %
                 start_time.strftime('%Y-%m-%d_%H%M%S'))
        log.info('imagecube called with arguments %s' % arglist)

        # Grab all of the .fits and .fit files in the specified directory
        all_files = glob.glob(image_directory + "/*.fit*")
        # no use doing anything if there aren't any files!
        if len(all_files) == 0:
            warnings.warn(
                'No fits files found in directory %s' % image_directory,
                AstropyUserWarning)
            if __name__ == '__main__':
                sys.exit()
            else:
                return

        # get images
        for (i, fitsfile) in enumerate(all_files):
            hdulist = fits.open(fitsfile)
            img_extens = find_image_planes(hdulist)
            # NOTETOSELF: right now we are just using the *first* image extension in a file
            #             which is not what we want to do, ultimately.
            header = hdulist[img_extens[0]].header
            image = hdulist[img_extens[0]].data
            # Strip the .fit or .fits extension from the filename so we can append
            # things to it later on
            filename = os.path.splitext(hdulist.filename())[0]
            hdulist.close()
            # check to see if image has reasonable scale & orientation
            # NOTETOSELF: should this really be here? It's not relevant for just flux conversion.
            #             want separate loop over image planes, after finishing file loop
            pixelscale = get_pixel_scale(header)
            fov = pixelscale * float(header['NAXIS1'])
            log.info(
                "Checking %s: is pixel scale (%.2f\") < ang_size (%.2f\") < FOV (%.2f\") ?"
                % (fitsfile, pixelscale, ang_size, fov))
            if (pixelscale < ang_size < fov):
                try:
                    # there seems to be a different name for wavelength in some images, look into it
                    wavelength = header['WAVELNTH']
                    header['WAVELNTH'] = (
                        wavelength, 'micron'
                    )  # add the unit if it's not already there
                    header['FILENAME'] = fitsfile
                    a = fits.ImageHDU(header=header, data=image)
                    hdus.append(a)
                except KeyError:
                    warnings.warn(
                        'Image %s has no WAVELNTH keyword, will not be used' %
                        filename, AstropyUserWarning)
            else:
                warnings.warn(
                    "Image %s does not meet the above criteria." % filename,
                    AstropyUserWarning)
            # end of loop over files

        # Sort the lists by their WAVELNTH value

        hdus.sort(key=lambda x: x.header['WAVELNTH'])

        # this is the image stack, the data structure stores the images in the following format :

        # Primary HDU : the first HDU contains some information on the stack created
        # Image HDU : the next 'n' image HDUs contain the headers and the data of the image files that
        #             need to be processed by IMAGECUBE

        image_stack = fits.HDUList(hdus)

        # At this step, create a kernel stack as well.
        # It should consist of the 5 kernels that need to be used to convolve.
        # Generate the kernel filename by picking up the instruments for each image and the wavelength
        # Further, before convolving each image from this kernel_stack with images from the image_stack
        # Resample them so that the pixel scale match  -- DOUBT
        # Pixel scale of kernel should match with that of the image pixel scale
        kernels = []
        kernels.append([])

        # this is the url from where the kernels will be downloaded
        url0 = "https://www.astro.princeton.edu/~ganiano/Kernels/Ker_2012/Kernels_fits_Files/Low_Resolution/Kernel_LoRes_"

        # all the images will be transformed to the PSF of the largest wavelength
        to_hdu = image_stack[-1]
        to_instr = str(to_hdu.header['INSTRUME'])
        to_wavelnth = to_hdu.header['WAVELNTH']

        # small hack since MIPS channels sometimes have wavelengths of different levels of precision
        if (to_instr == "MIPS"):
            to_wavelnth = math.ceil(to_wavelnth)

        # For every image in our stack, we first look if there's a corresponding
        # kernel file in the dataset provided. If we dont find one, we look for one on the URL
        # mentioned and generated using the instrument name and wavlenegth. If the website does
        # not seem to have the corresponding kernels, we generate a Gaussian kernel using the
        # FWHM input and the corresponding pixel_scale

        for i in range(1, len(image_stack)):
            original_filename = os.path.basename(
                image_stack[i].header['FILENAME'])
            original_directory = os.path.dirname(
                image_stack[i].header['FILENAME'])

            kernel_filename = (original_directory + "/" + kernel_directory +
                               "/" + original_filename + "_kernel.fits")

            log.info("Looking for " + kernel_filename)

            if os.path.exists(kernel_filename):
                log.info("Found a kernel; will convolve with it shortly.")
                # reading the kernel
                kernel_hdulist = fits.open(kernel_filename)
                kernel_image = kernel_hdulist[0].data
                kernel_hdulist.close()
                kernels.append(kernel_image)

            else:
                fr_instr = str(image_stack[i].header['INSTRUME'])
                fr_wavelnth = image_stack[i].header['WAVELNTH']

                if (fr_instr == 'MIPS'):
                    fr_wavelnth = math.ceil(fr_wavelnth)

                # This is the URL generated, from where we will donwload files.

                url = url0 + str(fr_instr) + "_" + str(
                    fr_wavelnth) + "_to_" + str(to_instr) + "_" + str(
                        to_wavelnth) + ".fits.gz"

                filename = url.split("/")[-1]

                # TODO : Look for these files if they're already downloaded so that these downloads do not need to
                # happen multiple times if the same kernel files are required. Ideally, make a kernels folder to handle this
                with open(filename, "wb") as f:
                    r = requests.get(url)
                    if not r.status_code == 404:
                        f.write(r.content)
                        with gzip.open(filename, 'rb') as f_in:
                            with open(filename.split('.gz')[0], 'wb') as f_out:
                                shutil.copyfileobj(f_in, f_out)
                        log.info("File unzipped : ", filename.split('.gz')[0])

                        # resampling of the kernel, so that the file can be used for convolution
                        resampled_kernel = resample_kernel(
                            filename.split('.gz')[0],
                            image_stack[i].header['FILENAME'])
                        kernels.append(resampled_kernel)

                    else:
                        log.info("This file doesn't seem to exist on the "
                                 "website: " + filename)
                        native_pixelscale = get_pixel_scale(
                            image_stack[i].header)
                        sigma_input = (fwhm_input /
                                       (2 * math.sqrt(2 * math.log(2)) *
                                        native_pixelscale))
                        kernels.append(Gaussian2DKernel(sigma_input).array)

        kernel_stack = kernels

        if (do_conversion):
            image_stack = convert_images(image_stack)

        if (do_registration):
            image_stack = register_images(image_stack)

        if (do_convolution):
            image_stack = convolve_images(image_stack, kernel_stack)

        if (do_resampling):
            image_stack = resample_images(image_stack, logfile_name)

        if (do_seds):
            output_seds(image_stack)

        # all done!
        log.info('All tasks completed.')
        if __name__ == '__main__':
            sys.exit()
        else:
            return
Example #22
from astropy import log
from datetime import datetime
import time

log.setLevel('INFO')
t0 = time.time()

with log.log_to_file("all_analysis_{0}.log".format(
        datetime.now().isoformat())):

    log.info("Starting make_apex_cubes postprocessing")
    execfile("make_apex_cubes.py")
    do_postprocessing()
    extract_co_subcubes(mergepath=mergepath)

    log.info("Creating pyradex grid.  dt={0}".format(time.time() - t0))
    execfile("pyradex_h2comm_grid.py")

    log.info("Redo dendro.  dt={0}".format(time.time() - t0))
    execfile("redo_dendro.py")
    # execfile("dendro_mask.py")
    # make_dend_303()
    # execfile("dendro_temperature.py")
    # do_dendro_temperatures_both()
    # execfile("dendrotem_plots.py")
    # execfile("make_piecewise_temcube.py")

    log.info("Make ratiotem cubesims.  dt={0}".format(time.time() - t0))
    execfile("make_ratiotem_cubesims.py")
    log.info("Make ratiotem integ.  dt={0}".format(time.time() - t0))
    execfile("make_ratio_integ.py")