Example #1
    def login(self,
              user=None,
              password=None,
              credentials_file=None,
              verbose=False):
        """Performs a login.
        User and password can be used or a file that contains user name and
        password
        (2 lines: one for user name and the following one for the password)

        Parameters
        ----------
        user : str, mandatory if 'credentials_file' is not provided, default None
            login name
        password : str, mandatory if 'credentials_file' is not provided, default None
            user password
        credentials_file : str, mandatory if no 'user' & 'password' are provided
            file containing user and password in two lines
        verbose : bool, optional, default 'False'
            flag to display information about the process
        """
        if credentials_file is not None:
            # read file: get user & password
            with open(credentials_file, "r") as ins:
                user = ins.readline().strip()
                password = ins.readline().strip()
        if user is None:
            log.info("Invalid user name")
            return
        if password is None:
            log.info("Invalid password")
            return
        self.__user = user
        self.__pwd = password
        self.__dologin(verbose)
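A minimal usage sketch for this login pattern, assuming an astroquery TapPlus-style client (the service URL and credentials below are placeholders):

from astroquery.utils.tap.core import TapPlus

# Hypothetical TAP endpoint; replace with the service actually being queried.
tap = TapPlus(url="https://example.org/tap")

# Either pass the credentials directly...
tap.login(user="myuser", password="mypassword")

# ...or point to a two-line file (user name on line 1, password on line 2).
tap.login(credentials_file="credentials.txt")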
Example #2
    def remove_jobs(self, jobs_list, verbose=False):
        """Removes the specified jobs

        Parameters
        ----------
        jobs_list : str or list of str, mandatory
            job identifiers to be removed
        verbose : bool, optional, default 'False'
            flag to display information about the process

        """
        if jobs_list is None:
            return
        jobsIds = None
        if isinstance(jobs_list, str):
            jobsIds = jobs_list
        elif isinstance(jobs_list, list):
            jobsIds = ','.join(jobs_list)
        else:
            raise Exception("Invalid object type")
        if verbose:
            print("Jobs to be removed: " + str(jobsIds))
        data = "JOB_IDS=" + jobsIds
        subContext = "deletejobs"
        connHandler = self.__getconnhandler()
        response = connHandler.execute_post(subContext, data)
        if verbose:
            print(response.status, response.reason)
            print(response.getheaders())
        isError = connHandler.check_launch_response_status(
            response, verbose, 200)
        if isError:
            log.info(response.reason)
            raise requests.exceptions.HTTPError(response.reason)
def read_out_noise(image, amount, gain=1.0):
    '''
    This function generates the errors introduced by the read-out of the CCD

    Parameters
    ----------

    image : numpy ndarray  # only the shape is used, so this could be replaced by the shape alone
        The image of the CCD, used by the function to extract the shape

    amount : float, int
        The width (standard deviation) of the error distribution; it should be
        related to the background (its square root)

    gain : float, optional
        The value of the gain used by the CCD

    Output
    ------

    noise : numpy ndarray
        The image of the CCD with the generated read-out noise
    '''

    log.info(hist())

    shape = image.shape
    noise = np.random.normal(scale=amount / gain, size=shape)

    return noise
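A self-contained sketch of the same read-out noise model with assumed parameter values (only numpy is required):

import numpy as np

# Simulated CCD frame; only its shape matters here.
image = np.zeros((1024, 1024))

amount = 10.0   # assumed read-out noise level
gain = 1.5      # assumed gain in e-/ADU

# Gaussian read-out noise scaled by the gain, one value per pixel.
noise = np.random.normal(scale=amount / gain, size=image.shape)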
Example #4
    def list_async_jobs(self, verbose=False):
        """Returns all the asynchronous jobs

        Parameters
        ----------
        verbose : bool, optional, default 'False'
            flag to display information about the process

        Returns
        -------
        A list of Job objects
        """
        subContext = "async"
        response = self.__connHandler.execute_get(subContext)
        if verbose:
            print(response.status, response.reason)
            print(response.getheaders())
        isError = self.__connHandler.check_launch_response_status(
            response, verbose, 200)
        if isError:
            log.info(response.reason)
            raise requests.exceptions.HTTPError(response.reason)
        # parse jobs
        jsp = JobListSaxParser(async_job=True)
        jobs = jsp.parseData(response)
        if jobs is not None:
            for j in jobs:
                j.connHandler = self.__connHandler
        return jobs
Example #5
def _convolve_model_dir_2(model_dir, filters, overwrite=False):

    for f in filters:
        if f.name is None:
            raise Exception("filter name needs to be set")
        if f.central_wavelength is None:
            raise Exception("filter central wavelength needs to be set")

    # Create 'convolved' sub-directory if needed
    if not os.path.exists(model_dir + '/convolved'):
        os.mkdir(model_dir + '/convolved')

    # Find all SED files to convolve
    sed_cube = SEDCube.read(os.path.join(model_dir, 'flux.fits'), order='nu')

    par_table = load_parameter_table(model_dir)

    if not np.all(par_table['MODEL_NAME'] == sed_cube.names):
        raise ValueError(
            "Model names in SED cube and parameter file do not match")

    log.info("{0} SEDs found in {1}".format(sed_cube.n_models, model_dir))

    # Set up convolved fluxes
    fluxes = [
        ConvolvedFluxes(model_names=sed_cube.names,
                        apertures=sed_cube.apertures,
                        initialize_arrays=True) for i in range(len(filters))
    ]

    # Set up list of binned filters
    binned_filters = [f.rebin(sed_cube.nu) for f in filters]

    # We do the unit conversion - if needed - at the last minute
    val_factor = sed_cube.val.unit.to(u.mJy)
    unc_factor = sed_cube.unc.unit.to(u.mJy)

    # Loop over apertures
    for i_ap in ProgressBar(range(sed_cube.n_ap)):

        sed_val = sed_cube.val[i_ap].transpose()
        sed_unc = sed_cube.unc[i_ap].transpose()

        for i, f in enumerate(binned_filters):

            response = f.response.astype(sed_val.dtype)

            fluxes[i].flux[:, i_ap] = np.sum(sed_val * response,
                                             axis=1) * val_factor
            fluxes[i].error[:, i_ap] = np.sqrt(
                np.sum((sed_unc * response)**2, axis=1)) * unc_factor

    for i, f in enumerate(binned_filters):

        fluxes[i].central_wavelength = f.central_wavelength
        fluxes[i].apertures = sed_cube.apertures
        fluxes[i].model_names = sed_cube.names

        fluxes[i].write(model_dir + '/convolved/' + f.name + '.fits',
                        overwrite=overwrite)
Example #6
    def login(self, user=None, password=None, credentials_file=None,
              verbose=False):
        """Performs a login.
        User and password can be used or a file that contains user name and
        password
        (2 lines: one for user name and the following one for the password)

        Parameters
        ----------
        user : str, mandatory if 'credentials_file' is not provided, default None
            login name
        password : str, mandatory if 'credentials_file' is not provided, default None
            user password
        credentials_file : str, mandatory if no 'user' & 'password' are provided
            file containing user and password in two lines
        verbose : bool, optional, default 'False'
            flag to display information about the process
        """
        if credentials_file is not None:
            # read file: get user & password
            with open(credentials_file, "r") as ins:
                user = ins.readline().strip()
                password = ins.readline().strip()
        if user is None:
            log.info("Invalid user name")
            return
        if password is None:
            log.info("Invalid password")
            return
        self.__user = user
        self.__pwd = password
        self.__dologin(verbose)
def saturation_controll(image):
    '''
    The value of a pixel can't overcome the maximum value of a 16bit memory (65535) and it is
    called saturated. This function will controll that no pixel would surpass this value and,
    if it does, this function will reset its value to 65535

    Parameter
    ---------

    image : numpy ndarray
        It is the image to controll

    Output
    ------

    image : numpy ndarray
        The image controlled and adjusted
    '''

    log.info(hist())

    size = image.shape

    for i in range(size[0]):
        for j in range(size[1]):
            if image[i, j] >= 65535:
                image[i, j] = 65535
    return image
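The same 16-bit saturation check can be done without explicit loops; a vectorized sketch with numpy that is equivalent in effect to the function above:

import numpy as np

image = np.random.uniform(0, 70000, size=(512, 512))

# Clip every pixel to the 16-bit maximum in one vectorized call.
saturated = np.clip(image, None, 65535)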
Example #8
def save_fits(data, defocus_distance, center, choose):
    '''
    Save a fits with a simple header.

    Parameters
    ----------

    data : numpy array
        Image to save

    defocus_distance : float
        Information about the defocus distance to insert in the header

    center : tuple, array
        Information about the coordinates to insert in the header

    choose : int
        1-based index selecting which simulation name is used in the output file name
    
    '''
    log.info(hist())
 
    name = ['Kolmogorov', 'Speckle_Sum', 'Simple_seeing']
    choose = choose-1
    hdul = fits.PrimaryHDU(data)  # create the primary HDU
    hdr = hdul.header
    hdr['RA'] = (center[0], "Right Ascension in decimal hours")
    hdr['DEC'] = (center[1], "Declination in decimal degrees")
    hdr['IMGTYPE'] = 'object'
    hdr['IMAGETYP'] = 'object'
    hdr['BZERO'] = 32768
    hdr['DEFOCUS'] = (defocus_distance, "[mm] Distance from focal plane")
    hdul.scale('int16', bzero=32768)
    hdul.writeto(f'try_{name[choose]}.fits', overwrite=True)
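A reduced sketch of the FITS-writing step above, using only astropy.io.fits and placeholder header values:

import numpy as np
from astropy.io import fits

data = np.zeros((256, 256))

hdu = fits.PrimaryHDU(data)
hdu.header['RA'] = (5.5, "Right Ascension in decimal hours")     # placeholder value
hdu.header['DEC'] = (-10.0, "Declination in decimal degrees")    # placeholder value
hdu.header['DEFOCUS'] = (1.0, "[mm] Distance from focal plane")  # placeholder value
hdu.scale('int16', bzero=32768)
hdu.writeto('example.fits', overwrite=True)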
Example #9
    def remove_jobs(self, jobs_list, verbose=False):
        """Removes the specified jobs

        Parameters
        ----------
        jobs_list : str or list of str, mandatory
            job identifiers to be removed
        verbose : bool, optional, default 'False'
            flag to display information about the process

        """
        if jobs_list is None:
            return
        jobsIds = None
        if isinstance(jobs_list, str):
            jobsIds = jobs_list
        elif isinstance(jobs_list, list):
            jobsIds = ','.join(jobs_list)
        else:
            raise Exception("Invalid object type")
        if verbose:
            print("Jobs to be removed: " + str(jobsIds))
        data = "JOB_IDS=" + jobsIds
        subContext = "deletejobs"
        connHandler = self.__getconnhandler()
        response = connHandler.execute_post(subContext, data)
        if verbose:
            print(response.status, response.reason)
            print(response.getheaders())
        isError = connHandler.check_launch_response_status(response, verbose, 200)
        if isError:
            log.info(response.reason)
            raise requests.exceptions.HTTPError(response.reason)
Example #10
def validate_schema(filename, version='1.1'):
    """
    Validates the given file against the appropriate VOTable schema.

    Parameters
    ----------
    filename : str
        The path to the XML file to validate

    version : str, optional
        The VOTABLE version to check, which must be a string \"1.0\",
        \"1.1\", \"1.2\" or \"1.3\".  If it is not one of these,
        version \"1.1\" is assumed.

        For version \"1.0\", it is checked against a DTD, since that
        version did not have an XML Schema.

    Returns
    -------
    returncode, stdout, stderr : int, str, str
        Returns the returncode from xmllint and the stdout and stderr
        as strings
    """
    if version not in ('1.0', '1.1', '1.2', '1.3'):
        log.info(f'{filename} has version {version}, using schema 1.1')
        version = '1.1'

    if version in ('1.1', '1.2', '1.3'):
        schema_path = data.get_pkg_data_filename(
            f'data/VOTable.v{version}.xsd')
    else:
        schema_path = data.get_pkg_data_filename(
            'data/VOTable.dtd')

    return validate.validate_schema(filename, schema_path)
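The docstring notes that the return values come from xmllint; a hedged sketch of the equivalent stand-alone check (file names are placeholders and xmllint must be installed):

import subprocess

# Validate an XML file against an XSD schema with xmllint.
result = subprocess.run(
    ['xmllint', '--noout', '--schema', 'VOTable.v1.1.xsd', 'table.xml'],
    capture_output=True, text=True)
print(result.returncode, result.stdout, result.stderr)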
Example #11
def defocus(pupil, defocus_distance, r, wavelenght):
    '''
    Takes the image and creates the FT at some distance

    Parameters
    ----------

    pupil : numpy ndarray
        The image of the aperture to transform

    defocus_distance : float
        The distance of the defocus in mm in range(-2.5,2.5)

    r : numpy ndarray
        The radial variable on the pupil

    wavelenght : float
        The center wavelength in mm

    Output
    ------

    image : numpy ndarray
        The FT of the pupil at defocus_distance from the focus plane, AKA the PSF
    '''
    log.info(hist())
        
    phaseAngle = 1j*defocus_distance*10*np.sqrt((2*np.pi/wavelenght)**2-r**2+0j) #unnecessary 0j but keeping it for complex reasons
    kernel = np.exp(phaseAngle)
    defocusPupil = pupil * kernel
    defocusPSFA = fft.fftshift(fft.fft2(defocusPupil))
    image = np.abs(defocusPSFA)
    return image
Example #12
def intensity_aberration(aperture, scale, modes, pupil):
    '''Creates the amplitude atmospheric aberration.

    Parameters
    ----------

    aperture : int
        The aperture of the telescope in mm

    scale : int
        The scale of the image generated

    modes : int
        Number of Zernike terms to use

    pupil : numpy ndarray
        The image of the aperture


    Output
    ------

    zernike : list of numpy ndarray
        The images of the aberration modes overlapped with the image of the aperture
    '''

    log.info(hist())
        
    nx_size= aperture//scale
    zernike = []
    zernike_array = ZA(modes, nx_size)
    for i in range(modes):
        zernike.append(reshape_aber(zernike_array[i], scale, pupil))
    return zernike
Example #13
def reshape_aber(image, scale, pupil):
    '''
    Adapt the aberration to the pupil size

    Parameters
    ----------

    image : numpy ndarray
        The aberration image to adapt to the pupil size

    scale : int
        The scale of the image generated

    pupil : numpy ndarray
        The image of the aperture


    Output
    ------

    image : numpy ndarray
        The image of the aberration with the right shape to be combined
        with the aperture
    '''

    log.info(hist())
        
    image = np.repeat(np.repeat(image, scale, axis = 0), scale, axis = 1)
    units= int((np.shape(pupil)[0]-np.shape(image)[0])/2)
    image = np.pad(image,(units), mode = 'constant')
    return image
Example #14
def physical_CCD(CCD_structure):
    '''
    Generates a CCD with a given structure

    Parameters
    ----------

    CCD_structure : array
        The various entries hold the physical measurements of the CCD
        [0] : int, float; the x length of the CCD in mm
        [1] : int, float; the y length of the CCD in mm
        [2] : float     ; the length of a pixel in mm

    Outputs
    -------

    CCD : numpy ndarray
        The image of the CCD

    x : numpy ndarray
        The x variable on the CCD

    y : numpy ndarray
        The y variable on the CCD   
    '''
    log.info(hist())
        
    len_y = int(CCD_structure[0]/CCD_structure[2])//2
    len_x = int(CCD_structure[1]/CCD_structure[2])//2
    x, y = np.mgrid[-len_x:len_x,-len_y:len_y]
    r = np.sqrt(x**2+y**2)
    CCD = np.piecewise(r, [r], [0])
    return CCD    
Example #15
    def list_async_jobs(self, verbose=False):
        """Returns all the asynchronous jobs

        Parameters
        ----------
        verbose : bool, optional, default 'False'
            flag to display information about the process

        Returns
        -------
        A list of Job objects
        """
        subContext = "async"
        response = self.__connHandler.execute_get(subContext)
        if verbose:
            print(response.status, response.reason)
            print(response.getheaders())
        isError = self.__connHandler.check_launch_response_status(response,
                                                                  verbose,
                                                                  200)
        if isError:
            log.info(response.reason)
            raise requests.exceptions.HTTPError(response.reason)
        # parse jobs
        jsp = JobListSaxParser(async_job=True)
        jobs = jsp.parseData(response)
        if jobs is not None:
            for j in jobs:
                j.connHandler = self.__connHandler
        return jobs
Example #16
def converter_to_pixel(CCD_resolution, focal_lenght, quantity_to_convert):  # TODO: to be revised
    '''Convert a quantity on the aperture to pixels on CCD '''
    log.info(hist())
        
    converter_rad_arc = 180*3600/np.pi
    quantity_angle = np.arctan(quantity_to_convert/focal_lenght) * converter_rad_arc
    Q_on_CCD = quantity_angle / CCD_resolution
    return Q_on_CCD
Example #17
def _convolve_model_dir_2(model_dir, filters, overwrite=False, memmap=True):

    for f in filters:
        if f.name is None:
            raise Exception("filter name needs to be set")
        if f.central_wavelength is None:
            raise Exception("filter central wavelength needs to be set")

    # Create 'convolved' sub-directory if needed
    if not os.path.exists(model_dir + '/convolved'):
        os.mkdir(model_dir + '/convolved')

    # Find all SED files to convolve
    sed_cube = SEDCube.read(os.path.join(model_dir, 'flux.fits'), order='nu',
                            memmap=memmap)

    par_table = load_parameter_table(model_dir)

    if not np.all(par_table['MODEL_NAME'] == sed_cube.names):
        raise ValueError("Model names in SED cube and parameter file do not match")

    log.info("{0} SEDs found in {1}".format(sed_cube.n_models, model_dir))

    # Set up convolved fluxes
    fluxes = [ConvolvedFluxes(model_names=sed_cube.names,
                              apertures=sed_cube.apertures,
                              initialize_arrays=True) for i in range(len(filters))]

    # Set up list of binned filters
    binned_filters = [f.rebin(sed_cube.nu) for f in filters]

    # We do the unit conversion - if needed - at the last minute
    val_factor = sed_cube.val.unit.to(u.mJy)
    unc_factor = sed_cube.unc.unit.to(u.mJy)

    # Loop over apertures
    for i_ap in ProgressBar(range(sed_cube.n_ap)):

        sed_val = sed_cube.val[:, i_ap, :]
        sed_unc = sed_cube.unc[:, i_ap, :]

        for i, f in enumerate(binned_filters):

            response = f.response.astype(sed_val.dtype)

            fluxes[i].flux[:, i_ap] = np.sum(sed_val * response, axis=1) * val_factor
            fluxes[i].error[:, i_ap] = np.sqrt(np.sum((sed_unc * response) ** 2, axis=1)) * unc_factor

    for i, f in enumerate(binned_filters):

        fluxes[i].central_wavelength = f.central_wavelength
        fluxes[i].apertures = sed_cube.apertures
        fluxes[i].model_names = sed_cube.names

        fluxes[i].write(model_dir + '/convolved/' + f.name + '.fits',
                        overwrite=overwrite)
Example #18
def load_measure():
    '''
    Load the basic data of the telescope and the CCD from a JSON file

    Outputs
    -------
    telescope_structure : list
        a list that contains the information about the telescope
        telescope_structure[0] = f_l : int
            It is the focal length of the telescope in mm            
        telescope_structure[1] = ape : int
            It is the aperture of the telescope in mm
        telescope_structure[2] = obs : int
            It is the central obstruction of the telescope in mm
        telescope_structure[3] = wav : float
            It is the central wavelength of the CCD's sensitivity range in mm

    ccd_structure : array
        an array that contains the information about the CCD
        ccd_structure[0] = C_x : float
            It is the length of the CCD along the x axis in mm
        ccd_structure[1] = C_y : float
            It is the length of the CCD along the y axis in mm
        ccd_structure[2] = pix : float
            It is the length of a single pixel in mm

    ccd_data : list
        a list that contains information about the errors generated in the CCD
        ccd_data[0] = gain : float
            the gain of the CCD
        ccd_data[1] = read_out_electrons : float
            the electrons that generate the read out noise
    '''
    log.info(hist())

    #filename = "Antola_data.json"
    #filename = "San_Pedro_data.json"
    filename = "San_Pedro_data_CCD2.json"
    f = open(filename, "r")
    data = json.load(f)
    Telescope = data['Telescope']
    f_l = Telescope['focal_lenght']
    ape = Telescope['aperture']
    obs = Telescope['obstruction']
    wav = Telescope['wavelenght']
    CCD = data['CCD']
    C_x = CCD['CCD_x']
    C_y = CCD['CCD_y']
    pix = CCD['pixels']
    gain = CCD['gain']
    read_out_electrons = CCD["read_out_electrons"]
    f.close()
    telescope_structure = (f_l, ape, obs, wav)
    ccd_structure = [C_x, C_y, pix]
    ccd_data = (gain, read_out_electrons)
    return telescope_structure, ccd_structure, ccd_data
Example #19
def sensitivity_variations(image, vignetting=True, dust=True):
    '''
    The sensitivity isn't constant, but can vary over the CCD, typically
    with a Gaussian trend, and can be worsened by the presence of dust.
    This function accounts for this by creating the flat frame
    

    Parameters
    ----------

    image : numpy ndarray  # only the shape is used
        The image of the CCD, used by the function to extract the shape

    vignetting : bool, optional
        If True, a Gaussian figure is created on an image with the shape
        of the CCD

    dust : bool, optional
        If True, the donuts generated by dust on the CCD are added to the
        image


    Output
    ------

    sensitivity : numpy ndarray
        The image of the CCD with a wide Gaussian curve
        to simulate the variability of the sensitivity
    '''
    log.info(hist())

    sensitivity = np.zeros_like(image) + 1.0
    shape = np.array(sensitivity.shape)

    if dust or vignetting:
        # I don't know why, but y,x not x,y
        y, x = np.indices(sensitivity.shape)

    if vignetting:  # TODO: the Gaussian centre should be movable
        # Generate very wide gaussian centered on the center of the image,
        # multiply the sensitivity by it.
        #narrowing = np.random.randint(5,10)
        vign_model = Gaussian2D(amplitude=1,
                                x_mean=shape[0] / 2,
                                y_mean=shape[1] / 2,
                                x_stddev=2 * (shape.max()),
                                y_stddev=2 * (shape.max()))
        vign_im = vign_model(x, y)
        sensitivity *= vign_im

    if dust:
        dust_im = add_donuts(image, number=20)
        dust_im = dust_im / dust_im.max()
        sensitivity *= dust_im

    return sensitivity
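A reduced sketch of the vignetting part alone, using astropy's Gaussian2D model on a synthetic frame (sizes are assumptions):

import numpy as np
from astropy.modeling.models import Gaussian2D

sensitivity = np.ones((512, 512))
shape = np.array(sensitivity.shape)
y, x = np.indices(sensitivity.shape)

# Very wide Gaussian centred on the frame to mimic vignetting.
vign_model = Gaussian2D(amplitude=1,
                        x_mean=shape[0] / 2, y_mean=shape[1] / 2,
                        x_stddev=2 * shape.max(), y_stddev=2 * shape.max())
sensitivity *= vign_model(x, y)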
Example #20
def create_config_file(pkg, rootname='astropy', overwrite=False):
    """
    Create the default configuration file for the specified package.
    If the file already exists, it is updated only if it has not been
    modified.  Otherwise the ``overwrite`` flag is needed to overwrite it.

    Parameters
    ----------
    pkg : str
        The package to be updated.
    rootname : str
        Name of the root configuration directory.
    overwrite : bool
        Force updating the file if it already exists.

    Returns
    -------
    updated : bool
        If the profile was updated, `True`, otherwise `False`.

    """

    # local import to prevent using the logger before it is configured
    from astropy.logger import log

    cfgfn = get_config_filename(pkg, rootname=rootname)

    # generate the default config template
    template_content = io.StringIO()
    generate_config(pkg, template_content)
    template_content.seek(0)
    template_content = template_content.read()

    doupdate = True

    # if the file already exists, check that it has not been modified
    if cfgfn is not None and path.exists(cfgfn):
        with open(cfgfn, 'rt', encoding='latin-1') as fd:
            content = fd.read()

        doupdate = is_unedited_config_file(content, template_content)

    if doupdate or overwrite:
        with open(cfgfn, 'wt', encoding='latin-1') as fw:
            fw.write(template_content)
        log.info('The configuration file has been successfully written '
                 f'to {cfgfn}')
        return True
    elif not doupdate:
        log.warning('The configuration file already exists and seems to '
                    'have been customized, so it has not been updated. '
                    'Use overwrite=True if you really want to update it.')

    return False
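A hedged usage sketch, assuming this function is exposed as astropy.config.create_config_file (check the installed astropy version):

from astropy.config import create_config_file

# (Re)generate the default configuration file for the astropy package.
# Returns True if the file was written, False if it was left untouched.
updated = create_config_file('astropy')
print("config updated:", updated)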
Example #21
    def login(self, token=None, store_token=False, reenter_token=False):
        """
        Log session into the MAST portal.

        Parameters
        ----------
        token : string, optional
            Default is None.
            The token to authenticate the user.
            This can be generated at
            https://auth.mast.stsci.edu/token?suggested_name=Astroquery&suggested_scope=mast:exclusive_access.
            If not supplied, it will be prompted for if not in the keyring or set via $MAST_API_TOKEN
        store_token : bool, optional
            Default False.
            If true, MAST token will be stored securely in your keyring.
        reenter_token :  bool, optional
            Default False.
            Asks for the token even if it is already stored in the keyring or $MAST_API_TOKEN environment variable.
            This is the way to overwrite an already stored password on the keyring.
        """

        if token is None and "MAST_API_TOKEN" in os.environ:
            token = os.environ["MAST_API_TOKEN"]

        if token is None:
            token = keyring.get_password("astroquery:mast.stsci.edu.token",
                                         "masttoken")

        if token is None or reenter_token:
            info_msg = "If you do not have an API token already, visit the following link to create one: "
            log.info(info_msg + self.AUTH_URL)
            token = getpass("Enter MAST API Token: ")

        # store token if desired
        if store_token:
            keyring.set_password("astroquery:mast.stsci.edu.token",
                                 "masttoken", token)

        self.session.headers["Accept"] = "application/json"
        self.session.cookies["mast_token"] = token
        info = self.session_info()

        if not info["anon"]:
            log.info("MAST API token accepted, welcome {}".format(
                info["attrib"].get("display_name")))
        else:
            warn_msg = (
                "MAST API token invalid!\n"
                "To create a new API token, visit the following link: " +
                self.AUTH_URL)
            warnings.warn(warn_msg, AuthenticationWarning)

        return not info["anon"]
Example #22
def sky_background_aperture(focal_lenght, aperture, obstruction, trellis=True, atmosphere=False):
    '''
    Calls telescope, intensity_aberration and phase_aberration to create the optical
    figure at the aperture
    
    Parameters
    ----------

    focal_lenght : int
        The focal length of the telescope in mm

    aperture : int
        The aperture of the telescope in mm

    obstruction : int
        The central obstruction of the telescope in mm

    trellis : bool, optional
        If trellis == True the structure that holds the obstruction is drawn,
        else the structure is not drawn

    atmosphere : bool, optional
        If atmosphere == True intensity_aberration and phase_aberration are called,
        else they are not

    Output
    ------

    pupil : numpy ndarray
        The image of the aperture with all the aberrations

    r : numpy ndarray
        The radial variable on the pupil
    '''
    log.info(hist())
        
    pupil, r = telescope(focal_lenght, aperture, obstruction, trellis)

    if atmosphere:
        #zer = intensity_aberration(aperture, scale, modes, pupil)
        #zernike = zer[0]

        #for i in range(modes-1):
        #    a = np.random.random()
        #    zernike += (a/5)*zer[i]

        phase = phase_aberration(aperture, scale, D, r0, L0, pupil) #Small aperture, long exposure, the kolmogorov turbulance on small scale are no
        #phase = np.sqrt(np.abs(phase))**2
        
        kernel = pupil * phase
        pupil = pupil * np.exp(1j*kernel+0j)
    return pupil, r
def Coordinator(coord, center, CCD_structure, catalog):
    '''
    The function takes the coordinates of a star and uses the WCS keywords
    to give back the position on the CCD in pixel

    Parameters
    ----------

    coord : SkyCoord
        It is the coordinates of the star already elaborated by astropy

    center : array
        It must contain the position of the center of the CCD and it is used to obtain
        the distance of the star from it

    CCD_structure : array
        It contains the CCD sizes, the pixel size and, in CCD_structure[3],
        the resolution of the CCD in arcsec/pixel

    catalog : str
        The catalog the coordinates come from ('Simbad' or 'Gaia')

    Outputs
    -------

    x : float
        It is the position of the star on the grid of the CCD along the axis x

    y : float
        It is the position of the star on the grid of the CCD along the axis y
    '''

    log.info(hist())
    if catalog == 'Simbad':
        offset_x = CCD_structure[0]/CCD_structure[2]
        offset_y = CCD_structure[1]/CCD_structure[2]
        w = wcs.WCS(naxis=2)
        w.wcs.crpix = [1, 1]
        w.wcs.crval = [center[0], center[1]]
        w.wcs.ctype = ["RA", "DEC"]    
        x,y = wcs.utils.skycoord_to_pixel(coord, w)
        x = ((x*3600)/CCD_structure[3]) + offset_x/2 #(deg*arc/sec*deg)*arc/sec*pixel+offset
        y = ((y*3600)/CCD_structure[3]) + offset_y/2
    elif catalog == 'Gaia':
        offset_x = CCD_structure[0]/CCD_structure[2]
        offset_y = CCD_structure[1]/CCD_structure[2]
        w = wcs.WCS(naxis=2)
        w.wcs.crpix = [1, 1]
        w.wcs.crval = [center[0], center[1]]
        w.wcs.ctype = ["RA", "DEC"]    
        x,y = wcs.utils.skycoord_to_pixel(coord, w)
        x = ((x*3600)/CCD_structure[3]) + offset_x/2 #(deg*arc/sec*deg)*arc/sec*pixel+offset
        y = ((y*3600)/CCD_structure[3]) + offset_y/2

    return y, x
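A self-contained sketch of the underlying WCS conversion used here, with placeholder coordinates and a standard TAN projection instead of the bare "RA"/"DEC" ctype:

import astropy.units as u
from astropy import wcs
from astropy.coordinates import SkyCoord
from astropy.wcs.utils import skycoord_to_pixel

# Reference pixel at the CCD centre, pointing at placeholder coordinates.
w = wcs.WCS(naxis=2)
w.wcs.crpix = [512, 512]
w.wcs.crval = [150.0, 2.0]            # RA, Dec of the frame centre in degrees
w.wcs.cdelt = [-0.0003, 0.0003]       # assumed pixel scale in deg/pixel
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]

star = SkyCoord(ra=150.01 * u.deg, dec=2.005 * u.deg)
x, y = skycoord_to_pixel(star, w)
print(x, y)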
Example #24
def make_cosmic_rays(image, number, strength=10000):
    '''
    It can happen that during the acquisition of an image some pixels are "saturated"
    by cosmic rays; this function produces a CCD image with this effect

    Parameters
    ----------

    image : numpy ndarray  # only the shape is used
        The image of the CCD, used by the function to extract the shape

    number : int
        This number is the number of cosmic ray within a single image

    strength : int, optional
        This is the intensity of a cosmic ray on a pixel

    Output
    ------

    cosmic_image :numpy ndarray
        The image of the CCD with the pixel overflowed by the cosmic rays generated
    '''

    log.info(hist())

    cosmic_image = np.zeros_like(image)

    # Yes, the order below is correct. The x axis is the column, which
    # is the second index.
    max_y, max_x = cosmic_image.shape

    # Get the smallest dimension to ensure the cosmic rays are within the image
    maximum_pos = np.min(cosmic_image.shape)
    # These will be center points of the cosmic rays, which we place away from
    # the edges to ensure they are visible.
    xy_cosmic = np.random.randint(0.1 * maximum_pos,
                                  0.9 * maximum_pos,
                                  size=[number, 2])

    cosmic_length = 5  # pixels, a little big
    cosmic_width = 2
    theta_cosmic = 2 * np.pi * np.random.rand()
    apertures = EllipticalAperture(xy_cosmic, cosmic_length, cosmic_width,
                                   theta_cosmic)
    masks = apertures.to_mask(method='center')
    for mask in masks:
        cosmic_image += strength * mask.to_image(shape=cosmic_image.shape)

    return cosmic_image
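A trimmed, self-contained sketch of the aperture-mask trick used above (photutils is assumed to be installed; the import path can differ between photutils versions):

import numpy as np
from photutils.aperture import EllipticalAperture

cosmic_image = np.zeros((256, 256))

# A handful of random positions, kept away from the edges.
xy = np.random.randint(25, 230, size=(5, 2))

# Small elongated "streaks" standing in for cosmic-ray hits.
apertures = EllipticalAperture(xy, 5, 2, theta=2 * np.pi * np.random.rand())
for mask in apertures.to_mask(method='center'):
    cosmic_image += 10000 * mask.to_image(shape=cosmic_image.shape)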
Example #25
    def load_table(self, table, verbose=False):
        """Loads the specified table

        Parameters
        ----------
        table : str, mandatory
            full qualified table name (i.e. schema name + table name)
        verbose : bool, optional, default 'False'
            flag to display information about the process

        Returns
        -------
        A table object
        """
        log.info("Retrieving table '"+str(table)+"'")
        connHandler = self.__getconnhandler()
        response = connHandler.execute_get("tables?tables="+table)
        if verbose:
            print(response.status, response.reason)
        isError = connHandler.check_launch_response_status(response, verbose, 200)
        if isError:
            log.info("{} {}".format(response.status, response.reason))
            raise requests.exceptions.HTTPError(response.reason)
        log.info("Parsing table '"+str(table)+"'...")
        tsp = TableSaxParser()
        tsp.parseData(response)
        log.info("Done.")
        return tsp.get_table()
Example #26
    def load_table(self, table, verbose=False):
        """Loads the specified table

        Parameters
        ----------
        table : str, mandatory
            full qualified table name (i.e. schema name + table name)
        verbose : bool, optional, default 'False'
            flag to display information about the process

        Returns
        -------
        A table object
        """
        log.info("Retrieving table '" + str(table) + "'")
        connHandler = self.__getconnhandler()
        response = connHandler.execute_get("tables?tables=" + table)
        if verbose:
            print(response.status, response.reason)
        isError = connHandler.check_launch_response_status(
            response, verbose, 200)
        if isError:
            log.info("{} {}".format(response.status, response.reason))
            raise requests.exceptions.HTTPError(response.reason)
        log.info("Parsing table '" + str(table) + "'...")
        tsp = TableSaxParser()
        tsp.parseData(response)
        log.info("Done.")
        return tsp.get_table()
Example #27
def dark_current(image, current, exposure_time, gain=1.0, hot_pixels=False):
    '''
    This function creates a matrix with the shape of the CCD containing the errors introduced
    by the dark current, drawn from a Poissonian distribution. It also provides for the presence
    of hot pixels in the CCD

    Parameters
    ----------

    image : numpy ndarray  # only the shape is used
        The image of the CCD, used by the function to extract the shape

    current : float
        This is the value of the dark current for 1 second

    exposure_time : int
        The number of seconds used for obtaining the CCD's image

    gain : float, optional
        The value of the gain used in the CCD; the noise in ADU scales
        inversely with it

    hot_pixels : bool, optional
        If True, hot pixels are added to the image

    Output
    ------

    dark_bias : numpy ndarray
        The image of the CCD with the dark current noise generated
    '''

    log.info(hist())

    base_current = current * exposure_time / gain
    dark_bias = np.random.poisson(base_current, size=image.shape)

    if hot_pixels:
        # each pixel has a 0.01% probability of being hot
        y_max, x_max = dark_bias.shape

        numb_hot = int(0.0001 * x_max * y_max)
        hot_x = np.random.randint(0, x_max, size=numb_hot)
        hot_y = np.random.randint(0, y_max, size=numb_hot)

        hot_current = 10000 * current
        dark_bias[hot_y, hot_x] = hot_current * exposure_time  # /gain

    return dark_bias
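A minimal sketch of the dark-current model with assumed numbers, showing the Poisson draw and a few injected hot pixels:

import numpy as np

image = np.zeros((512, 512))
current = 0.1          # assumed dark current per pixel per second
exposure_time = 300    # assumed exposure in seconds
gain = 1.5

dark_bias = np.random.poisson(current * exposure_time / gain,
                              size=image.shape).astype(float)

# Inject a few hot pixels at random positions.
hot_y = np.random.randint(0, image.shape[0], size=10)
hot_x = np.random.randint(0, image.shape[1], size=10)
dark_bias[hot_y, hot_x] = 10000 * current * exposure_time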
Example #28
def telescope_on_CCD(CCD_resolution, binning, telescope_structure, defocus_distance, trellis=True, atmosphere=False):
    '''
    This function calls sky_background_aperture, defocus and image_processing to create the sampled PSF on the CCD

    Parameters
    ----------

    CCD_resolution : float
        The resolution on the CCD in arcsec/pixel

    binning : int
        The binning factor applied when resampling the PSF

    telescope_structure : list
        
        telescope_structure[0] = focal_lenght : int
            The focal length of the telescope in mm

        telescope_structure[1] = aperture : int
            The aperture of the telescope in mm

        telescope_structure[2] = obstruction : int
            The central obstruction of the telescope in mm

        telescope_structure[3] = wavelenght : float
            The center wavelength in mm

    defocus_distance : float
        The distance of the defocus in mm in range(-2.5,2.5)
    
    trellis : bool, optional
        If trellis == True the structure that holds the obstruction is drawn,
        else the structure is not drawn

    atmosphere : bool, optional
        If atmosphere == True intensity_aberration and phase_aberration are called,
        else they are not

    Output
    ------

    image : numpy ndarray
        The image of the PSF
    '''
    log.info(hist())
        
    #units =  converter_to_pixel(CCD_resolution, focal_lenght, 1) #convert 1mm on the aperture in pixel on CCD
    units = CCD_resolution//0.12 #empiric value
    image, r = sky_background_aperture(telescope_structure[0], telescope_structure[1], telescope_structure[2], trellis, atmosphere) #creates the aperture 
    image = defocus(image, defocus_distance, r, telescope_structure[3]) #creates the image on the screen
    image = image_processing(image, binning, telescope_structure[1], atmosphere) 
    return image
Example #29
def image_processing(image, m, aperture, atmosphere):
    '''
    This function takes an image and some parameters to obtain the same image cut and stretched to fit the CCD.
    Also, this function keeps only the essential information, thus reducing the total weight of the final image

    Parameters
    ----------

    image : numpy ndarray
        The image, usually the PSF

    m : int
        The binning factor, used to re-sum the PSF pixels

    aperture : int
        The aperture of the telescope in mm

    atmosphere : bool
        If True, a larger binning factor is used to simulate the long exposure

    Output
    ------

    new_image : numpy ndarray
        The image cleaned with only the good parts and with the right measure
    '''
    log.info(hist())
    image = (np.abs(image))**2
    size = image.shape
    if atmosphere:
        m = m*4
    new_size = (size[0]//m, size[1]//m)
    new_image = np.zeros(new_size)
    
    if m==1:
        new_image = image
    elif m>=2:
        for i in range(new_size[0]):
            for j in range(new_size[1]):
                new_image[[i],[j]] = sum_image(image, i, j, m)    
    else:
        print('binning problem, PSF binning ignored')
        new_image = image
    new_image = (new_image/(np.sum(new_image)))*0.2

    new_image = rotate(new_image, angle=45)
    return new_image  
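The nested loops above implement m x m binning; a vectorized sketch of the same rebinning using a reshape-and-sum (it assumes the image is cropped to a multiple of m):

import numpy as np

image = np.random.random((1024, 1024))
m = 4  # binning factor

ny, nx = image.shape[0] // m, image.shape[1] // m

# Crop to a multiple of m, then sum each m x m block.
binned = image[:ny * m, :nx * m].reshape(ny, m, nx, m).sum(axis=(1, 3))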
Example #30
 def __dologin(self, verbose=False):
     self.__isLoggedIn = False
     response = self.__execLogin(self.__user, self.__pwd, verbose)
     # check response
     connHandler = self.__getconnhandler()
     isError = connHandler.check_launch_response_status(response,
                                                        verbose,
                                                        200)
     if isError:
         log.info("Login error: " + str(response.reason))
         raise requests.exceptions.HTTPError("Login error: " + str(response.reason))
     else:
         # extract cookie
         cookie = self._Tap__findCookieInHeader(response.getheaders())
         if cookie is not None:
             self.__isLoggedIn = True
             connHandler.set_cookie(cookie)
Example #31
    def _build_temp_install(self):
        """
        Install the package into a temporary directory for the purposes of
        testing. This allows us to test the install command, include the
        entry points, and also avoids creating pyc and __pycache__ directories
        inside the build directory
        """

        # On OSX the default path for temp files is under /var, but in most
        # cases on OSX /var is actually a symlink to /private/var; ensure we
        # dereference that link, because pytest is very sensitive to relative
        # paths...

        tmp_dir = tempfile.mkdtemp(prefix=self.package_name + '-test-',
                                   dir=self.temp_root)
        self.tmp_dir = os.path.realpath(tmp_dir)

        log.info(f'installing to temporary directory: {self.tmp_dir}')

        # We now install the package to the temporary directory. We do this
        # rather than build and copy because this will ensure that e.g. entry
        # points work.
        self.reinitialize_command('install')
        install_cmd = self.distribution.get_command_obj('install')
        install_cmd.prefix = self.tmp_dir
        if self.verbose_install:
            self.run_command('install')
        else:
            with _suppress_stdout():
                self.run_command('install')

        # We now get the path to the site-packages directory that was created
        # inside self.tmp_dir
        install_cmd = self.get_finalized_command('install')
        self.testing_path = install_cmd.install_lib

        # Ideally, docs_path is set properly in run(), but if it is still
        # not set here, do not pretend it is, otherwise bad things happen.
        # See astropy/package-template#157
        if self.docs_path is not None:
            new_docs_path = os.path.join(self.testing_path,
                                         os.path.basename(self.docs_path))
            shutil.copytree(self.docs_path, new_docs_path)
            self.docs_path = new_docs_path

        shutil.copy('setup.cfg', self.testing_path)
Example #32
 def __dologin(self, verbose=False):
     self.__isLoggedIn = False
     response = self.__execLogin(self.__user, self.__pwd, verbose)
     # check response
     connHandler = self.__getconnhandler()
     isError = connHandler.check_launch_response_status(
         response, verbose, 200)
     if isError:
         log.info("Login error: " + str(response.reason))
         raise requests.exceptions.HTTPError("Login error: " +
                                             str(response.reason))
     else:
         # extract cookie
         cookie = self._Tap__findCookieInHeader(response.getheaders())
         if cookie is not None:
             self.__isLoggedIn = True
             connHandler.set_cookie(cookie)
Example #33
def phase_aberration(aperture, scale, D, r0, L0, pupil):
    '''
    Creates an image with the Kolmogorov algorithm
    with the phase atmospheric aberration.
    This function works but usually is not used
    because the telescope is limited by the seeing.

    Parameters
    ----------

    aperture : int
        The aperture of the telescope in mm

    scale : int
        The scale of the image generated

    D : int  # redundant with 'aperture'; to be changed and removed

    r0 : float
        The Fried parameter of the 'seeing'

    L0 : float, int
        The outer scale of the 'seeing'

    pupil : numpy ndarray
        The image of the aperture


    Output
    ------

    phase_screen : numpy ndarray
        The image of the turbulence overlapped with the image of the aperture
    '''

    log.info(hist())
    nx_size= aperture//scale
    plx_scale = D/nx_size
    phase_screen = PhaseScreenKolmogorov(nx_size, plx_scale, r0, L0)
    phase_screen.add_row()
    phase_screen = phase_screen.scrn
    phase_screen = reshape_aber(phase_screen, scale, pupil)
    return phase_screen
def sky_brightness(plate_scale, x_pix, y_pix, photo_filters, exposure_time, moon_phase=3):
    '''
    moon_phase : int, optional
        moon_phase can go from 0 (new moon) to 3 (full moon), with intermediate phases 1 (7 days from new moon) and 2 (10 days from new moon)
    '''
    log.info(hist())
    sub = DATA["Sky_brightness"]
    for n, i in enumerate(photo_filters):
        if i == "g":
            photo_filters[n] = "V"
    sky_brightness = [sub[k] for k in photo_filters if k in sub]
    x_arcsec = plate_scale * x_pix
    y_arcsec = plate_scale * y_pix
    Area = x_arcsec * y_arcsec
    magnitudo_ab_sky = []
    magnitudo_ab_sky.append(sky_brightness[0][moon_phase] - 2.5*np.log10(Area))
    photons = magnitudo_to_photons(magnitudo_ab_sky, photo_filters)
    electrons = photons_to_electrons(photons, photo_filters)
    tot = sum(electrons)*exposure_time
    return tot
Example #35
    def search_async_jobs(self, jobfilter=None, verbose=False):
        """Searches for jobs applying the specified filter

        Parameters
        ----------
        jobfilter : JobFilter, optional, default None
            job filter
        verbose : bool, optional, default 'False'
            flag to display information about the process

        Returns
        -------
        A list of Job objects
        """
        # jobs/list?[&session=][&limit=][&offset=][&order=][&metadata_only=true|false]
        subContext = "jobs/async"
        if jobfilter is not None:
            data = jobfilter.createUrlRequest()
            if data is not None:
                subContext = subContext + '?' + self.__appendData(data)
        connHandler = self.__getconnhandler()
        response = connHandler.execute_get(subContext)
        if verbose:
            print(response.status, response.reason)
            print(response.getheaders())
        isError = connHandler.check_launch_response_status(response,
                                                           verbose,
                                                           200)
        if isError:
            log.info(response.reason)
            raise requests.exceptions.HTTPError(response.reason)
        # parse jobs
        jsp = JobSaxParser(async_job=True)
        jobs = jsp.parseData(response)
        if jobs is not None:
            for j in jobs:
                j.set_connhandler(connHandler)
        return jobs
    def _format_output(self, raw_output):
        if self.FORMAT == 'csv':
            split_output = raw_output.splitlines()
            columns = list(
                csv.reader([split_output[0]], delimiter=',', quotechar='"'))[0]
            rows = split_output[1:]

            # Quick test to see if API returned a valid csv file
            # If not, try to return JSON-compliant dictionary.
            test_row = list(csv.reader([rows[0]], delimiter=',',
                                       quotechar='"'))[0]

            if (len(columns) != len(test_row)):
                log.info("The API did not return a valid CSV output! \n"
                         "Outputing JSON-compliant dictionary instead.")

                output = json.loads(raw_output)
                return output

            # Initialize and populate dictionary
            output_dict = {key: [] for key in columns}

            for row in rows:

                split_row = list(
                    csv.reader([row], delimiter=',', quotechar='"'))[0]

                for ct, key in enumerate(columns):
                    output_dict[key].append(split_row[ct])

            # Convert dictionary to Astropy Table.
            output = Table(output_dict, names=columns)

        else:
            # Server response is JSON compliant. Simply
            # convert from raw text to dictionary.
            output = json.loads(raw_output)

        return output
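A compact sketch of the same CSV-to-Table conversion using astropy's Table reader directly (the sample text is made up):

import io
from astropy.table import Table

raw_output = "name,ra,dec\nstar1,10.5,-3.2\nstar2,11.0,-2.8"

# Astropy can parse CSV text directly into a Table.
table = Table.read(io.StringIO(raw_output), format='ascii.csv')
print(table)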
Example #37
def telescope(focal_lenght, aperture, obstruction, trellis=True):
    '''
    Creates the figures of the telescope

    Parameters
    ----------

    focal_lenght : int
        The focal length of the telescope in mm

    aperture : int
        The aperture of the telescope in mm

    obstruction : int
        The central obstruction of the telescope in mm

    trellis : bool, optional
        If trellis == True the structure that holds the obstruction is drawn,
        else the structure is not drawn

    Output
    ------

    pupil : numpy ndarray
        The image of the aperture

    r : numpy ndarray
        The radial variable on the pupil   
    '''
    log.info(hist())

    x,y = np.mgrid[-aperture/2:aperture/2, -aperture/2:aperture/2]  # creates the 2D grid for the 2D function
    r = np.sqrt(x**2+y**2)
    pupil = np.piecewise(r, [r < aperture/2, r > aperture/2, r < obstruction/2], [1, 0, 0]) #creates the aperture
    if trellis:
        structure_x = np.piecewise(x, [x, x>1, x<0], [0,1,1])  # creates the structure that holds the obstruction
        structure_y = np.piecewise(y, [y, y>1, y<0], [0,1,1])
        pupil = pupil*(structure_x*structure_y)
    return pupil, r
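A self-contained sketch of the annular pupil construction, without the spider structure (aperture and obstruction sizes are assumptions):

import numpy as np

aperture = 200     # assumed aperture diameter in mm
obstruction = 60   # assumed central obstruction diameter in mm

x, y = np.mgrid[-aperture / 2:aperture / 2, -aperture / 2:aperture / 2]
r = np.sqrt(x**2 + y**2)

# 1 inside the annulus, 0 outside the aperture and inside the obstruction.
pupil = ((r < aperture / 2) & (r >= obstruction / 2)).astype(float)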
Example #38
    def find_radius_cumul(self, fraction):
        """
        Find for each model the radius containing a fraction of the flux.

        Parameters
        ----------
        fraction: float
            The fraction to use when determining the radius
        """

        log.info("Calculating radii containing %g%s of the flux" % (fraction * 100., '%'))

        radius = np.zeros(self.n_models, dtype=self.flux.dtype) * u.au

        if self.apertures is None:

            return radius

        else:

            required = fraction * self.flux[:, -1]

            # Linear interpolation - need to loop over apertures for vectorization
            for ia in range(len(self.apertures) - 1):
                calc = (required >= self.flux[:, ia]) & (required < self.flux[:, ia + 1])
                radius[calc] = (required[calc] - self.flux[calc, ia]) / \
                               (self.flux[calc, ia + 1] - self.flux[calc, ia]) * \
                               (self.apertures[ia + 1] - self.apertures[ia]) + \
                    self.apertures[ia]

            calc = (required < self.flux[:, 0])
            radius[calc] = self.apertures[0]

            calc = (required >= self.flux[:, -1])
            radius[calc] = self.apertures[-1]

            return radius
Example #39
    def __load_tables(self, only_names=False, include_shared_tables=False,
                      verbose=False):
        """Loads all public tables

        Parameters
        ----------
        only_names : bool, TAP+ only, optional, default 'False'
            True to load table names only
        include_shared_tables : bool, TAP+, optional, default 'False'
            True to include shared tables
        verbose : bool, optional, default 'False'
            flag to display information about the process

        Returns
        -------
        A list of table objects
        """
        # share_info=true&share_accessible=true&only_tables=true
        flags = ""
        addedItem = False
        if only_names:
            flags = "only_tables=true"
            addedItem = True
        if include_shared_tables:
            if addedItem:
                flags += "&"
            flags += "share_accessible=true"
            addedItem = True
        log.info("Retrieving tables...")
        if flags != "":
            response = self.__connHandler.execute_get("tables?"+flags)
        else:
            response = self.__connHandler.execute_get("tables")
        if verbose:
            print(response.status, response.reason)
        isError = self.__connHandler.check_launch_response_status(response,
                                                                  verbose,
                                                                  200)
        if isError:
            log.info("{} {}".format(response.status, response.reason))
            raise requests.exceptions.HTTPError(response.reason)
        log.info("Parsing tables...")
        tsp = TableSaxParser()
        tsp.parseData(response)
        log.info("Done.")
        return tsp.get_tables()
Example #40
    def find_radius_sigma(self, fraction):
        """
        Find for each model a fractional surface brightness radius

        This is the outermost radius where the surface brightness is larger
        than a fraction of the peak surface brightness.

        Parameters
        ----------
        fraction: float
            The fraction to use when determining the radius
        """

        log.info("Calculating %g%s peak surface brightness radii" % (fraction * 100., '%'))

        sigma = np.zeros(self.flux.shape, dtype=self.flux.dtype)
        sigma[:, 0] = self.flux[:, 0] / self.apertures[0] ** 2
        sigma[:, 1:] = (self.flux[:, 1:] - self.flux[:, :-1]) / \
                       (self.apertures[1:] ** 2 - self.apertures[:-1] ** 2)

        maximum = np.max(sigma, axis=1)

        radius = np.zeros(self.n_models, dtype=self.flux.dtype) * u.au

        # Linear interpolation - need to loop over apertures backwards for vectorization
        for ia in range(len(self.apertures) - 2, -1, -1):
            calc = (sigma[:, ia] > fraction * maximum) & (radius == 0.)
            radius[calc] = (sigma[calc, ia] - fraction * maximum[calc]) / \
                           (sigma[calc, ia] - sigma[calc, ia + 1]) * \
                           (self.apertures[ia + 1] - self.apertures[ia]) + \
                self.apertures[ia]

        calc = sigma[:, -1] > fraction * maximum
        radius[calc] = self.apertures[-1]

        return radius
Example #41
def validate_schema(filename, version='1.1'):
    """
    Validates the given file against the appropriate VOTable schema.

    Parameters
    ----------
    filename : str
        The path to the XML file to validate

    version : str, optional
        The VOTABLE version to check, which must be a string \"1.0\",
        \"1.1\", \"1.2\" or \"1.3\".  If it is not one of these,
        version \"1.1\" is assumed.

        For version \"1.0\", it is checked against a DTD, since that
        version did not have an XML Schema.

    Returns
    -------
    returncode, stdout, stderr : int, str, str
        Returns the returncode from xmllint and the stdout and stderr
        as strings
    """
    if version not in ('1.0', '1.1', '1.2', '1.3'):
        log.info('{0} has version {1}, using schema 1.1'.format(
            filename, version))
        version = '1.1'

    if version in ('1.1', '1.2', '1.3'):
        schema_path = data.get_pkg_data_filename(
            'data/VOTable.v{0}.xsd'.format(version))
    else:
        schema_path = data.get_pkg_data_filename(
            'data/VOTable.dtd')

    return validate.validate_schema(filename, schema_path)
Example #42
    def load_async_job(self, jobid=None, name=None, verbose=False):
        """Loads an asynchronous job

        Parameters
        ----------
        jobid : str, mandatory if no name is provided, default None
            job identifier
        name : str, mandatory if no jobid is provided, default None
            job name
        verbose : bool, optional, default 'False'
            flag to display information about the process

        Returns
        -------
        A Job object
        """
        if name is not None:
            jobfilter = Filter()
            jobfilter.add_filter('name', name)
            jobs = self.search_async_jobs(jobfilter)
            if jobs is None or len(jobs) < 1:
                log.info("No job found for name '"+str(name)+"'")
                return None
            jobid = jobs[0].get_jobid()
        if jobid is None:
            log.info("No job identifier found")
            return None
        subContext = "async/" + str(jobid)
        response = self.__connHandler.execute_get(subContext)
        if verbose:
            print(response.status, response.reason)
            print(response.getheaders())
        isError = self.__connHandler.check_launch_response_status(response,
                                                                  verbose,
                                                                  200)
        if isError:
            log.info(response.reason)
            raise requests.exceptions.HTTPError(response.reason)
        # parse job
        jsp = JobSaxParser(async_job=True)
        job = jsp.parseData(response)[0]
        job.set_connhandler(self.__connHandler)
        # load results
        job.get_results()
        return job
Example #43
    def launch_job_async(self, query, name=None, output_file=None,
                         output_format="votable", verbose=False,
                         dump_to_file=False, background=False,
                         upload_resource=None, upload_table_name=None):
        """Launches an asynchronous job

        Parameters
        ----------
        query : str, mandatory
            query to be executed
        name : str, optional, default None
            custom name for the job
        output_file : str, optional, default None
            file name where the results are saved if dumpToFile is True.
            If this parameter is not provided, the jobid is used instead
        output_format : str, optional, default 'votable'
            results format
        verbose : bool, optional, default 'False'
            flag to display information about the process
        dump_to_file : bool, optional, default 'False'
            if True, the results are saved in a file instead of using memory
        background : bool, optional, default 'False'
            when the job is executed in asynchronous mode, this flag specifies
            whether the execution will wait until results are available
        upload_resource: str, optional, default None
            resource to be uploaded to UPLOAD_SCHEMA
        upload_table_name: str, required if uploadResource is provided, default None
            resource temporary table name associated to the uploaded resource

        Returns
        -------
        A Job object
        """
        if verbose:
            print("Launched query: '"+str(query)+"'")
        if upload_resource is not None:
            if upload_table_name is None:
                raise ValueError(
                    "Table name is required when a resource is uploaded")
            response = self.__launchJobMultipart(query,
                                                 upload_resource,
                                                 upload_table_name,
                                                 output_format,
                                                 "async",
                                                 verbose,
                                                 name)
        else:
            response = self.__launchJob(query,
                                        output_format,
                                        "async",
                                        verbose,
                                        name)
        isError = self.__connHandler.check_launch_response_status(response,
                                                                  verbose,
                                                                  303)
        job = Job(async_job=True, query=query, connhandler=self.__connHandler)
        suitableOutputFile = self.__getSuitableOutputFile(True,
                                                          output_file,
                                                          response.getheaders(),
                                                          isError,
                                                          output_format)
        job.outputFile = suitableOutputFile
        job.set_response_status(response.status, response.reason)
        job.parameters['format'] = output_format
        if isError:
            job.set_failed(True)
            if dump_to_file:
                self.__connHandler.dump_to_file(suitableOutputFile, response)
            raise requests.exceptions.HTTPError(response.reason)
        else:
            location = self.__connHandler.find_header(
                response.getheaders(),
                "location")
            jobid = self.__getJobId(location)
            if verbose:
                print("job " + str(jobid) + ", at: " + str(location))
            job.jobid = jobid
            job.remoteLocation = location
            if not background:
                if verbose:
                    print("Retrieving async. results...")
                # saveResults or getResults will block (not background)
                if dump_to_file:
                    job.save_results(verbose)
                else:
                    job.get_results()
                    log.info("Query finished.")
        return job
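A hedged usage sketch for launch_job_async, including a table upload. The client class, service URL, query, and table names are illustrative assumptions.
# Usage sketch; client class, URL, query and table names are illustrative.
from astroquery.utils.tap.core import TapPlus

tap = TapPlus(url="https://gea.esac.esa.int/tap-server/tap")
job = tap.launch_job_async(
    "SELECT TOP 10 * FROM tap_upload.my_table",
    upload_resource="my_table.xml",   # local VOTable to upload
    upload_table_name="my_table",     # exposed as tap_upload.my_table
    dump_to_file=True)                # write results to a file
print(job.outputFile)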
Example #44
0
    def _download_file(self, url, local_filepath, timeout=None, auth=None,
                       continuation=True, cache=False, method="GET", head_safe=False, **kwargs):
        """
        Download a file.  Resembles `astropy.utils.data.download_file` but uses
        the local ``_session``
        """

        if head_safe:
            response = self._session.request("HEAD", url, timeout=timeout, stream=True,
                                             auth=auth, **kwargs)
        else:
            response = self._session.request(method, url, timeout=timeout, stream=True,
                                             auth=auth, **kwargs)

        response.raise_for_status()
        if 'content-length' in response.headers:
            length = int(response.headers['content-length'])
        else:
            length = None

        if (os.path.exists(local_filepath)
                and 'Accept-Ranges' in response.headers
                and continuation):
            open_mode = 'ab'

            existing_file_length = os.stat(local_filepath).st_size
            if length is not None and existing_file_length >= length:
                # all done!
                log.info("Found cached file {0} with expected size {1}."
                         .format(local_filepath, existing_file_length))
                return
            elif existing_file_length == 0:
                open_mode = 'wb'
            else:
                if length is not None:
                    log.info("Continuing download of file {0}, with {1} bytes to "
                             "go ({2}%)".format(local_filepath,
                                                length - existing_file_length,
                                                (length - existing_file_length) / length * 100))
                else:
                    log.info("Continuing download of file {0} (total size "
                             "unknown)".format(local_filepath))

                # bytes are indexed from 0:
                # https://en.wikipedia.org/wiki/List_of_HTTP_header_fields#range-request-header
                end = "{0}".format(length-1) if length is not None else ""
                self._session.headers['Range'] = "bytes={0}-{1}".format(existing_file_length,
                                                                        end)

                response = self._session.request(method, url, timeout=timeout, stream=True,
                                                 auth=auth, **kwargs)
                response.raise_for_status()

        elif cache and os.path.exists(local_filepath):
            if length is not None:
                statinfo = os.stat(local_filepath)
                if statinfo.st_size != length:
                    log.warning("Found cached file {0} with size {1} that is "
                                "different from expected size {2}"
                                .format(local_filepath,
                                        statinfo.st_size,
                                        length))
                    open_mode = 'wb'
                else:
                    log.info("Found cached file {0} with expected size {1}."
                             .format(local_filepath, statinfo.st_size))
                    response.close()
                    return
            else:
                log.info("Found cached file {0}.".format(local_filepath))
                response.close()
                return
        else:
            open_mode = 'wb'
            if head_safe:
                response = self._session.request(method, url, timeout=timeout, stream=True,
                                                 auth=auth, **kwargs)
                response.raise_for_status()

        blocksize = astropy.utils.data.conf.download_block_size

        bytes_read = 0

        # Only show progress bar if logging level is INFO or lower.
        if log.getEffectiveLevel() <= 20:
            progress_stream = None  # Astropy default
        else:
            progress_stream = io.StringIO()

        with ProgressBarOrSpinner(
                length, ('Downloading URL {0} to {1} ...'
                         .format(url, local_filepath)),
                file=progress_stream) as pb:
            with open(local_filepath, open_mode) as f:
                for block in response.iter_content(blocksize):
                    f.write(block)
                    # count the bytes actually written (the last block may be short)
                    bytes_read += len(block)
                    if length is not None:
                        pb.update(bytes_read if bytes_read <= length else
                                  length)
                    else:
                        pb.update(bytes_read)

        response.close()
        return response
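Since _download_file is a private helper, a realistic call happens from an object that already carries a requests session. The sketch below assumes the helper is inherited from astroquery.query.BaseQuery; the URL and file name are illustrative.
# Usage sketch; the BaseQuery base class, URL and file name are assumptions.
from astroquery.query import BaseQuery

class MiniClient(BaseQuery):
    """Minimal client that only reuses the inherited download helper."""

client = MiniClient()
client._download_file("https://example.org/data/cube.fits",
                      "cube.fits",
                      timeout=300,
                      cache=True,          # reuse a complete cached copy
                      continuation=True)   # resume partial downloads when possible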
Example #45
0
def convolve_model_dir(model_dir, filters, overwrite=False):
    """
    Convolve all the model SEDs in a model directory

    Parameters
    ----------
    model_dir : str
        The path to the model directory
    filters : list
        A list of :class:`~sedfitter.filter.Filter` objects to use for the
        convolution
    overwrite : bool, optional
        Whether to overwrite the output files
    """

    for f in filters:
        if f.name is None:
            raise Exception("filter name needs to be set")
        if f.central_wavelength is None:
            raise Exception("filter central wavelength needs to be set")

    # Create 'convolved' sub-directory if needed
    if not os.path.exists(model_dir + '/convolved'):
        os.mkdir(model_dir + '/convolved')

    # Find all SED files to convolve
    sed_files = (glob.glob(model_dir + '/seds/*.fits.gz') +
                 glob.glob(model_dir + '/seds/*/*.fits.gz') +
                 glob.glob(model_dir + '/seds/*.fits') +
                 glob.glob(model_dir + '/seds/*/*.fits'))

    par_table = load_parameter_table(model_dir)

    if len(sed_files) == 0:
        raise Exception("No SEDs found in %s" % model_dir)
    else:
        log.info("{0} SEDs found in {1}".format(len(sed_files), model_dir))

    # Find out apertures
    first_sed = SED.read(sed_files[0])
    n_ap = first_sed.n_ap
    apertures = first_sed.apertures

    # Set up convolved fluxes
    fluxes = [ConvolvedFluxes(model_names=np.zeros(len(sed_files),
                                                   dtype='U30' if six.PY3 else 'S30'),
                              apertures=apertures,
                              initialize_arrays=True)
              for i in range(len(filters))]

    # Set up list of binned filters
    binned_filters = []
    binned_nu = None

    # Loop over SEDs

    b = ProgressBar(len(sed_files))

    for im, sed_file in enumerate(sed_files):

        log.debug('Convolving {0}'.format(os.path.basename(sed_file)))

        # Read in SED
        s = SED.read(sed_file, unit_freq=u.Hz, unit_flux=u.mJy, order='nu')

        # Check if filters need to be re-binned
        try:
            assert binned_nu is not None
            np.testing.assert_array_almost_equal_nulp(s.nu.value, binned_nu.value, 100)
        except AssertionError:
            log.info('Rebinning filters')
            binned_filters = [f.rebin(s.nu) for f in filters]
            binned_nu = s.nu

        b.update()

        # Convolve
        for i, f in enumerate(binned_filters):

            fluxes[i].central_wavelength = f.central_wavelength
            fluxes[i].apertures = apertures
            fluxes[i].model_names[im] = s.name

            if n_ap == 1:
                fluxes[i].flux[im] = np.sum(s.flux * f.response)
                fluxes[i].error[im] = np.sqrt(np.sum((s.error * f.response) ** 2))
            else:
                fluxes[i].flux[im, :] = np.sum(s.flux * f.response, axis=1)
                fluxes[i].error[im] = np.sqrt(np.sum((s.error * f.response) ** 2, axis=1))

    for i, f in enumerate(binned_filters):
        fluxes[i].sort_to_match(par_table['MODEL_NAME'])
        fluxes[i].write(model_dir + '/convolved/' + f.name + '.fits',
                        overwrite=overwrite)
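A hedged usage sketch for convolve_model_dir. The filter attributes follow what the loop above reads (name, central_wavelength, and a frequency-sampled response used by rebin); the no-argument Filter constructor, the normalize() call, and all numeric values are assumptions for illustration.
# Usage sketch; filter data and the model directory name are illustrative.
import numpy as np
from astropy import units as u
from sedfitter.filter import Filter

f = Filter()
f.name = 'J_2MASS'
f.central_wavelength = 1.235 * u.micron
f.nu = np.linspace(2.0e14, 2.8e14, 100) * u.Hz            # response sampled in frequency
f.response = np.exp(-((f.nu.value - 2.4e14) / 2e13) ** 2)  # toy response curve
f.normalize()   # assumed helper that normalizes the response

convolve_model_dir('models_r06', [f], overwrite=True)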
Example #46
0
def check_conesearch_sites(destdir=os.curdir, verbose=True, parallel=True,
                           url_list='default'):
    """
    Validate Cone Search Services.

    .. note::

        URLs are unescaped prior to validation.

        Only check queries with ``<testQuery>`` parameters.
        Does not perform meta-data and erroneous queries.

    Parameters
    ----------
    destdir : str, optional
        Directory to store output files. Will be created if does
        not exist. Existing files with these names will be deleted
        or replaced:

            * conesearch_good.json
            * conesearch_warn.json
            * conesearch_exception.json
            * conesearch_error.json

    verbose : bool, optional
        Print extra info to log.

    parallel : bool, optional
        Enable multiprocessing.

    url_list : list of string, optional
        Only check these access URLs against
        ``astroquery.vo_conesearch.validator.conf.conesearch_master_list``
        and ignore the others, which will not appear in output files.
        By default, check those in
        ``astroquery.vo_conesearch.validator.conf.conesearch_urls``.
        If `None`, check everything.

    Raises
    ------
    IOError
        Invalid destination directory.

    timeout
        URL request timed out.

    ValidationMultiprocessingError
        Multiprocessing failed.

    """
    if url_list == 'default':
        url_list = conf.conesearch_urls

    if (not isinstance(destdir, six.string_types) or len(destdir) == 0 or
            (os.path.exists(destdir) and not os.path.isdir(destdir))):
        raise IOError('Invalid destination directory')  # pragma: no cover

    if not os.path.exists(destdir):
        os.mkdir(destdir)

    # Output dir created by votable.validator
    out_dir = os.path.join(destdir, 'results')

    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    # Output files
    db_file = OrderedDict()
    db_file['good'] = os.path.join(destdir, 'conesearch_good.json')
    db_file['warn'] = os.path.join(destdir, 'conesearch_warn.json')
    db_file['excp'] = os.path.join(destdir, 'conesearch_exception.json')
    db_file['nerr'] = os.path.join(destdir, 'conesearch_error.json')

    # JSON dictionaries for output files
    js_tree = {}
    for key in db_file:
        js_tree[key] = VOSDatabase.create_empty()

        # Delete existing files, if any, to be on the safe side.
        # Else can cause confusion if program exited prior to
        # new files being written but old files are still there.
        if os.path.exists(db_file[key]):  # pragma: no cover
            os.remove(db_file[key])
            if verbose:
                log.info('Existing file {0} deleted'.format(db_file[key]))

    # Master VO database from registry. Silence all the warnings.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        js_mstr = VOSDatabase.from_registry(
            conf.conesearch_master_list, encoding='binary',
            show_progress=verbose)

    # Validate only a subset of the services.
    if url_list is not None:
        # Make sure URL is unique and fixed.
        url_list = set(map(
            unescape_all,
            [cur_url.encode('utf-8') if isinstance(cur_url, str) else cur_url
             for cur_url in url_list]))
        uniq_rows = len(url_list)
        url_list_processed = []  # To track if given URL is valid in registry
        if verbose:
            log.info('Only {0}/{1} site(s) are validated'.format(uniq_rows,
                                                                 len(js_mstr)))
    # Validate all services.
    else:
        uniq_rows = len(js_mstr)

    key_lookup_by_url = {}

    # Process each catalog in the registry.
    for cur_key, cur_cat in js_mstr.get_catalogs():
        cur_url = cur_cat['url'].encode('utf-8')

        # Skip if:
        #   a. not a Cone Search service
        #   b. not in given subset, if any
        if ((cur_cat['cap_type'] != b'conesearch') or
                (url_list is not None and cur_url not in url_list)):
            continue

        # Use testQuery to return non-empty VO table with max verbosity.
        testquery_pars = parse_cs(cur_cat['ivoid'], cur_cat['cap_index'])
        cs_pars_arr = ['{}={}'.format(key, testquery_pars[key]).encode('utf-8')
                       for key in testquery_pars]
        cs_pars_arr += [b'VERB=3']

        # Track the service.
        key_lookup_by_url[cur_url + b'&'.join(cs_pars_arr)] = cur_key
        if url_list is not None:
            url_list_processed.append(cur_url)

    # Give warning if any of the user given subset is not in the registry.
    if url_list is not None:
        url_list_skipped = url_list - set(url_list_processed)
        n_skipped = len(url_list_skipped)
        if n_skipped > 0:
            warn_str = '{0} not found in registry! Skipped:\n'.format(
                n_skipped)
            for cur_url in url_list_skipped:
                warn_str += '\t{0}\n'.format(cur_url)
            warnings.warn(warn_str, AstropyUserWarning)

    all_urls = list(key_lookup_by_url)
    timeout = data.conf.remote_timeout
    map_args = [(out_dir, url, timeout) for url in all_urls]

    # Validate URLs
    if parallel:
        pool = multiprocessing.Pool()
        try:
            mp_list = pool.map(_do_validation, map_args)
        except Exception as exc:  # pragma: no cover
            raise ValidationMultiprocessingError(
                'An exception occurred during parallel processing '
                'of validation results: {0}'.format(exc))
    else:
        mp_list = map(_do_validation, map_args)

    # Categorize validation results
    for r in mp_list:
        db_key = r['out_db_name']
        cat_key = key_lookup_by_url[r.url]
        cur_cat = js_mstr.get_catalog(cat_key)
        _copy_r_to_cat(r, cur_cat)
        js_tree[db_key].add_catalog(cat_key, cur_cat)

    # Write to HTML
    html_subsets = result.get_result_subsets(mp_list, out_dir)
    html.write_index(html_subsets, all_urls, out_dir)
    if parallel:
        html_subindex_args = [(out_dir, html_subset, uniq_rows)
                              for html_subset in html_subsets]
        pool.map(_html_subindex, html_subindex_args)
    else:
        for html_subset in html_subsets:
            _html_subindex((out_dir, html_subset, uniq_rows))

    # Write to JSON
    n = {}
    n_tot = 0
    for key in db_file:
        n[key] = len(js_tree[key])
        n_tot += n[key]
        js_tree[key].to_json(db_file[key], overwrite=True)
        if verbose:
            log.info('{0}: {1} catalog(s)'.format(key, n[key]))

    # Checksum
    if verbose:
        log.info('total: {0} out of {1} catalog(s)'.format(n_tot, uniq_rows))

    if n['good'] == 0:  # pragma: no cover
        warnings.warn(
            'No good sites available for Cone Search.', AstropyUserWarning)
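A hedged usage sketch of check_conesearch_sites; the import path is an assumption based on how the validator package is referenced in the docstring above.
# Usage sketch; the import path is an assumption.
from astroquery.vo_conesearch.validator.validate import check_conesearch_sites

# Validate the default subset of Cone Search URLs, serially, writing the
# conesearch_*.json files into ./validation_results
check_conesearch_sites(destdir='validation_results', verbose=True,
                       parallel=False, url_list='default')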
Example #47
0
def convolve_model_dir_monochromatic(model_dir, overwrite=False, max_ram=8,
                                     wav_min=-np.inf * u.micron, wav_max=np.inf * u.micron):
    """
    Convolve all the model SEDs in a model directory

    Parameters
    ----------
    model_dir : str
        The path to the model directory
    overwrite : bool, optional
        Whether to overwrite the output files
    max_ram : float, optional
        The maximum amount of RAM that can be used (in GB)
    wav_min : float, optional
        The minimum wavelength to consider. Only wavelengths above this value
        will be output.
    wav_max : float, optional
        The maximum wavelength to consider. Only wavelengths below this value
        will be output.
    """

    modpar = parfile.read(os.path.join(model_dir, 'models.conf'), 'conf')
    if modpar.get('version', 1) > 1:
        raise ValueError("monochromatic filters are no longer used for new-style model directories")

    # Create 'convolved' sub-directory if needed
    if not os.path.exists(model_dir + '/convolved'):
        os.mkdir(model_dir + '/convolved')

    # Find all SED files to convolve
    sed_files = (glob.glob(model_dir + '/seds/*.fits.gz') +
                 glob.glob(model_dir + '/seds/*/*.fits.gz') +
                 glob.glob(model_dir + '/seds/*.fits') +
                 glob.glob(model_dir + '/seds/*/*.fits'))

    par_table = load_parameter_table(model_dir)

    # Find number of models
    n_models = len(sed_files)

    if n_models == 0:
        raise Exception("No SEDs found in %s" % model_dir)
    else:
        log.info("{0} SEDs found in {1}".format(n_models, model_dir))

    # Find out apertures and wavelengths
    first_sed = SED.read(sed_files[0])
    n_ap = first_sed.n_ap
    apertures = first_sed.apertures
    n_wav = first_sed.n_wav
    wavelengths = first_sed.wav

    # For model grids that are very large, it is not possible to compute all
    # fluxes in one go, so we need to process in chunks in wavelength space.
    chunk_size = min(n_wav, int(np.floor(max_ram * 1024. ** 3 / (4. * 2. * n_models * n_ap))))

    if chunk_size == n_wav:
        log.info("Producing all monochromatic files in one go")
    else:
        log.info("Producing monochromatic files in chunks of {0}".format(chunk_size))

    filters = Table()
    filters['wav'] = wavelengths
    filters['filter'] = np.zeros(wavelengths.shape, dtype='S10')

    # Figure out range of wavelength indices to use
    # (wavelengths array is sorted in reverse order)
    jlo = n_wav - 1 - (wavelengths[::-1].searchsorted(wav_max) - 1)
    jhi = n_wav - 1 - wavelengths[::-1].searchsorted(wav_min)
    chunk_size = min(chunk_size, jhi - jlo + 1)

    # Loop over wavelength chunks
    for jmin in range(jlo, jhi, chunk_size):

        # Find upper wavelength to compute
        jmax = min(jmin + chunk_size - 1, jhi)

        log.info('Processing wavelengths {0} to {1}'.format(jmin, jmax))

        # Set up convolved fluxes
        fluxes = [ConvolvedFluxes(model_names=np.zeros(n_models,
                                                       dtype='U30' if six.PY3 else 'S30'),
                                  apertures=apertures,
                                  initialize_arrays=True)
                  for i in range(chunk_size)]

        b = ProgressBar(len(sed_files))

        # Loop over SEDs
        for im, sed_file in enumerate(sed_files):

            b.update()

            log.debug('Processing {0}'.format(os.path.basename(sed_file)))

            # Read in SED
            s = SED.read(sed_file, unit_freq=u.Hz, unit_flux=u.mJy, order='nu')

            # Convolve
            for j in range(chunk_size):

                fluxes[j].central_wavelength = wavelengths[j + jmin]
                fluxes[j].apertures = apertures
                fluxes[j].model_names[im] = s.name

                if n_ap == 1:
                    fluxes[j].flux[im] = s.flux[0, j + jmin]
                    fluxes[j].error[im] = s.error[0, j + jmin]
                else:
                    fluxes[j].flux[im, :] = s.flux[:, j + jmin]
                    fluxes[j].error[im, :] = s.error[:, j + jmin]

        for j in range(chunk_size):
            fluxes[j].sort_to_match(par_table['MODEL_NAME'])
            fluxes[j].write('{0:s}/convolved/MO{1:03d}.fits'.format(model_dir, j + jmin + 1),
                            overwrite=overwrite)
            filters['filter'][j + jmin] = "MO{0:03d}".format(j + jmin + 1)

    return filters
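A hedged usage sketch for the monochromatic variant; the model directory name is illustrative, and the wavelength window only restricts which monochromatic files are written.
# Usage sketch; the model directory name is illustrative.
from astropy import units as u

filters = convolve_model_dir_monochromatic('models_r06',
                                           overwrite=True,
                                           max_ram=2,               # GB
                                           wav_min=1 * u.micron,
                                           wav_max=100 * u.micron)
print(filters)   # table mapping wavelengths to MOnnn convolved files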
Example #48
0
def _convolve_model_dir_1(model_dir, filters, overwrite=False):

    for f in filters:
        if f.name is None:
            raise Exception("filter name needs to be set")
        if f.central_wavelength is None:
            raise Exception("filter central wavelength needs to be set")

    # Create 'convolved' sub-directory if needed
    if not os.path.exists(model_dir + "/convolved"):
        os.mkdir(model_dir + "/convolved")

    # Find all SED files to convolve
    sed_files = (
        glob.glob(model_dir + "/seds/*.fits.gz")
        + glob.glob(model_dir + "/seds/*/*.fits.gz")
        + glob.glob(model_dir + "/seds/*.fits")
        + glob.glob(model_dir + "/seds/*/*.fits")
    )

    par_table = load_parameter_table(model_dir)

    if len(sed_files) == 0:
        raise Exception("No SEDs found in %s" % model_dir)
    else:
        log.info("{0} SEDs found in {1}".format(len(sed_files), model_dir))

    # Find out apertures
    first_sed = SED.read(sed_files[0])
    n_ap = first_sed.n_ap
    apertures = first_sed.apertures

    # Set up convolved fluxes
    fluxes = [
        ConvolvedFluxes(
            model_names=np.zeros(len(sed_files), dtype="U30" if six.PY3 else "S30"),
            apertures=apertures,
            initialize_arrays=True,
        )
        for i in range(len(filters))
    ]

    # Set up list of binned filters
    binned_filters = []
    binned_nu = None

    # Loop over SEDs

    b = ProgressBar(len(sed_files))

    for im, sed_file in enumerate(sed_files):

        log.debug("Convolving {0}".format(os.path.basename(sed_file)))

        # Read in SED
        s = SED.read(sed_file, unit_freq=u.Hz, unit_flux=u.mJy, order="nu")

        # Check if filters need to be re-binned
        try:
            assert binned_nu is not None
            np.testing.assert_array_almost_equal_nulp(s.nu.value, binned_nu.value, 100)
        except AssertionError:
            log.info("Rebinning filters")
            binned_filters = [f.rebin(s.nu) for f in filters]
            binned_nu = s.nu

        b.update()

        # Convolve
        for i, f in enumerate(binned_filters):

            fluxes[i].central_wavelength = f.central_wavelength
            fluxes[i].apertures = apertures
            fluxes[i].model_names[im] = s.name

            if n_ap == 1:
                fluxes[i].flux[im] = np.sum(s.flux * f.response)
                fluxes[i].error[im] = np.sqrt(np.sum((s.error * f.response) ** 2))
            else:
                fluxes[i].flux[im, :] = np.sum(s.flux * f.response, axis=1)
                fluxes[i].error[im] = np.sqrt(np.sum((s.error * f.response) ** 2, axis=1))

    for i, f in enumerate(binned_filters):
        fluxes[i].sort_to_match(par_table["MODEL_NAME"])
        fluxes[i].write(model_dir + "/convolved/" + f.name + ".fits", overwrite=overwrite)