Example #1
def tmp_OGLE(event):
    # set I/O shell display
    tcol, tun, tend, tit = "\033[0m\033[34m", "\033[0m\033[1m\033[34m", "\033[0m", "\033[0m\033[3m"
    
    # default values
    t0, tE  = 8630., 30.
    
    # check whether OGLE data set exists
    if event[0] == 'O':
        ## warning: with a MOA event this picks the wrong data set!
        evnum = event[4:8]
        
        # get microlensing season
        year = '20' + event[2:4]
        
        # ftp OGLE
        try:
            ogledir = '/ogle/ogle4/ews/' + year + '/' + 'blg-' + evnum + '/'
            ftp = ftplib.FTP('ftp.astrouw.edu.pl', 'anonymous', '')
            ftp.cwd(ogledir)
            ftp.retrbinary('RETR params.dat', open('./' + event + '/data/params.dat', 'wb').write)
            ftp.quit()
        
            # get OGLE PSPL best-fit parameters
            df = pd.read_csv('./' + event + '/data/params.dat', names=['col'])
            t0 = float([a for a in df['col'].values[6].split(' ') if a][1]) - 2450000.
            tE = float([a for a in df['col'].values[7].split(' ') if a][1])

            printi(tcol + "OGLE best-fit PSPL parameters " + tit + "(t0={0}, tE={1})".format(t0, tE) + tcol + " found" + tend)
        except Exception:
            printw(tun + "Failed to connect to ftp.astrouw.edu.pl" + tend)
 
    return t0, tE
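A minimal usage sketch (hypothetical event name; assumes the module-level imports and the printi helper used above, and that './OB180222/data/' already exists):

# Hypothetical usage: seed the PSPL parameters (t0, tE) from the OGLE EWS,
# falling back to the defaults when the FTP lookup fails.
t0, tE = tmp_OGLE('OB180222')
printi("initial PSPL guess: t0={0}, tE={1}".format(t0, tE))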
Example #2
def fill_webpage(inhtml, outhtml, fillwith):
    """Fill the template html page with new html content
        
        Example
        -------
        >>> fill_webpage('in.html', 'out.html', ('_keyword_', var))
        >>> fill_webpage('in.html', 'out.html', [('_keyword1_', var1), ('_keyword2_', var2)])
        """
    # set I/O shell display
    tcol, tun, tend, tit = "\033[0m\033[33m", "\033[0m\033[1m\033[33m", "\033[0m", "\033[0m\033[3m"
    
    # important - add these lines between <header> tags --> put this in the event.html code
    """<link rel="stylesheet" href="http://cdn.pydata.org/bokeh/release/bokeh-0.12.9.min.css" type="text/css" /><script type="text/javascript" src="http://cdn.pydata.org/bokeh/release/bokeh-0.12.9.min.js"></script><!-- _SCRIPT_ -->"""
    
    # fill html page
    with open(inhtml, 'r') as f:
        cont = f.read()
    if isinstance(fillwith, tuple):
        fillwith = [fillwith]
    for fill in fillwith:
        key, new = fill
        cont = cont.replace(key, new)
    with open(outhtml, 'w') as f:
        f.write(cont)

    # verbose
    printi(tcol + "Fill document : " + tit + "'" + inhtml + "'" + tcol + " >>> " + tit + "'" + outhtml + "'" + tend)
    for fill in fillwith:
        printd(tit + "  (keyword: " + fill[0] + ")" + tend)
Example #3
def rsync_ARTEMiS_alerts():
    """Syncrhonize alerts with ARTEMiS Signalmen
        
        Returns:
        --------
        0 : download succeeded,
        2 : download failed.
        """
    # set I/O shell display
    tcol, tun, tend, tit = "\033[0m\033[34m", "\033[0m\033[1m\033[34m", "\033[0m", "\033[0m\033[3m"
    
    #    rsync -azu [email protected]::FollowUp/FollowUpZ.signalmen .
    printi(tcol + "Rsync ARTEMiS Signalmen" + tend)
    
    # password in file
    pswd = 'pswd_signalmen.txt'
    # archive format: XKB180039I, with X=dataset, I=filter
    proc = subprocess.Popen('rsync -az --password-file=' + pswd + ' [email protected]::FollowUp/FollowUpZ.signalmen ARTEMiS_alerts.txt', shell=True, executable='/bin/bash', stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    proc.wait()
    
    # test whether data were downloaded, and if not treat as exception
    stdout, stderr = proc.communicate()
    
    if stderr:
        printw(tun + "Failed to rsync ARTEMiS Signalmen\n" + tit + stderr + tend)
        return 1
    else:
        printd(tit + "  (list of alerts downloaded from ATERMiS Signalmen)" + tend)
        return 0
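A short sketch of acting on the return code (hypothetical; the alerts file name comes from the rsync target above):

# Hypothetical usage: parse ARTEMiS_alerts.txt only when the rsync succeeded.
if rsync_ARTEMiS_alerts() == 0:
    with open('ARTEMiS_alerts.txt', 'r') as f:
        alerts = f.read().splitlines()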
Example #4
def fit_subgrids(gridsprefix,
                 fitsprefix,
                 datasets,
                 gridlist,
                 init=None,
                 trange=None,
                 nprocs=1,
                 overwrite=False):
    """Fit light curve on magnification curves grids
        
        IDEE : on pourra utiliser un random des 1000 pour faire
        des maps a faible resolution !
        """
    # set I/O shell display
    tcol, tit, tend = "\033[0m\033[31m", "\033[0m\033[3m", "\033[0m"

    # check whether input names do not contain extensions
    if '.hdf5' in gridsprefix:
        raise NameError("grid prefix should not contain .hdf5 extension")
    if '.hdf5' in fitsprefix:
        raise NameError("fit prefix should not contain .hdf5 extension")

    # delete existing HDF5 files in fits/
    if overwrite:
        printd(tcol + "Removing previous HDF5 files from fits/" + tend)
        proc = subprocess.Popen('rm -rf  ' + fitsprefix + '*.hdf5',
                                shell=True,
                                executable='/bin/bash')
        proc.wait()

    # multiprocessing: create grid list names
    listmclibs, listlclibs = list(), list()
    for gridi in gridlist:

        mclib = gridsprefix + '_' + str(gridi) + '.hdf5'
        lclib = fitsprefix + '_' + str(gridi) + '.hdf5'
        listmclibs.append(mclib)
        listlclibs.append(lclib)

    # multiprocessing: create arguments of _process_fits, and create workers pool
    printi(tcol + "Starting manager with PID " + tit + str(os.getpid()) +
           tcol + " running " + tit + str(nprocs) + tcol + " process(es)" +
           tend)
    listargs = zip(listmclibs, listlclibs, repeat(datasets), repeat(trange),
                   repeat(init))
    pool = Pool(processes=nprocs)
    pool.imap_unordered(_process_fits, listargs)

    # collect results
    pool.close()
    pool.join()
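A sketch of a typical call (prefixes, dataset path, grid indices and time range are illustrative):

# Hypothetical usage: fit one dataset on sub-grids 0-3 with 4 worker
# processes, overwriting any previous fits/*.hdf5 results.
fit_subgrids('grids/gridlib', 'fits/gridlib',
             ['OB180222/data/OGLE180222I.dat'], [0, 1, 2, 3],
             trange=(8600., 8700.), nprocs=4, overwrite=True)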
Example #5
def rsync_ARTEMiS_data(event, syncdir):
    """Syncrhonize data with ARTEMiS
        
        Returns:
        --------
        0 : synchronization succeeded and
            local data were updated,
        1 : synchronization succeeded but
            local data were already up to date,
        2 : synchronization failed.
        """
    # set I/O shell display
    tcol, tun, tend, tit = "\033[0m\033[34m", "\033[0m\033[1m\033[34m", "\033[0m", "\033[0m\033[3m"
    
    # check event name format
    if len(event) != 8:
        raise ValueError("wrong format for event name: correct is 'xByyzzzz'")

    # get microlensing season
    year = '20' + event[2:4]
    
    #    rsync -azu [email protected]::Data2018/OOB180088I.dat .
    printi(tcol + "Rsync ARTEMiS database" + tend)

    # password in file
    pswd = 'pswd_artemis.txt'

    # archive format: XKB180039I, with X=dataset, I=filter
    artemisevent = event.replace('M', 'K')
    proc = subprocess.Popen('rsync -avzu -L --password-file=' + pswd + ' [email protected]::Data' + year + '/*' + artemisevent + '*.dat ' + syncdir + ' --stats | grep "files transferred:"', shell=True, executable='/bin/bash', stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    proc.wait()
    
    # get std output and error
    stdout, stderr = proc.communicate()
    
    # treat rsync outputs
    if stderr:
        printw(tun + "Failed to rsync ARTEMiS\n" + tit + stderr + tend)
        return 2
    else:
        # check if files were updated
        if ' 0' in stdout:
            printd(tit + "  (files have not changed on ARTEMiS server)" + tend)
            return 1
        else:
            printd(tit + "  (local files have been updated from ATERMiS server)" + tend)
            return 0
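The three return codes map naturally onto refit / skip / warn (sketch, event name illustrative):

# Hypothetical usage: refit only when local files actually changed.
status = rsync_ARTEMiS_data('OB180222', 'OB180222/data/sync/')
if status == 0:
    pass  # new data: rerun the fits
elif status == 1:
    pass  # already up to date: nothing to do
else:
    pass  # rsync failed: keep previous results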
Example #6
def ftp_miiriads(infile, outfile):
    """Send filled html page to miiriads.iap.fr web site"""
    # set I/O shell display
    tcol, tun, tend, tit = "\033[0m\033[34m", "\033[0m\033[1m\033[34m", "\033[0m", "\033[0m\033[3m"
    
    # verbose
    printi(tcol + "Uploading to miiriads.iap.fr " + tit + "'" + infile + "'" + tcol + " >>> " + tit + "'" + outfile + "'" + tend)
    
    # setup ftp connection
    try:
        ftp = ftplib.FTP('webeur2.iap.fr', 'miiriads', '1OrMore!!')
        path, filename = split(outfile)
        ftp.cwd(path)
        ftp.storbinary('STOR {}'.format(filename), open(infile, 'rb'))
        ftp.quit()
    except Exception:
        printw(tun + "Failed to upload '" + tit + infile + tun + "' to miiriads.iap.fr" + tend)
Example #7
def makehtml_model(html_model_template, event, models, datasets, trange=None, crossrefs=None):
    """Create individual model of individual event html page"""
    
    # set I/O shell display
    tcol, tun, tend, tit = "\033[0m\033[35m", "\033[0m\033[1m\033[35m", "\033[0m", "\033[0m\033[3m"
    
    # generate one page per individual model
    mc = MagnificationCurve()
    lc = LightCurve(datasets, trange=trange, dmag=None)
    plt_width = 800
    
    for i in range(len(models)):
        
        model = models[i]
        
        # html page name
        printi(tcol + "Creating " + tit + "'" + event + '/' + event + '_' + str(i + 1) + '.html' + "'" + tcol + " web page" + tend)

        # fit light curve using best-fit parameters
        mc.create(model)
        lc.fit(mc, 'central', '+', init=None)
    
        # light curve plot template (bind x-axis on light curve plot, with automatic t-range)
        plc = bplt.figure(width=plt_width, plot_height=500, title='Light curve', tools=["pan", "wheel_zoom", "box_zoom", "undo", "redo", "reset", "save"], active_drag="pan", active_scroll="wheel_zoom")
        
        # automatic t-range (tmin/tmax are also used below for the trajectory arrow)
        tmin = lc.params['t0'] - 1.5 * lc.params['tE']
        tmax = lc.params['t0'] + 1.5 * lc.params['tE']
        if trange:
            plc.x_range = Range1d(tmin, tmax)

        # residuals plot template
        pres = bplt.figure(width=plt_width, plot_height=200, title='Residuals', x_range=plc.x_range, y_range=(-0.1, 0.1), tools=["pan", "wheel_zoom", "reset", "save"], active_drag="pan", active_scroll="wheel_zoom")

        # caustics plot template
        plt_height = 400
        pcc = bplt.figure(width=plt_width, plot_height=plt_height, title='Source trajectory', tools=["pan", "wheel_zoom", "box_zoom", "undo", "redo", "reset", "save"], active_drag="pan", active_scroll="wheel_zoom", match_aspect=True, x_range=(- float(plt_width)/plt_height, float(plt_width)/plt_height), y_range=(-1, 1))

        # plot caustics
        cc = Caustics(mc.params['s'], mc.params['q'], N=256, cusp=True)
        z = np.ravel(cc.zetac)
        pcc.circle(np.real(z), np.imag(z), size=1, alpha=1., color='red')
   
        # plot trajectory
        traj = lambda t: (-lc.params['u0'] * np.sin(lc.params['alpha']) + np.cos(lc.params['alpha']) * t, lc.params['u0'] * np.cos(lc.params['alpha']) + np.sin(lc.params['alpha']) * t)

        pcc.line(traj(np.array([-10., 10.]))[0], traj(np.array([-10., 10.]))[1], color='firebrick', line_width=1)
   
        pcc.add_layout(Arrow(line_width=1, line_color='firebrick', end=VeeHead(size=10, line_color='firebrick'), x_start=traj((tmin - lc.params['t0'])/lc.params['tE'])[0], y_start=traj((tmin - lc.params['t0'])/lc.params['tE'])[1], x_end=traj((tmax - lc.params['t0'])/lc.params['tE'])[0], y_end=traj((tmax - lc.params['t0'])/lc.params['tE'])[1]))

        # compute blended flux
        flux = lc._mu * lc.params['Fs'][0] + lc.params['Fb'][0]
        t = lc._t * lc.params['tE'] + lc.params['t0']

        # remove points with negative flux and NaN
        arg = np.logical_not(np.isnan(flux)) & (flux > 0.)
        flux = flux[arg]
        t = t[arg]

        # invert light curve y-axis
        plc.y_range = Range1d(1.02 * np.max(-2.5 * np.log10(flux)), 0.98 * np.min(-2.5 * np.log10(flux)))

        # theoretical light curve
        plc.line(t, -2.5 * np.log10(flux), color='firebrick')

        # add data on plot
        atel = iter(color_tel)
        for k in range(len(lc._datalist)):
            dat = lc._datalist[k]
            # color and legend
            _ , datai = split(dat[6])
            tel = datai[0]
            if tel not in color_tel:
                tel = atel.next()
            color = color_tel[tel]
 
            # deblending
            magnif = (np.power(10., - dat[2] / 2.5) - lc.params['Fb'][k]) / lc.params['Fs'][k]
            flux = magnif * lc.params['Fs'][0] + lc.params['Fb'][0]
            
            # remove points with negative flux and NaN, and compute mag
            date = dat[1]
            errbar = dat[3]
            arg = np.logical_not(np.isnan(flux)) & (flux > 0.)
            flux = flux[arg]
            date = date[arg]
            errbar = errbar[arg]
            mag = - 2.5 * np.log10(flux)

            mu = lc.content['mc']((date - lc.params['t0']) / lc.params['tE'])
            res = - 2.5 * np.log10(mu * lc.params['Fs'][0] + lc.params['Fb'][0]) - mag

            # plot light curve with error bars
            plc.circle(date, mag, size=4, alpha=1., color=color, legend=tel)

            y_err_x, y_err_y = [], []
            for px, py, err in zip(date, mag, errbar):
                y_err_x.append((px, px))
                y_err_y.append((py - err, py + err))
            plc.multi_line(y_err_x, y_err_y, color=color, alpha=0.4)
           
            # plot residuals with error bars
            pres.line((0., 1e6), (0., 0.), color='black', line_width=0.2)
            
            pres.circle(date, res, size=4, alpha=1., color=color)
            
            y_err_x, y_err_y = [], []
            for px, py, err in zip(date, res, errbar):
                y_err_x.append((px, px))
                y_err_y.append((py - err, py + err))
            pres.multi_line(y_err_x, y_err_y, color=color, alpha=0.4)
            
            # color points at data dates on trajectory
            pcc.circle(traj((date - lc.params['t0'])/lc.params['tE'])[0], traj((date - lc.params['t0'])/lc.params['tE'])[1], size=4, alpha=1., color=color)

        # group plots in one column and generate best-fits html code
        p = column([plc, pres, pcc])
        script, divini = components(p)
        div = '<div align="center">' + divini + '</div>'

        # create search map html code
        wsmap = '<img class="imageStyle" alt="' + event + '" src="./' + event + '.png' + '" width="800" />'

        # label version
        version = str(version_info).replace(', ', '.').replace('(', '(version ')

        # list of best-fit parameters to download
        fitrank = event + '_rank.txt'
        
        # date and time of event's update
        if os.path.isfile(event + '/lastupdate.txt'):
            with open(event + '/lastupdate.txt', 'r') as f:
                fittime = f.read()
        else:
            fittime = ''
        
        # get multiple references of event
        if crossrefs:
            evname = getcrossrefs(crossrefs, event)
        else:
            evname = event
    
        # write parameters
        eventdet = 'Microlensing event ' + evname + ' : details of binary-lens model ' + str(i + 1)

        lpar = '<i>Model parameters</i>'

        lpar += '<p align="left" style="color:#B22222">$s$={0:<.8f}<BR>$q$={1:<.8f}<BR>$u_0$={2:<.8f}<BR>$\\alpha$={3:<.5f}<BR>$t_E$={4:<.3f}<BR>$t_0$={5:<.6f}<BR>$\\rho$={6:<.8f}<BR>'.format(lc.params['s'], lc.params['q'], lc.params['u0'], lc.params['alpha'], lc.params['tE'], lc.params['t0'], lc.params['rho'])
        if 'piEN' in lc.params.keys():
            lpar += '$\\pi_{{E, N}}$={0:<.8f}<BR>'.format(lc.params['piEN'])
        if 'piEE' in lc.params.keys():
            lpar += '$\\pi_{{E, E}}$={0:<.8f}<BR>'.format(lc.params['piEE'])
        if 'ds' in lc.params.keys():
            lpar += '$ds/dt$={0:<.8f}<BR>'.format(lc.params['ds'])
        if 'dalpha' in lc.params.keys():
            lpar += '$d\\alpha/dt$={0:<.8f}<BR>'.format(lc.params['dalpha'])
        lpar += '</p><BR>'

        lpar += '<i>Light curve, residuals, source trajectory and caustics</i>'
    
        # create model selection list
        selmod = '<select name="display" onchange="location=this.value;">'
        selmod += '<option value="/miiriads/MiSMap/Events/' + event + '.html">Overview</option>'
        for j in range(len(models)):
            if j == i:
                selmod += '<option value="/miiriads/MiSMap/Events/' + event + '_' + str(j + 1) + '.html" selected>Model ' + str(j + 1) + '</option>'
            else:
                selmod += '<option value="/miiriads/MiSMap/Events/' + event + '_' + str(j + 1) + '.html">Model ' + str(j + 1) + '</option>'
        selmod += '</select>'

        # fill template html page
        fill_webpage(html_model_template, event + '/' + event + '_' + str(i + 1) + '.html', [('_LASTUPDATE_', fittime), ('_VERSION_', version), ('_EVENT_', evname), ('_EVENTDET_', eventdet), ('_MODEL_', div), ('_PARAMS_', lpar), ('_SELECT_', selmod), ('_RANK_', fitrank), ('<!-- _SCRIPT_ -->', script)])
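A sketch of generating the per-model pages (template path and time range are illustrative; models and datasets come from process_results and retrieve, defined further below):

# Hypothetical usage: one page per best-fit model of an event.
search_map, models = process_results('grids/gridlib', 'fits/gridlib')
datasets = retrieve('OB180222', download=False)
makehtml_model('model_template.html', 'OB180222', models, datasets,
               trange=(8600., 8700.))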
Example #8
def makehtml_event(html_event_template, event, models, datasets, trange=None, crossrefs=None):
    """Create individual event html page"""
    
    # set I/O shell display
    tcol, tun, tend, tit = "\033[0m\033[35m", "\033[0m\033[1m\033[35m", "\033[0m", "\033[0m\033[3m"
    
    # verbose
    printi(tcol + "Creating " + tit + "'" + event + '/' + event + '.html' + "'" + tcol + " web page" + tend)
    if crossrefs:
        printi(tcol + "All names of event: " + tit + crossrefs + tend)

    # date and time of event's update
    fittime = strftime('- Last update: %d/%m/%Y at %H:%M:%S UT', gmtime())
    with open(event + '/lastupdate.txt', 'w') as f:
        f.write(fittime)

    # create bokeh column of best-fit models
    mc = MagnificationCurve()
    lc = LightCurve(datasets, trange=trange, dmag=None)
    si = list()
    titlmod = 'Binary-lens model '
    tools = ["pan", "wheel_zoom", "box_zoom", "undo", "redo", "reset", "save"]
    active_drag = "pan"
    active_scroll = "wheel_zoom"

    for i in range(len(models)):
        
        printi(tcol + "Computing " + tit + "model " + str(i + 1) + tend)
        
        # compute best-fit light curve
        mc.create(models[i])
        lc.fit(mc, 'central', '+', init=None)
        
        # bind x-axis on first plot
        if i == 0:
            plot = bplt.figure(width=800, plot_height=400, title=titlmod + '1', tools=tools, active_drag=active_drag, active_scroll=active_scroll)
            si.append(plot)
            if trange:
                tmin = lc.params['t0'] - 1.5 * lc.params['tE']
                tmax = lc.params['t0'] + 1.5 * lc.params['tE']
                si[0].x_range = Range1d(tmin, tmax)
        else:
            plot = bplt.figure(width=800, plot_height=400, title=titlmod + str(i + 1), x_range=si[0].x_range, tools=tools, active_drag=active_drag, active_scroll=active_scroll)
            si.append(plot)

        # compute blended flux
        flux = lc._mu * lc.params['Fs'][0] + lc.params['Fb'][0]
        t = lc._t * lc.params['tE'] + lc.params['t0']

        # remove points with negative flux and NaN
        arg = np.logical_not(np.isnan(flux)) & (flux > 0.)
        flux = flux[arg]
        t = t[arg]

        # invert y-axis
        si[i].y_range = Range1d(1.02 * np.max(-2.5 * np.log10(flux)), 0.98 * np.min(-2.5 * np.log10(flux)))

        # theoretical light curve
        si[i].line(t, -2.5 * np.log10(flux), color='darkorange')

        # add data on plot
        k = 0
        atel = iter(color_tel)
        for dat in lc._datalist:
            # color and legend
            _ , datai = split(dat[6])
            tel = datai[0]
            if tel not in color_tel:
                tel = atel.next()
            color = color_tel[tel]
 
            # deblending
            magnif = (np.power(10., - dat[2] / 2.5) - lc.params['Fb'][k]) / lc.params['Fs'][k]
            flux = magnif * lc.params['Fs'][0] + lc.params['Fb'][0]
            
            # remove points with negative flux and NaN, and compute mag
            date = dat[1]
            errbar = dat[3]
            arg = np.logical_not(np.isnan(flux)) & (flux > 0.)
            flux = flux[arg]
            date = date[arg]
            errbar = errbar[arg]
            mag = - 2.5 * np.log10(flux)
            
            # plot with error bars
            si[i].circle(date, mag, size=4, alpha=1., color=color, legend=tel)
            
            y_err_x, y_err_y = [], []
            for px, py, err in zip(date, mag, errbar):
                y_err_x.append((px, px))
                y_err_y.append((py - err, py + err))
            si[i].multi_line(y_err_x, y_err_y, color=color, alpha=0.4)

            k += 1

    # group plots in one column and generate best-fits html code
    p = column(si)
    script, divini = components(p)
    div = '<div align="center">' + divini + '</div>'

    # create search map html code
    wsmap = '<img class="imageStyle" alt="' + event + '" src="./' + event + '.png' + '" width="800" />'
    
    # label version
    version = str(version_info).replace(', ', '.').replace('(', '(version ')
    
    # list of best-fit parameters to download
    fitrank = event + '_rank.txt'

    # create model selection list
    selmod = '<select name="display" onchange="location=this.value;">'
    selmod += '<option value="/miiriads/MiSMap/Events/' + event + '.html" selected>Overview</option>'
    for j in range(len(models)):
        selmod += '<option value="/miiriads/MiSMap/Events/' + event + '_' + str(j + 1) + '.html">Model ' + str(j + 1) + '</option>'
    selmod += '</select>'

    # get multiple references of event
    if crossrefs:
        evname = getcrossrefs(crossrefs, event)
    else:
        evname = event

    # fill template html page
    fill_webpage(html_event_template, event + '/' + event + '.html', [('_LASTUPDATE_', fittime), ('_VERSION_', version), ('_EVENT_', evname), ('_SEARCHMAP_', wsmap), ('_MODELS_', div), ('_SELECT_', selmod), ('_RANK_', fitrank), ('<!-- _SCRIPT_ -->', script)])
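The companion overview page is generated the same way (sketch, same caveats as for makehtml_model above):

# Hypothetical usage: a single overview page listing all best-fit models.
makehtml_event('event_template.html', 'OB180222', models, datasets,
               trange=(8600., 8700.))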
Example #9
def reformat(infilename, outfilename, cols, offset=0.):
    """Reformat data files to be used by muLAn
        
        Calling reformat
        ================
        reformat(infilename, outfilename, cols)
        
        Usage
        -----
        Enter in cols the list of columns description (i.e. keywords,
        see below) in the order they appear in the input file.
        
        Parameters
        ----------
        infilename: string
            Name of input data file.
        outfilename: string
            Name of output data file in muLAn format.
        cols: sequence of strings
            Mandatory keywords are:
                'hjd': Julian date or modified Julian date.
                'mag': magnitude.
                'errmag': error in magnitude.
            Optional keywords are:
                'seeing': seeing.
                'backg': background.
            For columns to ignore, use e.g. 'other'.
        offset: float, optional
            Offset added to the magnitudes. Default is: offset=0.

        Examples
        --------
        >>> reformat('data.dat', 'data_muLAn.dat',
                ['hjd', 'mag', 'errmag', 'seeing', 'backg'])
        
        >>> reformat('data.dat', 'data_muLAn.dat',
                ['other', 'hjd', 'mag', 'errmag'])
        """
    # set I/O shell display
    tbf, tcol, tend, tit = "\033[0m\033[1m", "\033[0m\033[35m", "\033[0m", "\033[0m\033[3m"
    
    # check mandatory keywords
    mandatkeys = ['hjd', 'mag', 'errmag']
    for key in mandatkeys:
        # check whether all mandatory keywords are present
        if key not in cols:
            raise ValueError("mandatory column {} missing".format(key))
        # check whether keywords appear only once
        if cols.count(key) > 1:
            raise ValueError("column {} appears more than once".format(key))

    # check if input file exists
    if not os.path.isfile(infilename):
        raise IOError("file '" + infilename + "' is missing")
    
    # limit number of columns to read
    usecols = range(len(cols))
    # reading input data file
    dtype = {'names': tuple(cols), 'formats': tuple(['S50' for c in cols])}
    data = np.loadtxt(infilename, dtype=dtype, usecols=usecols, unpack=False)
    # re-order columns
    newfile = ''
    for i in range(len(data['hjd'])):
        # check whether date is in HJD or MHJD, and correct it
        mhjd = float(data['hjd'][i]) - 2450000.
        if mhjd > 0.:
            data['hjd'][i] = str(mhjd)
        # add offset to magnitudes if required
        mag = float(data['mag'][i]) + offset
        data['mag'][i] = str(mag)
        # mandatory keywords
        newfile = newfile + repr(i + 1) + ' ' + data['hjd'][i] + ' ' + data['mag'][i] + ' ' + data['errmag'][i]
        # optional keywords
        if 'seeing' in cols:
            newfile = newfile + ' ' + data['seeing'][i]
        else:
            newfile = newfile + ' 0'
        if 'backg' in cols:
            newfile = newfile + ' ' + data['backg'][i] + '\n'
        else:
            newfile = newfile + ' 0\n'

    # create output data file in muLAn format
    with open(outfilename, 'w') as outfile:
        outfile.write(newfile)
    
    # verbose
    printi(tcol + "Reformat data file " + tit + infilename + tcol + " to " + tit + outfilename + tend)
Example #10
def addkmt(event, kmtname):
    """Add munally KMTNet data to a new or existing event"""
    # set I/O shell display
    tcol, tun, tend, tit = "\033[0m\033[34m", "\033[0m\033[1m\033[34m", "\033[0m", "\033[0m\033[3m"
    
    # check event name format
    if len(event) != 8 or len(kmtname) != 8:
        raise ValueError("wrong format for event name: correct is 'xByyzzzz'")
    
    printi(tcol + "Try fetch KMT data " + tit + kmtname + tcol + " for event " + tit + event + tend)

    # get microlensing season
    year = '20' + event[2:4]

    # directory tree
    datapath = event + '/data/'
    kmtdir = datapath + 'kmt/'
    fitsdir = event + '/fits/'
    if not os.path.isdir(event):
        # create dir: ./event/
        printi(tcol + "Create new event folder " + tit + event + "/" + tend)
        proc = subprocess.Popen('mkdir ' + event, shell=True, executable='/bin/bash')
        proc.wait()
#    else:
#        printi(tcol + "Update event " + tit + event + tend)
    if not os.path.isdir(datapath):
        # create dir: ./event/data/
        proc = subprocess.Popen('mkdir ' + datapath, shell=True, executable='/bin/bash')
        proc.wait()
    if not os.path.isdir(fitsdir):
        # create dir: ./event/fits/
        proc = subprocess.Popen('mkdir ' + fitsdir, shell=True, executable='/bin/bash')
        proc.wait()
    if not os.path.isdir(kmtdir):
        # create dir: ./event/data/kmt/
        proc = subprocess.Popen('mkdir ' + kmtdir, shell=True, executable='/bin/bash')
        proc.wait()

    # get data and uncompress
    url = 'http://kmtnet.kasi.re.kr/ulens/event/' + year + '/data/' + kmtname + '/pysis/pysis.tar.gz'
    remotefile = urllib2.urlopen(url)
    arch = remotefile.read()
    with open(kmtdir + 'pysis.tar.gz', 'wb') as f:
        f.write(arch)

    # check whether data are new: if not, stop and return False
    if os.path.isfile(kmtdir + 'kmt.tar.gz'):
        if os.path.getsize(kmtdir + 'pysis.tar.gz') == os.path.getsize(kmtdir + 'kmt.tar.gz'):
            return False

    proc = subprocess.Popen('cp ' + kmtdir + 'pysis.tar.gz ' + kmtdir + 'kmt.tar.gz', shell=True, executable='/bin/bash')
    proc.wait()

    # untargz data
    proc = subprocess.Popen('cd ' + kmtdir + ' ; tar -xvzf kmt.tar.gz', shell=True, executable='/bin/bash')
    proc.wait()

    # fetch datasets and reformat data
#    dr = ' ' + kmtdir
#    for obs in ['A', 'C', 'S']:
    listdat = fnmatch.filter(os.listdir(kmtdir), 'KMT*_I.pysis')
    for dat in listdat:
        reformat(kmtdir + dat, datapath + dat[3:6] + event[2:8] + 'I.dat', ['hjd', 'dflux', 'fluxerr', 'mag', 'errmag', 'seeing', 'backg'])


#        com = dr.join(['cat '] + L) + ' > ' + kmtdir + 'tmp.pysis'
#        proc = subprocess.Popen(com, shell=True, executable='/bin/bash')
#        proc.wait()
#        reformat(kmtdir + 'tmp.pysis', datapath + obs + event + 'I.dat', ['hjd', 'dflux', 'fluxerr', 'mag', 'errmag', 'seeing', 'backg'])

    return True
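A sketch of attaching KMTNet photometry to an OGLE event (names illustrative; the KMT name follows the same 'xByyzzzz' format):

# Hypothetical usage: fetch and reformat KMT data for an existing event.
if addkmt('OB180222', 'KB180087'):
    printi("new KMT data downloaded and reformatted")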
Example #11
def retrieve(event, crossrefs=None, download=True):
    """Create or update microlensing event data files
        
        Calling
        =======
        retrieve(event)
        
        Parameters
        ----------
        event : string
            Name of microlensing event: 'xByyzzzz', where:
                x is O (OGLE alert) or B (MOA alert)
                yy is the year
                zzzz is the event ID
        Example
        -------
        >>> datasets = retrieve('OB180222')
        """
    # set I/O shell display
    tcol, tun, tend, tit = "\033[0m\033[34m", "\033[0m\033[1m\033[34m", "\033[0m", "\033[0m\033[3m"
    
    # check event name format
    if len(event) != 8:
        raise ValueError("wrong format for event name: correct is 'xByyzzzz'")

    # get microlensing season
    year = '20' + event[2:4]

    # directory tree
    datapath = event + '/data/'
    syncdir = datapath + 'sync/'
    fitsdir = event + '/fits/'
    if not os.path.isdir(event):
        # create dir: ./event/
        printi(tcol + "Create new event folder " + tit + event + "/" + tend)
        proc = subprocess.Popen('mkdir ' + event, shell=True, executable='/bin/bash')
        proc.wait()
#    else:
#        printi(tcol + "Update event " + tit + event + tend)
    if not os.path.isdir(datapath):
        # create dir: ./event/data/
        proc = subprocess.Popen('mkdir ' + datapath, shell=True, executable='/bin/bash')
        proc.wait()
    if not os.path.isdir(syncdir):
        # create dir: ./event/data/sync/
        proc = subprocess.Popen('mkdir ' + syncdir, shell=True, executable='/bin/bash')
        proc.wait()
    if not os.path.isdir(fitsdir):
        # create dir: ./event/fits/
        proc = subprocess.Popen('mkdir ' + fitsdir, shell=True, executable='/bin/bash')
        proc.wait()

    # check whether to get new data, use local data, or stop
    if download:
        # default flags: no new KMT data yet, sync status not set
        newkmt = []
        newsync = 3
        # if a crossrefs file is given
        if crossrefs:
            refs = getcrossrefs(crossrefs, event)
#            newkmt = []
            for ref in refs.split():
                if 'K' in ref:
                    newkmt = addkmt(event, ref)
                elif newsync != 0:
                    newsync = rsync_ARTEMiS_data(ref, syncdir)
    
        # if crossrefs file not given:
        else:
            if 'K' in event:
                newkmt = addkmt(event, event)
            else:
                newsync = rsync_ARTEMiS_data(event, syncdir)

        # check whether to proceed
        if not (newsync == 0 or newkmt):
            printi(tun + "No new data available for " + tit + event + tun + " (nothing to do)" + tend)
            sys.exit(0)
        if newkmt:
            printi(tun + "New KMT data available for " + tit + event + tend)
        if newsync == 0:
            printi(tun + "New SYNC data available for " + tit + event + tend)
    else:
        printi(tcol + "Use local datasets" + tend)

    # create final list of datasets
#    printi(tcol + "Create final list of datasets" + tend)
#    kevent = event
#    syncset = fnmatch.filter(os.listdir(syncdir), '*' + event.replace('M', 'K') + '*.dat')
    syncset = fnmatch.filter(os.listdir(syncdir), '*.dat')
    for ds in syncset:
        reformat(syncdir + ds, datapath + ds.replace('K', 'M'), ['mag', 'errmag', 'hjd'])
#
#        if 'K' in ds:
#            proc = subprocess.Popen('cp ' + syncdir + ds + ' ' + syncdir + ds.replace('K', 'M'), shell=True, executable='/bin/bash')
#            proc.wait()
#    syncset = [ds.replace('K', 'M') for ds in syncset]

    # fetch other datasets in data/
    othersets = fnmatch.filter(os.listdir(datapath), '*.dat')
    if os.path.isfile(datapath + 'params.dat'):
        othersets.remove('params.dat')

    # remove duplicates
    datasets = list(np.unique(syncset + othersets))

    # discard symlinks (useful only if rsync is used without -L option)
    datasets = [dataset for dataset in datasets if not os.path.islink(syncdir + dataset)]

    # get OGLE first, if not MOA first, otherwise keep order
    datasets = orderlist(datasets)

## reformat only sync/ datasets ## TODO: do this higher up !!!
#    for dataseti in datasets:
#        if os.path.isfile(syncdir + dataseti):
#            reformat(syncdir + dataseti, datapath + dataseti, ['mag', 'errmag', 'hjd'])

    # add relative path to datasets names
    datasets = [datapath + dataset for dataset in datasets]

    printi(tcol + "Found " + tit + str(len(datasets)) + tcol + " dataset(s)" + tend)
    printd(tit + "  " + str(datasets) + tend)

    return datasets
Example #12
def _process_fits((mclib, lclib, datasets, trange, init)):
    """Process of fit_subgrids"""

    # set I/O shell display
    tfil, tun, tcol, tit, tend = "\033[0m\033[1;35m", "\033[0m\033[1;31m", "\033[0m\033[31m", "\033[0m\033[3m", "\033[0m"

    # check that mclib(.hdf5) exists
    if not os.path.isfile(mclib):
        raise IOError("file '" + mclib + "' is missing")

    # verbose
    printi(tcol + "Launching " + tit + "'" + mclib + "'" + tcol +
           " grid with PID " + tit + str(os.getpid()) + tend)

    with h5py.File(mclib, 'r') as fmclib:
        # NEW read datasets and time range
        mc = MagnificationCurve()
        lc = LightCurve(datasets, trange=trange)

        # global subgrid attributes
        Ns = fmclib.attrs['Ns']
        Nq = fmclib.attrs['Nq']

        # prepare grid
        grids = np.empty(Ns * Nq, dtype=np.float_)
        gridq = np.empty(Ns * Nq, dtype=np.float_)
        gridu0 = np.empty(Ns * Nq, dtype=np.float_)
        gridalpha = np.empty(Ns * Nq, dtype=np.float_)
        gridtE = np.empty(Ns * Nq, dtype=np.float_)
        gridt0 = np.empty(Ns * Nq, dtype=np.float_)
        gridrho = np.empty(Ns * Nq, dtype=np.float_)
        gridchidof = np.empty(Ns * Nq, dtype=np.float_)
        gridchi = np.empty(Ns * Nq, dtype=np.float_)
        bestmc = np.empty(Ns * Nq, dtype=np.dtype('a128'))
        for nsq in range(len(fmclib.keys())):
            sqlib = fmclib[str(nsq)]
            grids[nsq] = sqlib.attrs['s']
            gridq[nsq] = sqlib.attrs['q']
            fits = list()
            usefit = 0
            for id in sqlib:
                # read mc
                f_u0, f_alpha, f_tE, f_t0, f_rho, f_chidof, f_chi = [],[],[],[],[],[],[]
                mc.read(mclib, str(nsq) + '/' + id)

                # fit only if ∆mag(th) > ∆mag(exp)
                dmag = 2.5 * np.log10(
                    fmclib[str(nsq) + '/' + id].attrs['mumax'])

                if dmag < lc.content['dmag']:
                    printi(tfil + "Model delta(mag) too low : skipping" + tend)
                    printd(tit + "  (delta(mag_th) = " + str(dmag) + " < " +
                           str(lc.content['dmag']) + tend)
                else:
                    usefit += 1

                    # read reference croi in mc
                    croi = fmclib[str(nsq) + '/' + id].attrs['refcroi']

                    # fit for 0 < alpha < π/2
                    lc.fit(mc, croi, '+', init=init)
                    f_u0.append(lc.params['u0'])
                    f_alpha.append(lc.params['alpha'])
                    f_tE.append(lc.params['tE'])
                    f_t0.append(lc.params['t0'])
                    f_rho.append(lc.params['rho'])
                    f_chidof.append(lc.content['chi2'][0] /
                                    lc.content['chi2'][1])
                    f_chi.append(lc.content['chi2'][0])

                    # fit for π < alpha < 3π/2
                    lc.fit(mc, croi, '-', init=init)
                    f_u0.append(lc.params['u0'])
                    f_alpha.append(lc.params['alpha'])
                    f_tE.append(lc.params['tE'])
                    f_t0.append(lc.params['t0'])
                    f_rho.append(lc.params['rho'])
                    f_chidof.append(lc.content['chi2'][0] /
                                    lc.content['chi2'][1])
                    f_chi.append(lc.content['chi2'][0])

                    # add fit to list if chi2 is not inf
                    if not np.all(np.isinf(f_chidof)):
                        arg = np.argmin(f_chidof)
                        u0 = f_u0[arg]
                        alpha = f_alpha[arg]
                        tE = f_tE[arg]
                        t0 = f_t0[arg]
                        rho = f_rho[arg]
                        chidof = f_chidof[arg]
                        chi = f_chi[arg]
                        fits.append([id, u0, alpha, tE, t0, rho, chidof, chi])

            # verbose
            printd(tcol +
                   "Percentage of useful magnification curves is about " +
                   tit + "{0:.0f}".format(100. * float(usefit) / float(id)) +
                   "%" + tend)

            if fits:
                # sort fits by increasing chi2 and get parameters
                fits = np.array(fits)
                arg = np.argsort(np.array(fits[:, 6], dtype=np.float_))
                mcs = np.array(fits[arg, 0], dtype=np.int_)
                u0 = np.array(fits[arg, 1], dtype=np.float_)
                alpha = np.array(fits[arg, 2], dtype=np.float_)
                tE = np.array(fits[arg, 3], dtype=np.float_)
                t0 = np.array(fits[arg, 4], dtype=np.float_)
                rho = np.array(fits[arg, 5], dtype=np.float_)
                chidof = np.array(fits[arg, 6], dtype=np.float_)
                chi = np.array(fits[arg, 7], dtype=np.float_)

                # save best-fit parameters and chi2/dof
                gridu0[nsq] = u0[0]
                gridalpha[nsq] = alpha[0]
                gridtE[nsq] = tE[0]
                gridt0[nsq] = t0[0]
                gridrho[nsq] = rho[0]
                gridchidof[nsq] = chidof[0]
                gridchi[nsq] = chi[0]
                bestmc[nsq] = str(nsq) + '/' + str(mcs[0])

                # verbose
                printi(tcol + "Best-fit model at grid point " + tit + "'" +
                       str(nsq) + "'" + tcol + " in file " + tit + mclib +
                       tcol + " is " + tit + "'" + str(mcs[0]) + "'" + tcol +
                       " with " + tit + "chi2/dof={:.3e}".format(chidof[0]) +
                       tend)
            else:
                gridchidof[nsq] = np.inf
                gridchi[nsq] = np.inf

        # save log(X^2) map in HDF5 file: overwrite existing file
        with h5py.File(lclib, 'w') as fitres:
            gS = np.unique(grids)
            gQ = np.unique(gridq)
            gs, gq = np.meshgrid(gS, gQ)

            fitres.create_dataset('s', data=gs)
            fitres.create_dataset('q', data=gq)
            fitres.create_dataset('u0', data=gridu0.reshape(Ns, Nq).T)
            fitres.create_dataset('alpha', data=gridalpha.reshape(Ns, Nq).T)
            fitres.create_dataset('tE', data=gridtE.reshape(Ns, Nq).T)
            fitres.create_dataset('t0', data=gridt0.reshape(Ns, Nq).T)
            fitres.create_dataset('rho', data=gridrho.reshape(Ns, Nq).T)
            fitres.create_dataset('chidof', data=gridchidof.reshape(Ns, Nq).T)
            fitres.create_dataset('chi', data=gridchi.reshape(Ns, Nq).T)


    # verbose
    printi(tun + "Light curve grid " + tit + "'" + lclib + "'" + tun +
           " complete" + tend)
Example #13
def _process_grids((mclib, grid, nmc, pcaus, f_rcroi)):
    """Process of compute_subgrids"""
    # set I/O shell display
    tun, tcol, tit, tend = "\033[0m\033[1;31m", "\033[0m\033[31m", "\033[0m\033[3m", "\033[0m"

    # verbose
    printi(tcol + "Launching " + tit + "'" + mclib + "'" + tcol +
           " grid with PID " + tit + str(os.getpid()) + tend)

    # create mc of current sub-grid
    mc = MagnificationCurve()
    params = dict()
    k = 0
    for params['s'] in grid[0]:
        for params['q'] in grid[1]:

            # get reference parameters of mc
            mc.create({'s': params['s'], 'q': params['q']}, calcmc=False)

            # compute mc grid
            grpname = str(k)
            for id in range(nmc):

                mcid = grpname + '/' + str(id)

                # check if dataset exists
                go = True
                if os.path.isfile(mclib):

                    with h5py.File(mclib, 'r') as fmclib:
                        go = mcid not in fmclib

                if go:

                    # generate random central/secondary trajectories
                    croi = np.random.choice(['central', 'secondary'],
                                            p=[pcaus, 1. - pcaus])
                    if mc.content['topo'] == 'interm':
                        cx, cy, r = mc.content['croi']['resonant']
                    if mc.content['topo'] == 'close':
                        if croi == 'secondary':
                            cx, cy, r = mc.content['croi']['secondary_up']
                        else:
                            cx, cy, r = mc.content['croi']['central']
                    if mc.content['topo'] == 'wide':
                        cx, cy, r = mc.content['croi'][croi]

                    # generate rho and alpha
                    params['rho'] = np.power(10.,
                                             np.random.uniform(-3.5, -1.5))
                    params['alpha'] = np.random.uniform(0., np.pi / 2.)

                    # generate u0
                    #   u0c: trajectory through selected croi center
                    u0c = -cx * np.sin(params['alpha']) + cy * np.cos(
                        params['alpha'])
                    #   uc: local centered on selected croi
                    ucm = f_rcroi * r
                    uc = np.random.uniform(-ucm, ucm)
                    params['u0'] = uc + u0c

                    # create mc
                    mc.create(params)

                    # write metadata and mc
                    attrs = {
                        'Ns': len(grid[0]),
                        'Nq': len(grid[1]),
                        grpname + '/s': params['s'],
                        grpname + '/q': params['q'],
                        mcid + '/refcroi': croi
                    }

                    mc.write(mclib, mcid, attrs=attrs)

                else:
                    printi(tcol + "Magnification curve '" + tit + mcid + "'" +
                           tcol + "already exists : skipping" + tend)
            k += 1

    # verbose
    printi(tun + "Magnification curve grid " + tit + "'" + mclib + "'" + tun +
           " complete" + tend)
Example #14
def compute_subgrids(gridsprefix, gridlist, nprocs=1, f_rcroi=2.):
    """Create magnification curve sub-grids (HDF5 files)"""
    # set I/O shell display
    tcol, tit, tend = "\033[0m\033[31m", "\033[0m\033[3m", "\033[0m"

    # check whether the input name does not contain an extension
    if '.hdf5' in gridsprefix:
        raise NameError("grid prefix should not contain .hdf5 extension")

    # create name
    grid = gridsprefix + '.hdf5'

    # check whether library exists
    if not os.path.isfile(grid):
        raise IOError("file '" + grid + "' is missing")

    # verbose
    printd(tcol + "Grid " + tit + "'" + grid + "'" + tcol + " chosen" + tend)

    # multiprocessing: create grid list names
    listmclibs, listgrids = list(), list()
    with h5py.File(grid, 'r') as fgrid:
        Ngs = fgrid.attrs['Ngs']
        Ngq = fgrid.attrs['Ngq']

        nmc = fgrid.attrs['nmc']
        pcaus = fgrid.attrs['pcaus']

        k = 0
        for j in range(Ngq):
            for i in range(Ngs):
                if k in gridlist:
                    # list of mc libraries to process
                    mclibk = gridsprefix + '_' + str(k) + '.hdf5'

                    #                    # if file exists, abort --> no: we now resume existing grids instead
                    #                    if os.path.isfile(mclibk):
                    #                        raise IOError("file '" + mclibk + "' already exists")

                    # add mc library to to-process list
                    listmclibs.append(mclibk)

                    # list of corresponding s,q values
                    gridi = fgrid[str(i) + ' ' + str(j)]
                    listgrids.append((gridi['s'][:], gridi['q'][:]))
                k += 1

    # multiprocessing: create arguments of _process_grids, and create workers pool
    printi(tcol + "Starting manager with PID " + tit + str(os.getpid()) +
           tcol + " running " + tit + str(nprocs) + tcol + " process(es)" +
           tend)
    listargs = zip(listmclibs, listgrids, repeat(nmc), repeat(pcaus),
                   repeat(f_rcroi))
    pool = Pool(processes=nprocs)
    pool.imap_unordered(_process_grids, listargs)

    # collect results
    pool.close()
    pool.join()
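A sketch of computing every sub-grid declared in the main grid file (the 60 = 12 x 5 count follows the typical majgrid quoted in create_maingrid below):

# Hypothetical usage: compute all 60 sub-grids of 'grids/gridlib.hdf5'
# with 8 worker processes.
compute_subgrids('grids/gridlib', range(60), nprocs=8)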
Example #15
def create_maingrid(gridsprefix,
                    srange,
                    qrange,
                    majgrid,
                    mingrid,
                    nmc,
                    pcaus,
                    axis=[0.1, 10., 8e-6, 1.2]):
    """Generate an HDF5 file with definition of sub- s,q-grids
        
        Parameters
        ----------
        gridsprefix : str
            Name (without any extension) of the output HDF5 library of
            magnification-curve s,q-grids and of the corresponding PDF map.
        srange : tuple
            Global range in s. Typical value: srange=(0.2, 5.0)
        qrange : tuple
            Global range in q. Typical value: qrange=(1e-5, 1.)
        majgrid : tuple
            Number of sub- s,q-grids. Typical value: majgrid=(12, 5)
        mingrid : tuple
            Size of sub- s,q-grids. Typical value: mingrid=(7, 7)
        nmc : int
            Number of magnification curves drawn per (s, q) grid point.
        pcaus : float
            Probability of drawing a source trajectory through the central
            (rather than the secondary) caustic region.
        axis : float 1-D array, optional
            Plot limits. Usage: axis=[xmin, xmax, ymin, ymax].
            Default is: axis=[0.1, 10., 8e-6, 1.2].
            
        Returns
        -------
        out : HDF5 file
            File containing arrays of s and q for each of the sub-grids.
        out : PDF file
            Map of the sub-grid layout.
            
        Examples
        --------
        >>> create_maingrid('gridlib-1', (0.8, 1.25), (0.001, 0.1), (3, 3),
                (6, 6), nmc, pcaus, axis=[0.7, 1.4, 0.0006, 0.2])
        """
    # set I/O shell display
    tit, tcol, tend = "\033[0m\033[3m", "\033[0m\033[35m", "\033[0m"

    # check whether the grid name does not contain extensions
    if '.hdf5' in gridsprefix:
        raise NameError("name should not contain extension")

    # create file names
    pdfname = gridsprefix + '.pdf'
    libname = gridsprefix + '.hdf5'

    # check whether grid already exists
    if os.path.isfile(libname):
        raise IOError("file '" + libname + "' already exists")

    # define sub-grids
    smin, smax = srange[0], srange[1]
    qmin, qmax = qrange[0], qrange[1]
    Ngs, Ngq = majgrid[0], majgrid[1]
    Ns, Nq = mingrid[0], mingrid[1]
    S = np.empty([Ngs, Ns], dtype=np.float_)
    Q = np.empty([Ngq, Nq], dtype=np.float_)
    fullS = np.geomspace(smin, smax, Ngs * Ns, endpoint=True)
    fullQ = np.geomspace(qmin, qmax, Ngq * Nq, endpoint=True)
    for i in range(Ngs):
        S[i, ] = fullS[i * Ns:(i + 1) * Ns]
    for j in range(Ngq):
        Q[j, ] = fullQ[j * Nq:(j + 1) * Nq]

    # verbose
    printi(tcol + "Create grid " + tit + "'" + libname + "'" + tcol +
           " (view configuration:" + tit + "'" + pdfname + "'" + tcol + ")" +
           tend)
    printd(tit + "  (" + str(smin) + " ≤ s ≤ " + str(smax) + ", " + str(qmin) +
           " ≤ q ≤ " + str(qmax) + ", " + str(Ngs) + " x " + str(Ngq) +
           " sub-grids, each of size " + str(Ns) + " x " + str(Nq) + ")" +
           tend)

    # create individual grids and store in HDF5 file
    grids = list()
    with h5py.File(libname, 'w') as gridlib:
        gridlib.attrs['Ngs'] = np.shape(S)[0]
        gridlib.attrs['Ngq'] = np.shape(Q)[0]

        gridlib.attrs['nmc'] = nmc
        gridlib.attrs['pcaus'] = pcaus

        for j in range(np.shape(Q)[0]):
            for i in range(np.shape(S)[0]):
                grids.append((S[i, ], Q[j, ]))
                gridlibk = gridlib.create_group(u'' + str(i) + ' ' + str(j))
                gridlibk.create_dataset('s', data=S[i, ])
                gridlibk.create_dataset('q', data=Q[j, ])

    # plot template grid
    plt.rc('font', size=14)
    plt.close('all')
    fig, MAP = plt.subplots(1, figsize=(8, 6))
    MAP.set_xscale('log')
    MAP.set_yscale('log')
    MAP.set_xlim([axis[0], axis[1]])
    MAP.set_ylim([axis[2], axis[3]])
    plt.subplots_adjust(left=0.16,
                        bottom=0.13,
                        right=0.94,
                        top=0.94,
                        wspace=None,
                        hspace=None)
    MAP.set_title(r'Binary-lens search map template')
    MAP.set_xlabel(r'$s$')
    MAP.set_ylabel(r'$q$')
    for k in range(len(grids)):
        SP, QP = np.meshgrid(grids[k][0], grids[k][1])
        MAP.scatter(SP, QP, marker='*', s=12)
        plt.text(grids[k][0][0], grids[k][1][0], str(k), fontsize=15)
    plt.xticks(np.array([0.2, 0.3, 0.5, 0.7, 1, 2, 3, 4, 5]),
               np.array(['0.2', '0.3', '0.5', '0.7', '1', '2', '3', '4', '5']))
    plt.savefig(pdfname)
Example #16
def process_results(gridsprefix, fitsprefix, nmod=9):
    """Process fit results
        
        nmod : int
            Maximal number of output models.
        """
    # set I/O shell display
    tcol, tun, tend, tit = "\033[0m\033[35m", "\033[0m\033[1m\033[35m", "\033[0m", "\033[0m\033[3m"

    # check that names do not have extension
    if '.hdf5' in gridsprefix:
        raise NameError("grid prefix should not contain .hdf5 extension")
    if '.hdf5' in fitsprefix:
        raise NameError("fit prefix should not contain .hdf5 extension")

    # check that mcgrid(.hdf5) exists
    grid = gridsprefix + '.hdf5'
    if not os.path.isfile(grid):
        raise IOError("file '" + grid + "' is missing")

    # verbose
    printd(tcol + "Grid file " + tit + "'" + grid + "'" + tend)

    # collect fit files and fill missing ones
    missingfits, missing = [], []
    with h5py.File(grid, 'r') as fgrid:
        Ngs = fgrid.attrs['Ngs']
        Ngq = fgrid.attrs['Ngq']
        k = 0
        Cgs, Cgq, Cgu0, Cgalpha, CgtE, Cgt0, Cgrho, Cgchidof, Cgchi = [],[],[],[],[],[],[],[],[]
        for j in range(Ngq):
            Lgs, Lgq, Lgu0, Lgalpha, LgtE, Lgt0, Lgrho, Lgchidof, Lgchi = [],[],[],[],[],[],[],[],[]
            for i in range(Ngs):
                gridfitsk = fitsprefix + '_' + str(k) + '.hdf5'
                if os.path.isfile(gridfitsk):
                    # fit file exists
                    with h5py.File(gridfitsk, 'r') as fgridfitsk:
                        Lgs.append(fgridfitsk['s'][:])
                        Lgq.append(fgridfitsk['q'][:])
                        Lgu0.append(fgridfitsk['u0'][:])
                        Lgalpha.append(fgridfitsk['alpha'][:])
                        LgtE.append(fgridfitsk['tE'][:])
                        Lgt0.append(fgridfitsk['t0'][:])
                        Lgrho.append(fgridfitsk['rho'][:])
                        Lgchidof.append(fgridfitsk['chidof'][:])
                        Lgchi.append(fgridfitsk['chi'][:])
                else:
                    # fit file is missing
                    default = fgrid[str(i) + ' ' + str(j)]
                    meshs, meshq = np.meshgrid(default['s'][:],
                                               default['q'][:])
                    Lgs.append(meshs)
                    Lgq.append(meshq)
                    fails = np.full_like(meshs, np.inf)
                    Lgu0.append(fails)
                    Lgalpha.append(fails)
                    LgtE.append(fails)
                    Lgt0.append(fails)
                    Lgrho.append(fails)
                    Lgchidof.append(fails)
                    Lgchi.append(fails)
                    missingfits.append(gridfitsk)
                    missing.append((default['s'][:], default['q'][:]))
                k += 1
            Cgs.append(np.concatenate(Lgs, axis=1))
            Cgq.append(np.concatenate(Lgq, axis=1))
            Cgu0.append(np.concatenate(Lgu0, axis=1))
            Cgalpha.append(np.concatenate(Lgalpha, axis=1))
            CgtE.append(np.concatenate(LgtE, axis=1))
            Cgt0.append(np.concatenate(Lgt0, axis=1))
            Cgrho.append(np.concatenate(Lgrho, axis=1))
            Cgchidof.append(np.concatenate(Lgchidof, axis=1))
            Cgchi.append(np.concatenate(Lgchi, axis=1))
    s = np.concatenate(Cgs, axis=0)
    q = np.concatenate(Cgq, axis=0)
    u0 = np.concatenate(Cgu0, axis=0)
    alpha = np.concatenate(Cgalpha, axis=0)
    tE = np.concatenate(CgtE, axis=0)
    t0 = np.concatenate(Cgt0, axis=0)
    rho = np.concatenate(Cgrho, axis=0)
    chidof = np.concatenate(Cgchidof, axis=0)
    chi = np.concatenate(Cgchi, axis=0)

    search_map = [s, q, chidof, chi, missing]

    # verbose
    if missingfits:
        printi(tcol + "Fit crashed for " + tit + str(len(missingfits)) + tcol +
               " sub-grids" + tend)
        for mi in missingfits:
            printd(tit + "  ('" + mi + "')" + tend)

    # order models by X^2
    ind = np.unravel_index(np.argsort(chidof, axis=None), chidof.shape)

    models = list()
    for i in range(nmod):
        params = [
            u0[ind][i], alpha[ind][i], tE[ind][i], t0[ind][i], rho[ind][i]
        ]
        if np.any(np.isinf(params)):
            nmod = i
            break
        models.append({
            's': s[ind][i],
            'q': q[ind][i],
            'u0': u0[ind][i],
            'alpha': alpha[ind][i],
            'tE': tE[ind][i],
            't0': t0[ind][i],
            'rho': rho[ind][i]
        })

    # list best-fit parameters
    befi = "  {0:<2s} {1:<9s} {2:<11s} {3:<12s} {4:<10s} {5:<10s} {6:<12s} {7:<11s} {8:<10s} {9:<12s}\n".format(
        '', 's', 'q', 'u0', 'alpha', 'tE', 't0', 'rho', 'X^2/dof', 'X^2')
    for i in range(nmod):
        befi += "  {0:<2d} {1:<9.6f} {2:<11.4e} {3:<+12.4e} {4:<10f} {5:<10f} {6:<12f} {7:<11.4e} {8:<10f} {9:<12.1f}\n".format(
            i + 1, s[ind][i], q[ind][i], u0[ind][i], alpha[ind][i], tE[ind][i],
            t0[ind][i], rho[ind][i], chidof[ind][i], chi[ind][i])

    # create _rank.txt output file
    with open(fitsprefix + '_rank.txt', 'w') as f:
        f.write(befi)

    # verbose
    printi(tcol + "Best-fit models ranking:\n" + tend + befi)

    return search_map, models
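Putting the pieces together, a hedged end-to-end sketch of the search pipeline suggested by the functions above (all names and values illustrative):

# Hypothetical pipeline: data -> grids -> fits -> ranked models.
datasets = retrieve('OB180222')
create_maingrid('grids/gridlib', (0.2, 5.0), (1e-5, 1.), (12, 5), (7, 7),
                nmc=1000, pcaus=0.5)
compute_subgrids('grids/gridlib', range(60), nprocs=8)
fit_subgrids('grids/gridlib', 'fits/gridlib', datasets, range(60), nprocs=8)
search_map, models = process_results('grids/gridlib', 'fits/gridlib')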