Example #1
import numpy


def output_T(samples, dic, outputfile):
    # Write a tab-separated table: one row per sample, one column per dict key.
    with open(outputfile, 'w') as fout:
        index = list(dic.keys())
        fout.write('cell\t' + '\t'.join(index) + '\n')
        for i, sample in enumerate(samples):
            fout.write(sample)
            for key in index:
                try:
                    fout.write('\t' + numpy.format_float_positional(dic[key][i]))
                except TypeError:
                    # The value is already a string; write it unchanged.
                    fout.write('\t' + dic[key][i])
            fout.write('\n')
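
A minimal usage sketch with made-up data (the sample names and the 'gene1' column are hypothetical):

samples = ['cell_A', 'cell_B']
dic = {'gene1': [0.5, 1.25]}
output_T(samples, dic, 'expression.tsv')
# expression.tsv (tab-separated):
# cell    gene1
# cell_A  0.5
# cell_B  1.25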
Example #2
def format_float(num):
    return np.format_float_positional(num, trim='-')
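
With trim='-', trailing zeros and a dangling decimal point are both removed:

format_float(1.0)   # -> '1'
format_float(0.25)  # -> '0.25'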
Example #3
def standarddeviation(value):
    # Print the standard deviation of a sequence, rounded to 2 digits.
    print(np.format_float_positional(np.std(np.array(value)), precision=2))
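
A quick check (np.std computes the population standard deviation by default):

standarddeviation([1, 2, 3, 4])  # prints 1.12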
Example #4
def num2str(num, width, dec=False, space=0, eng=False):
    ''' Converts a number to a string using F70-like formatting.

    Purpose
    -------
    To easily convert numbers to strings while complying with F70/F90 string
    requirements.

    Example: for 'F10.0' use: width = 10, dec = False
             for 'F5.2'  use: width =  5, dec = 2

    Parameters
    ----------
    num : float
        number to be transformed to a string.

    width : int
        number of characters that the number will occupy.

    dec : int (optional)
        number of decimals to include. When none is passed, defaults to
        returning as many decimals as fit within the specified width.

    space : int (optional)
        blank characters to keep between adjacent numbers.
        This argument is only used if dec=False.

    eng : bool (optional)
        if True, scientific format is used instead of floating-point format.
        Defaults to False.

    Returns
    -------
    str_out : str
        number transformed into a string based on the specified formatting.

    Notes
    -----
    * Make sure the width and decimals specified work for the given number;
      otherwise, the function will raise an exception.
    * Probably not the smartest way of doing things, but I'm tired and grumpy.
    '''

    # If engineering, format as such
    if eng:
        kwargs = {'trim': '.', 'sign': False, 'exp_digits': 0}
        len_exp = 2 + len(str(int(abs(np.log10(num)))))  # length of exponent ('e', sign, digits)

        if dec:
            # Raise an error if there isn't enough space for the number
            if width - dec - len_exp - 2 < 0:
                raise Exception("can't format eng number; does not fit :( ")
            kwargs.update({'precision': dec})

        else:
            # Raise an error if there isn't enough space for the number
            if width - space - len_exp - 2 < 0:
                raise Exception("can't format eng number; does not fit :( ")

            # Format number
            len_dec = width - len_exp - space - 2  # Available space for decimal
            if num < 0: len_dec -= 1
            kwargs.update({'precision': len_dec})
            len_all = len(np.format_float_scientific(num, **kwargs))

            # Correct for missing padding
            pad = width - len_all
            kwargs.update({'pad_left': pad})

        str_out = np.format_float_scientific(num, **kwargs)

    # Otherwise, format as floating point number
    else:

        # Round if required
        if dec:
            num = np.round(num, dec)

        # Determine number of digits
        digits = len(str(int(num)))

        # Raise error if digits don't fit in specified width
        if digits > width:
            raise Exception('Digits > width | Use scientific?')

        # Raise error if digits + dec + space don't fit in specified width
        if digits + dec + space + 1 > width:
            raise Exception('Digits + dec + space > width | Use scientific?')

        # Specify lpad and prec if exact digits were required
        if dec:
            prec = dec
            lpad = max(0, width - prec - 1)

        # Specify lpad and prec if there are no decimals in digit
        elif digits == len(str(num)):
            lpad = width - space
            prec = 0

        # Specify lpad and prec if there are decimals
        else:
            prec = max(0, width - space - digits - 1)
            lpad = max(0, width - prec - 1)

        # Convert digit to string (note that pad includes digits!!) and output
        frmt = {
            'precision': prec,
            'pad_left': lpad,
            'unique': True,
            'trim': '-'
        }
        str_out = np.format_float_positional(num, **frmt)
        lpad += width - len(str_out)
        frmt.update({'pad_left': lpad})
        str_out = np.format_float_positional(num, **frmt)

    return str_out
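
Tracing the code above, a hedged example of the 'F10.2' case from the docstring:

num2str(3.14159, 10, dec=2)  # -> '      3.14' (right-aligned in 10 characters)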
Example #5
def format_float(value):
    return np.format_float_positional(value, unique=True, trim="0")
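
Here trim="0" strips trailing zeros but always keeps one digit after the decimal point:

format_float(1.0)   # -> '1.0'
format_float(2.50)  # -> '2.5'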
Example #6
def tls_search(time,
               flux,
               flux_err,
               known_transits=None,
               tls_kwargs=None,
               wotan_kwargs=None,
               options=None):
    '''
    Summary:
    -------
    Runs TLS on the given data.
    
    Inputs:
    -------
    time : array of float
        time stamps of observations
    flux : array of float
        normalized flux
    flux_err : array of float
        error of normalized flux
        
        
    Optional Inputs:
    ----------------
    tls_kwargs : None or dict, keywords:
        R_star : float
            radius of the star (e.g. median)
            default 1 R_sun (from TLS)
        R_star_min : float
            minimum radius of the star (e.g. 1st percentile)
            default 0.13 R_sun (from TLS)
        R_star_max : float
            maximum radius of the star (e.g. 99th percentile)
            default 3.5 R_sun (from TLS)
        M_star : float
            mass of the star (e.g. median)
            default 1. M_sun (from TLS)
        M_star_min : float
            minimum mass of the star (e.g. 1st percentile)
            default 0.1 M_sun (from TLS)
        M_star_max : float
            maximum mass of the star (e.g. 99th percentile)
            default 1. M_sun (from TLS)    
        u : list
            quadratic limb darkening parameters
            default [0.4804, 0.1867]
        ...
            
    SNR_threshold : float
        the SNR threshold at which to stop the TLS search
        (a tls_kwargs key; default 5)
        
    known_transits : None or dict
        if dict and one transit is already known: 
            known_transits = {'period':[1.3], 'duration':[2.1], 'epoch':[245800.0]}
        if dict and multiple transits are already known: 
            known_transits = {'name':['b','c'], 'period':[1.3, 21.0], 'duration':[2.1, 4.1], 'epoch':[245800.0, 245801.0]}
        'period' is the period of the transit
        'duration' must be the total duration, i.e. from first ingress point to last egress point, in days
        'epoch' is the epoch of the transit
        
    options : None or dict, keywords:
        show_plot : bool
            show a plot of each phase-folded transit candidate and TLS model in the terminal 
            default is False
        save_plot : bool
            save a plot of each phase-folded transit candidate and TLS model into outdir
            default is False
        outdir : string
            if None, use the current working directory
            default is ""
        
    Returns:
    -------
    List of all TLS results
    '''

    #::: seed
    np.random.seed(42)

    #::: handle inputs
    if flux_err is None:
        ind = np.where(~np.isnan(time * flux))[0]
        time = time[ind]
        flux = flux[ind]
    else:
        ind = np.where(~np.isnan(time * flux * flux_err))[0]
        time = time[ind]
        flux = flux[ind]
        flux_err = flux_err[ind]

    time_input = 1. * time
    flux_input = 1. * flux  #for plotting

    if wotan_kwargs is None:
        detrend = False
    else:
        detrend = True

        if 'slide_clip' not in wotan_kwargs: wotan_kwargs['slide_clip'] = {}
        if 'window_length' not in wotan_kwargs['slide_clip']:
            wotan_kwargs['slide_clip']['window_length'] = 1.
        if 'low' not in wotan_kwargs['slide_clip']:
            wotan_kwargs['slide_clip']['low'] = 20.
        if 'high' not in wotan_kwargs['slide_clip']:
            wotan_kwargs['slide_clip']['high'] = 3.

        if 'flatten' not in wotan_kwargs: wotan_kwargs['flatten'] = {}
        if 'method' not in wotan_kwargs['flatten']:
            wotan_kwargs['flatten']['method'] = 'biweight'
        if 'window_length' not in wotan_kwargs['flatten']:
            wotan_kwargs['flatten']['window_length'] = 1.
        #the rest is filled automatically by Wotan

    if tls_kwargs is None: tls_kwargs = {}
    if 'show_progress_bar' not in tls_kwargs:
        tls_kwargs['show_progress_bar'] = False
    if 'SNR_threshold' not in tls_kwargs: tls_kwargs['SNR_threshold'] = 5.
    if 'SDE_threshold' not in tls_kwargs: tls_kwargs['SDE_threshold'] = 5.
    if 'FAP_threshold' not in tls_kwargs: tls_kwargs['FAP_threshold'] = 0.05
    tls_kwargs_original = {
        key: tls_kwargs[key]
        for key in tls_kwargs.keys()
        if key not in ['SNR_threshold', 'SDE_threshold', 'FAP_threshold']
    }  #for the original tls
    #the rest is filled automatically by TLS

    if options is None: options = {}
    if 'show_plot' not in options: options['show_plot'] = False
    if 'save_plot' not in options: options['save_plot'] = False
    if 'outdir' not in options: options['outdir'] = ''

    #::: init
    SNR = 1e12
    SDE = 1e12
    FAP = 0
    FOUND_SIGNAL = False
    results_all = []
    if len(options['outdir']) > 0 and not os.path.exists(options['outdir']):
        os.makedirs(options['outdir'])

    #::: logprint
    with open(os.path.join(options['outdir'], 'logfile.log'), 'w') as f:
        f.write('TLS search, UTC ' +
                datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') + '\n')
    logprint('\nWotan kwargs:', options=options)
    logpprint(wotan_kwargs, options=options)
    logprint('\nTLS kwargs:', options=options)
    logpprint(tls_kwargs, options=options)
    logprint('\nOptions:', options=options)
    logpprint(options, options=options)

    #::: apply a mask (if requested)
    if known_transits is not None:
        for period, duration, T0 in zip(known_transits['period'],
                                        known_transits['duration'],
                                        known_transits['epoch']):
            time, flux, flux_err = mask(time, flux, flux_err, period, duration,
                                        T0)

    #::: global sigma clipping
    flux = sigma_clip(flux, sigma_upper=3, sigma_lower=20)

    #::: detrend (if requested)
    if detrend:
        flux = slide_clip(time, flux, **wotan_kwargs['slide_clip'])
        flux, trend = flatten(time,
                              flux,
                              return_trend=True,
                              **wotan_kwargs['flatten'])

        if options['show_plot'] or options['save_plot']:
            fig, axes = plt.subplots(2, 1, figsize=(40, 8))
            axes[0].plot(time, flux_input, 'b.', rasterized=True)
            axes[0].plot(time, trend, 'r-', lw=2)
            axes[0].set(ylabel='Flux (input)', xticklabels=[])
            axes[1].plot(time, flux, 'b.', rasterized=True)
            axes[1].set(ylabel='Flux (detrended)', xlabel='Time (BJD)')
        if options['save_plot']:
            fig.savefig(os.path.join(
                options['outdir'],
                'flux_' + wotan_kwargs['flatten']['method'] + '.pdf'),
                        bbox_inches='tight')
        if options['show_plot']:
            plt.show()
        else:
            plt.close(fig)

        X = np.column_stack((time, flux, flux_err, trend))
        np.savetxt(os.path.join(
            options['outdir'],
            'flux_' + wotan_kwargs['flatten']['method'] + '.csv'),
                   X,
                   delimiter=',',
                   header='time,flux_detrended,flux_err,trend')

        time_detrended = 1. * time
        flux_detrended = 1. * flux  #for plotting

    #::: search for the rest
    i = 0
    ind_trs = []
    while (SNR >= tls_kwargs['SNR_threshold']) and \
          (SDE >= tls_kwargs['SDE_threshold']) and \
          (FAP <= tls_kwargs['FAP_threshold']) and not FOUND_SIGNAL:

        model = tls(time, flux, flux_err)
        results = model.power(**tls_kwargs_original)

        if (results.snr >= tls_kwargs['SNR_threshold']) and (
                results.SDE >= tls_kwargs['SDE_threshold']) and (
                    results.FAP <= tls_kwargs['FAP_threshold']):

            #::: calculate the correct_duration, as TLS sometimes returns unreasonable durations
            ind_tr_phase = np.where(results['model_folded_model'] < 1.)[0]
            correct_duration = results['period'] * (
                results['model_folded_phase'][ind_tr_phase[-1]] -
                results['model_folded_phase'][ind_tr_phase[0]])

            #::: mark transit
            ind_tr, ind_out = index_transits(time_input, results['T0'],
                                             results['period'],
                                             correct_duration)
            ind_trs.append(ind_tr)

            #::: mask out detected transits and append results
            time1, flux1 = time, flux  #for plotting
            time, flux, flux_err = mask(time, flux, flux_err, results.period,
                                        1.5 * correct_duration, results.T0)
            results_all.append(results)

            #::: write TLS stats to file
            with open(
                    os.path.join(options['outdir'],
                                 'tls_signal_' + str(i) + '.txt'),
                    'wt') as out:
                pprint(results, stream=out)

            #::: individual TLS plots
            if options['show_plot'] or options['save_plot']:
                fig = plt.figure(figsize=(20, 8), tight_layout=True)
                gs = fig.add_gridspec(2, 3)

                ax = fig.add_subplot(gs[0, :])
                ax.plot(time1, flux1, 'b.', rasterized=True)
                ax.plot(results['model_lightcurve_time'],
                        results['model_lightcurve_model'],
                        'r-',
                        lw=3)
                ax.set(xlabel='Time (BJD)', ylabel='Flux')

                ax = fig.add_subplot(gs[1, 0])
                ax.plot(results['folded_phase'],
                        results['folded_y'],
                        'b.',
                        rasterized=True)
                ax.plot(results['model_folded_phase'],
                        results['model_folded_model'],
                        'r-',
                        lw=3)
                ax.set(xlabel='Phase', ylabel='Flux')

                ax = fig.add_subplot(gs[1, 1])
                ax.plot(
                    (results['folded_phase'] - 0.5) * results['period'] * 24,
                    results['folded_y'],
                    'b.',
                    rasterized=True)
                ax.plot((results['model_folded_phase'] - 0.5) *
                        results['period'] * 24,
                        results['model_folded_model'],
                        'r-',
                        lw=3)
                ax.set(xlim=[
                    -1.5 * correct_duration * 24, +1.5 * correct_duration * 24
                ],
                       xlabel='Time (h)',
                       yticks=[])

                ax = fig.add_subplot(gs[1, 2])
                ax.text(.02,
                        0.95,
                        'P = ' +
                        np.format_float_positional(results['period'], 4) +
                        ' d',
                        ha='left',
                        va='center',
                        transform=ax.transAxes)
                ax.text(
                    .02,
                    0.85,
                    'Depth = ' +
                    np.format_float_positional(1e3 *
                                               (1. - results['depth']), 4) +
                    ' ppt',
                    ha='left',
                    va='center',
                    transform=ax.transAxes)
                ax.text(.02,
                        0.75,
                        'Duration = ' +
                        np.format_float_positional(24 * correct_duration, 4) +
                        ' h',
                        ha='left',
                        va='center',
                        transform=ax.transAxes)
                ax.text(.02,
                        0.65,
                        'T_0 = ' +
                        np.format_float_positional(results['T0'], 4) + ' d',
                        ha='left',
                        va='center',
                        transform=ax.transAxes)
                ax.text(.02,
                        0.55,
                        'SNR = ' +
                        np.format_float_positional(results['snr'], 4),
                        ha='left',
                        va='center',
                        transform=ax.transAxes)
                ax.text(.02,
                        0.45,
                        'SDE = ' +
                        np.format_float_positional(results['SDE'], 4),
                        ha='left',
                        va='center',
                        transform=ax.transAxes)
                ax.text(.02,
                        0.35,
                        'FAP = ' +
                        np.format_float_scientific(results['FAP'], 4),
                        ha='left',
                        va='center',
                        transform=ax.transAxes)
                ax.set_axis_off()
                if options['save_plot']:
                    fig.savefig(os.path.join(options['outdir'],
                                             'tls_signal_' + str(i) + '.pdf'),
                                bbox_inches='tight')
                if options['show_plot']:
                    plt.show()
                else:
                    plt.close(fig)

        SNR = results.snr
        SDE = results.SDE
        FAP = results.FAP
        i += 1

    #::: full lightcurve plot
    if options['show_plot'] or options['save_plot']:

        if detrend:
            fig, axes = plt.subplots(2, 1, figsize=(40, 8), tight_layout=True)
            ax = axes[0]
            ax.plot(time_input,
                    flux_input,
                    'k.',
                    color='grey',
                    rasterized=True)
            ax.plot(time_input, trend, 'r-', lw=2)
            for number, ind_tr in enumerate(ind_trs):
                ax.plot(time_input[ind_tr],
                        flux_input[ind_tr],
                        marker='.',
                        linestyle='none',
                        label='signal ' + str(number))
            ax.set(ylabel='Flux (input)', xticklabels=[])
            ax.legend()

            ax = axes[1]
            ax.plot(time_detrended,
                    flux_detrended,
                    'k.',
                    color='grey',
                    rasterized=True)
            for number, ind_tr in enumerate(ind_trs):
                ax.plot(time_detrended[ind_tr],
                        flux_detrended[ind_tr],
                        marker='.',
                        linestyle='none',
                        label='signal ' + str(number))
            ax.set(ylabel='Flux (detrended)', xlabel='Time (BJD)')
            ax.legend()

        else:
            fig, ax = plt.subplots(1, 1, figsize=(40, 4), tight_layout=True)
            ax.plot(time_input,
                    flux_input,
                    'k.',
                    color='grey',
                    rasterized=True)
            ax.set(ylabel='Flux (input)', xlabel='Time (BJD)')
            for number, ind_tr in enumerate(ind_trs):
                ax.plot(time_input[ind_tr],
                        flux_input[ind_tr],
                        marker='.',
                        linestyle='none',
                        label='signal ' + str(number))
            ax.legend()

        if options['save_plot']:
            fig.savefig(os.path.join(options['outdir'], 'tls_signal_all.pdf'),
                        bbox_inches='tight')
        if options['show_plot']:
            plt.show()
        else:
            plt.close(fig)

    return results_all
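
A hedged usage sketch, assuming time, flux and flux_err are 1-D numpy arrays holding a normalized light curve; the keyword values shown simply restate the defaults from the code above:

results_all = tls_search(time, flux, flux_err,
                         wotan_kwargs={'flatten': {'method': 'biweight', 'window_length': 1.0}},
                         tls_kwargs={'SNR_threshold': 5.0},
                         options={'save_plot': True, 'outdir': 'tls_out'})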
Example #7
def match_ztf_message(job_info,
                      message_body,
                      message_time_epoch,
                      time_delta=10,
                      new_email_matching=False,
                      angular_separation=2):
    '''
    Check whether the given email matches the information passed from the log file via job_info.
    If a similar request was submitted prior to this one, the reported parameters may not match
    exactly; in that case leave new_email_matching=False, so that the function looks only for a
    close position, the relevant body text, and an email sent after the request.
    '''

    match = False

    #
    # Only continue if the message was received AFTER the job was submitted
    #
    if message_time_epoch < job_info['cdatetime'].to_list()[0]:

        return match

    message_lines = message_body.splitlines()

    for line in message_lines:

        #
        # Incomplete data product
        #
        if re.search("A request similar to yours is waiting to be processed",
                     line):
            match = False
            break  # Stop early if this isn't a finished data product

        if re.search("reqid", line):

            inputs = line.split('(')[-1]

            # Parameters appear in one of two formats, e.g.:
            # Processing has completed for reqid=XXXX (ra=..., dec=..., minJD=..., maxJD=...)
            test_ra = inputs.split('ra=')[-1].split(',')[0]
            test_decl = inputs.split('dec=')[-1].split(')')[0]
            if re.search('minJD', line) and re.search('maxJD', line):
                test_minjd = inputs.split('minJD=')[-1].split(',')[0]
                test_maxjd = inputs.split('maxJD=')[-1].split(',')[0]
            else:
                test_minjd = inputs.split('startJD=')[-1].split(',')[0]
                test_maxjd = inputs.split('endJD=')[-1].split(',')[0]

            if new_email_matching:

                # Call this a match only if all parameters match; the JD range
                # may match exactly or merely lie within time_delta.
                ra_match = np.format_float_positional(float(test_ra), precision=6, pad_right=6).replace(' ', '0') == job_info['ra'].to_list()[0]
                dec_match = np.format_float_positional(float(test_decl), precision=6, pad_right=6).replace(' ', '0') == job_info['dec'].to_list()[0]
                jd_exact = np.format_float_positional(float(test_minjd), precision=6, pad_right=6).replace(' ', '0') == job_info['jdstart'].to_list()[0] and \
                           np.format_float_positional(float(test_maxjd), precision=6, pad_right=6).replace(' ', '0') == job_info['jdend'].to_list()[0]
                jd_close = abs(float(test_minjd) - float(job_info['jdstart'].to_list()[0])) < time_delta and \
                           abs(float(test_maxjd) - float(job_info['jdend'].to_list()[0])) < time_delta

                if ra_match and dec_match and (jd_exact or jd_close):
                    match = True

            else:

                # Check if new and positions are similar
                submitted_skycoord = SkyCoord(job_info["ra"],
                                              job_info["dec"],
                                              frame='icrs',
                                              unit='deg')
                email_skycoord = SkyCoord(test_ra,
                                          test_decl,
                                          frame='icrs',
                                          unit='deg')
                if submitted_skycoord.separation(email_skycoord).arcsecond < angular_separation and \
                    message_time_epoch > job_info['cdatetime'].to_list()[0]:

                    match = True

    return match
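
For reference, this is what the precision-6 normalization used in the parameter comparison produces (pad_right fills with spaces, which are then turned into zeros):

np.format_float_positional(245800.0, precision=6, pad_right=6).replace(' ', '0')
# -> '245800.000000'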
Example #8
def get_avg_daily_ac_power(self):
    total = self.ac_power.values.sum()
    avg = total / self.forecast_length
    return np.format_float_positional(avg, precision=3)
Example #9
def draw_basemap(fig, ax, df_lon, df_lat, list_season, seasons, bathy, gs, ss,
                 milo, malo, mila, mala, cont, which_method, gridsize):
    # if bathy=True, plot bathy
    ax = fig.sca(ax)
    m = Basemap(projection='stere',lon_0=(milo+malo)/2,lat_0=(mila+mala)/2,lat_ts=0,llcrnrlat=mila,urcrnrlat=mala,\
                llcrnrlon=milo,urcrnrlon=malo,rsphere=6371200.,resolution='l',area_thresh=100)
    #m.drawcoastlines()
    m.fillcontinents(color='grey', lake_color='grey')
    if (ax == ax1) or (ax == ax3):  #draw parallels
        parallels = np.arange(mila, mala, 3)
        m.drawparallels(parallels,
                        labels=[1, 0, 0, 0],
                        fontsize=10,
                        linewidth=0.0)
    if (ax == ax3) or (ax == ax4):  # draw meridians
        meridians = np.arange(milo + 2, malo, 4)
        m.drawmeridians(meridians,
                        labels=[0, 0, 0, 1],
                        fontsize=10,
                        linewidth=0.0)
    if which_method == 'scatter':
        a = m.scatter(df_lon,
                      df_lat,
                      c=list_season,
                      cmap='coolwarm',
                      marker='o',
                      linewidths=0.01,
                      latlon=True)
    else:
        print('binning data')
        xi = np.arange(milo, malo, gridsize)  # longitude grid
        yi = np.arange(mila, mala, gridsize)  # latitude grid
        X, Y, Z = sh_bindata(df_lon, df_lat, list_season, xi, yi)
        X, Y = m(X.flatten(), Y.flatten())
        print('finally adding scatter plot ')
        a = m.scatter(X, Y, c=Z, cmap='coolwarm', marker='o', linewidths=0.01)
        a.set_clim(-1 * maxdiff_ignore, maxdiff_ignore)
    if bathy:  # get some detailed bathymetry from USGS
        add_isobath(m, gs, ss, cont)
    mx, my = m(milo + .2, mala - .5)
    #plt.text(mx+50000,my-110000,seasons,color='w',fontsize=18,fontweight='bold')
    plt.text(mx, my, seasons, color='w', fontsize=18, fontweight='bold')
    plt.text(mx,
             my - 100000,
             'mean=' + str(
                 np.format_float_positional(np.mean(list_season),
                                            precision=2,
                                            unique=False,
                                            fractional=False)),
             color='w',
             fontsize=14,
             fontweight='bold')
    plt.text(mx,
             my - 180000,
             'RMS = ' + str(
                 np.format_float_positional(np.sqrt(
                     np.mean(np.square(list_season))),
                                            precision=2,
                                            unique=False,
                                            fractional=False)),
             color='w',
             fontsize=14,
             fontweight='bold')
    return a
Example #10
def test_format_float_positional_python_scalar(self):
    x = 1.0
    assert cupy.format_float_positional(
        x) == numpy.format_float_positional(x)
Example #11
def test_format_float_positional(self):
    a = testing.shaped_arange((1, ), cupy)
    b = testing.shaped_arange((1, ), numpy)
    assert cupy.format_float_positional(
        a) == numpy.format_float_positional(b)
Example #12
def format_float(x):
    formatted = numpy.format_float_positional(x, unique=True)
    if formatted[-1] == '.':
        return formatted + '0'
    return formatted
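
In unique mode numpy prints integer-valued floats with a bare trailing point (see the Dragon4 tests in Example #20), which this helper pads back out:

format_float(1.0)   # numpy alone gives '1.'; the helper returns '1.0'
format_float(0.25)  # -> '0.25' (unchanged)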
Example #13
                 positive_scalar_factor=1,
                 negative_scalar_factor=1)
grid.run_value_iteration()
reward = grid.reward[1:4, 1:5]
value = grid.state_value_function[1:4, 1:5]
policy = grid.policy[1:4, 1:5]
print("State Value function = \n", value)
print("Action Value function = \n", grid.action_value_function[1:4,
                                                               1:5, :])
print("Policy = \n", policy)
fig1 = plt.figure(1, figsize=(16, 6))
ax1 = fig1.add_subplot(121)
im1 = ax1.imshow(policy, cmap='rainbow')
for (j, i), label in np.ndenumerate(policy):
    if label == -1:
        label_text = np.format_float_positional(reward[j, i], precision=3)
    elif label == 0:
        label_text = '\u27a1'
    elif label == 1:
        label_text = '\u2b07'
    elif label == 2:
        label_text = '\u2b05'
    else:
        label_text = '\u2b06'
    ax1.text(i, j, label_text, ha='center', va='center', fontsize=28)
ax2 = fig1.add_subplot(122)
im2 = ax2.imshow(value, cmap='rainbow')
for (j, i), label in np.ndenumerate(value):
    label_text = np.format_float_positional(label, precision=3)
    ax2.text(i, j, label_text, ha='center', va='center', fontsize=28)
# plt.title('Reward and state value function', x=0, y=1.1, fontdict = {'fontsize' : 15})
Example #14
        drawnow(makeFig)  # plot average after window

        ser = None  # Closes port
        print('Returning to Main Menu...')
        state = 0

    elif state == 3:
        print('-------------Help-------------')
        print(
            'This program can receive/send information over a serial protocol at a configurable baudrate'
        )
        print(
            'To change baudrate and port, please change BAUD and COM respectively in code'
        )
        print('Returning to Main Menu...')
        state = 0

    elif state == 1:
        coeff = list(filterDesign())
        for i in range(Order):
            coeff[i] = np.format_float_positional(coeff[i])
        state = 5

    elif state == 4:
        state = -1
        ser = None  # Closes port

    else:
        print('Command not recognized')
        print('Returning to Main Menu...')
Example #15
# rename (year rpp_all_items) (rpp_all_items year)
# save "$int_data/rpp_msa_long.dta", replace
bea_msa_year_list = [int(x) for x in bea_rpp_msa.columns.to_list() if x.isdigit()]
bea_rpp_msa_processed = pd.DataFrame([], columns=['MSATitle', 'year', 'rpp_all_items', 'met2013'], dtype='object')
for geo_name in bea_rpp_msa.loc[:, 'GeoName'].unique():
  for year in bea_msa_year_list:
    raw_val = bea_rpp_msa[bea_rpp_msa['GeoName']==geo_name][str(year)].values[0]
    val = None
    if isinstance(raw_val, str):
      try:
        val = float(raw_val)
      except ValueError:
        # Drop this MSA entirely if any year's value is non-numeric.
        bea_rpp_msa_processed = bea_rpp_msa_processed[bea_rpp_msa_processed['MSATitle'] != geo_name]
        break
    else: 
      val = np.format_float_positional(raw_val)
    met2013 = bea_rpp_msa[bea_rpp_msa['GeoName']==geo_name]['GeoFIPS'].values[0].replace('"', '')
    bea_rpp_msa_processed = bea_rpp_msa_processed.append({
      "MSATitle": geo_name,
      "year": year,
      "rpp_all_items": val,
      "met2013": met2013
    }, ignore_index=True)
bea_rpp_msa_processed.loc[:, 'year'] = bea_rpp_msa_processed['year'].astype('int32')
bea_rpp_msa_processed.loc[:, 'met2013'] = bea_rpp_msa_processed['met2013'].astype('int32')
bea_rpp_msa_processed.loc[:, 'rpp_all_items'] = bea_rpp_msa_processed['rpp_all_items'].astype('float64')
bea_rpp_msa_processed.to_stata(os.path.join(int_data, 'rpp_msa_long_test.dta'), write_index=False)


# /*******************
# * Main Analysis
Example #16
def output_edges_(edge, length, resolution):
    str_edges = '<edges>\n'

    r = length / (2 * pi)
    edgelen = length / 4.

    edges = [{
        "id": "bottom", "type": "edgeType", "from": "bottom", "to": "right",
        "length": edgelen,
        "shape":
        [(r * cos(t), r * sin(t)) for t in linspace(-pi / 2, 0, resolution)]
    }, {
        "id": "right", "type": "edgeType", "from": "right", "to": "top",
        "length": edgelen,
        "shape":
        [(r * cos(t), r * sin(t)) for t in linspace(0, pi / 2, resolution)]
    }, {
        "id": "top", "type": "edgeType", "from": "top", "to": "left",
        "length": edgelen,
        "shape":
        [(r * cos(t), r * sin(t)) for t in linspace(pi / 2, pi, resolution)]
    }, {
        "id": "left", "type": "edgeType", "from": "left", "to": "bottom",
        "length": edgelen,
        "shape":
        [(r * cos(t), r * sin(t)) for t in linspace(pi, 3 * pi / 2, resolution)]
    }]
    for e in edges:
        shape = ""
        for t in range(len(e['shape'])):
            shape += '%s,%s' % (
                np.format_float_positional(e['shape'][t][0], trim='-'),
                np.format_float_positional(e['shape'][t][1], trim='-'))
            if t < len(e['shape']) - 1:
                shape += ' '
        str_edges += get_edge_str_(edge, e['id'], e['from'], e['to'], 'a',
                                   shape)
    str_edges += '</edges>\n'
    print('Number of edges: ', len(edges))
    return str_edges
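
For the shape strings, trim='-' drops the trailing point on whole-number coordinates:

np.format_float_positional(100.0, trim='-')  # -> '100'
np.format_float_positional(0.5, trim='-')    # -> '0.5'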
Example #17
def obtusify(val, idims, SI, pref, loops = 2, maxvalord = None, minvalord = None, maxprefs = None, spread = None):
    '''
    Given a quantity in SI base units, returns an equivalent value in an obtuse combination of
      derived SI units and prefixes

    Args:
      val   -- float : numerical value of quantity in base units
      idims -- (4,) : tuple corresponding to dimensionality of input quantity (M,L,T,I)
      SI    -- (n,5) struct ndarray : information on SI derived units, see config
      pref  -- (n,2) struct ndarray : indicating a set of SI prefixes to superfluously add

    Kwargs:
      loops       -- int : Number of derived units to be present in the obfuscation. Set it irresponsibly high!
      maxvalord   -- int : If not None, maximum order of output numeric value
      minvalord   -- int : If not None, minimum order of output numeric value
      maxprefs    -- int : If not None, maximum number of SI prefixes to include in output
      spread      -- float [0,1) : If not None, creates a bias towards including more individual unit names
                       in the output (vs. fewer unique names with higher powers). Strong interaction with loops.

    Returns:
      ostring -- string : quantity equal to input, expressed obtusely

    Sample Usage:
        # Obtusification of 8024 metres
        obtusify(8024, (0,1,0,0), config.SI, config.pref) --> "8.024 microsievert-kilograms per nanonewton"
    '''

    #Basic input checks
    if maxvalord is None: maxvalord =  np.inf
    if minvalord is None: minvalord = -np.inf
    if maxvalord - minvalord < 0:
        raise Exception("You set the output value order minimum higher than the maximum. "
                        "Don't think I don't see you trying to crash the prefix algorithm.")

    #Pick a random non-zero positive dimension
    ind = random.choice([i for i,x in enumerate(idims) if x > 0])

    #Pick a random SI derived unit which is nonzero positive in this dimension (ensures at least one unit on numerator)
    SIind = random.choice([i for i,x in enumerate(SI['dims']) if x[ind] > 0])
    SI['count'][SIind] += 1

    #Check which dimensions remain to be filled
    wSI = [np.array(x)*y for x,y in zip(SI['dims'], SI['count'])]
    remainder = idims - np.sum(wSI, axis=0)


    #Loop while dimensions are not matched
    i = 0
    while i < loops:
        i += 1

        #Find the derived unit which would most decrease the remainder if added to the numerator
        diffs = np.sum(np.abs([list(x) - remainder for x in SI['dims']]), axis=1).astype(np.float32)
        diffs[SI['count'] < 0] = np.nan #Ignore options which would simply cancel out existing units
        n,ndiff = np.where(diffs == np.nanmin(diffs))[0][0], np.nanmin(diffs)
        #Find the derived unit which would most decrease the remainder if added to the denominator
        diffs = np.sum(np.abs([list(x) + remainder for x in SI['dims']]), axis=1).astype(np.float32)
        diffs[SI['count'] > 0] = np.nan
        d,ddiff = np.where(diffs == np.nanmin(diffs))[0][0], np.nanmin(diffs) #when added to the denominator

        #Sometimes selecting the unit which best moves the dimensionality towards the target actually produces
        #  lackluster results, e.g. feedback loops of two units undoing each other around the target.
        #These options are ways to introduce bias for more interesting results
        if spread is not None and loops - i > 3:
            #Adds a chance to instead select a random unused unit if possible
            if np.random.rand() > spread and 0 in SI['count']:
                uu = random.choice([i for i,x in enumerate(SI['count']) if x == 0])
                if np.random.rand() > .5:
                    SI['count'][uu] += 1 #Slap it on the numerator
                else:
                    SI['count'][uu] -= 1 #Slap it on the denominator
        else:
            if ndiff <= ddiff:
                SI['count'][n] += 1
            else:
                SI['count'][d] -= 1

        #Check which dimensions remain to be filled
        wSI = [np.array(x)*y for x,y in zip(SI['dims'], SI['count'])]
        remainder = idims - np.sum(wSI, axis=0)

    #Assign SI prefixes
    nslots = np.sum(SI['count'] > 0) + np.sum(remainder > 0) #Number of slots available on numerator
    dslots = np.sum(SI['count'] < 0) + np.sum(remainder < 0) #Number of slots available on denominator
    pn = np.full(nslots, None)
    pd = np.full(dslots, None)

    #Bound the number of prefixes to be added
    if maxprefs is not None:
        nempty, dempty = np.sum(pn == None), np.sum(pd == None)
        while nempty + dempty > maxprefs:
            #Fill a random slot with an empty prefix (uniform selection across all slots)
            ratio = nempty/(nempty + dempty)
            if np.random.rand() < ratio:
                i = [i for i,x in enumerate(pn) if x == None]
                pn[random.choice(i)] = 0
            else:
                i = [i for i,x in enumerate(pd) if x == None]
                pd[random.choice(i)] = 0
            nempty, dempty = np.sum(pn == None), np.sum(pd == None)

    #Assign arbitrary prefixes to open slots, avoiding dupes
    for i in [i for i,x in enumerate(pn) if x == None]:
        pn[i] = random.choice(pref['mag'])
    for i in [i for i,x in enumerate(pd) if x == None]:
        pd[i] = random.choice(pref['mag'])

    #Unit exponents (and thus prefix powers)
    en =  [ i for i in SI['count'][SI['count'] > 0]] #Derived units
    en += [ i for i in remainder if i > 0]           #Base units
    ed =  [-i for i in SI['count'][SI['count'] < 0]] #Derived units
    ed += [-i for i in remainder if i < 0]           #Base units

    #Lower/raise prefixes until within user tolerance
    current_ord = sum([p*e for p,e in zip(pn,en)]) - sum([p*e for p,e in zip(pd,ed)]) - np.floor(np.log10(val))
    ratio = len(pn)/ (len(pn) + len(pd))
    numprobs = [e if p!=0 else 0 for e,p in zip(en,pn)] #Favour modifying high power values (no one wants to see "yoctometres octed" (I kinda do though))
    numprobs = np.array(numprobs)/np.sum(np.abs(numprobs)) #  Additionally, don't select empty prefixes for change to prevent conflict with maxprefs
    denprobs = [e if p!=0 else 0 for e,p in zip(ed,pd)] #Favour modifying high power values (no one wants to see "yoctometres octed" (I kinda do though))
    denprobs = np.array(denprobs)/np.sum(np.abs(denprobs)) #  Additionally, don't select empty prefixes for change to prevent conflict with maxprefs
    t = 0
    while current_ord > maxvalord or current_ord < minvalord:
        t+=1
        if t>100:
            print("Error, unable to meet user tolerance for output value magnitude within %i iteration attempts"%t)
            print("Keywords minvalord/maxvalord most likely define an extremely narrow range")
            raise Exception()
        if current_ord > maxvalord:
            if np.random.rand() < ratio:
                #Go down one prefix in a numerator slot
                i = np.random.choice(range(len(pn)), p=numprobs)
                newprefs = pref['mag'][pref['mag'] < pn[i]]
                if len(newprefs) == 0: continue #If there is no such prefix, reiterate
                pn[i] = min(newprefs, key=lambda x:abs(x-pn[i]))
            else:
                #Go up one prefix in a denominator slot
                i = np.random.choice(range(len(pd)), p=denprobs)
                newprefs = pref['mag'][pref['mag'] > pd[i]]
                if len(newprefs) == 0: continue
                pd[i] = min(newprefs, key=lambda x:abs(x-pd[i]))
        else:
            if np.random.rand() < ratio:
                #Go up one prefix in a numerator slot
                i = np.random.choice(range(len(pn)), p=numprobs)
                newprefs = pref['mag'][pref['mag'] > pn[i]]
                if len(newprefs) == 0: continue
                pn[i] = min(newprefs, key=lambda x:abs(x-pn[i]))
            else:
                #Go down one prefix in a denominator slot
                i = np.random.choice(range(len(pd)), p=denprobs)
                newprefs = pref['mag'][pref['mag'] < pd[i]]
                if len(newprefs) == 0: continue
                pd[i] = min(newprefs, key=lambda x:abs(x-pd[i]))
        current_ord = sum([p*e for p,e in zip(pn,en)]) - sum([p*e for p,e in zip(pd,ed)]) - np.floor(np.log10(val))

    #Calculate new numerical value of quantity
    oval = val / (10**np.floor(np.log10(val))) * 10**-current_ord

    #Convert numerical prefix orders to their corresponding strings
    pn = [pref['name'][pref['mag'] == i][0] for i in pn]
    pd = [pref['name'][pref['mag'] == i][0] for i in pd]
    if len(pd) > 0: pd[0] = ' per ' + pd[0]


    #Convert exponent values to their corresponding strings
    enames = {1:'', 2:'s-squared', 3:'s-cubed', 4:'s-quarted', 5: 's-quinted', 6: 's-sexted', 7: 's-hepted', 8: 's-octed',
              9: 'nonned????', 10: 'dude', 11: 'stop', 12: 'the', 13 : 'words', 14: ' don\'t', 15: 'even', 16: 'go', 17: 'this', 18:'high'} #If you get a key error on this you requested a BEEG obfuscation
    en = [enames[i] for i in en]
    ed = [enames[i] for i in ed]

    #Unit strings
    bnames = {0:'kilogram', 1: 'metre', 2: 'second', 3: 'ampere'}
    un =  list(SI['uname'][SI['count'] > 0])                     #Derived units
    un += [bnames[i] for i,x in enumerate(remainder) if x > 0]   #Base units
    ud =  list(SI['uname'][SI['count'] < 0])
    ud += [bnames[i] for i,x in enumerate(remainder) if x < 0]

    #Construct final value string
    ustring = ''
    for p,u,e in zip(pn, un, en):
        ustring += '-%s%s%s'%(p,u,e)
    ustring += 's' #Pluralize
    for p,u,e in zip(pd, ud, ed):
        ustring += '%s%s%s-'%(p,u,e)
    ustring = ustring[1:] #Chop off superfluous hyphens
    if len(pd) != 0:
        ustring = ustring[:-1]

    #Round oval for readability
    oval  = np.format_float_positional(oval, precision=4, unique=False, fractional=False, trim='k')
    ostring = str(oval) + ' ' + ustring

    #Cleanup plurals
    ostring = ostring.replace('hertzs', 'hertz')
    ostring = ostring.replace('henrys', 'henries')

    return ostring
Example #18
    def run(self):

        model_name = self.transaction.persistent_model_metadata.model_name
        self.train_start_time = time.time()
        self.session.logging.info(
            'Training: model {model_name}, epoch 0'.format(
                model_name=model_name))

        self.last_time = time.time()

        # We moved everything to a worker so we can run many of these in parallel
        # Todo: use Ray https://github.com/ray-project/tutorial

        ret_diffs = PredictWorker.start(self.transaction.model_data,
                                        model_name=model_name)

        confusion_matrices = self.transaction.persistent_ml_model_info.confussion_matrices

        self.transaction.output_data.columns = self.transaction.input_data.columns
        # TODO: This may be inefficient; try to work on the same pointer
        self.transaction.output_data.data_array = self.transaction.input_data.data_array
        self.transaction.output_data.predicted_columns = self.transaction.metadata.model_predict_columns
        for diff in ret_diffs:
            for col in diff['ret_dict']:
                confusion_matrix = confusion_matrices[col]
                col_index = self.transaction.input_data.columns.index(col)
                self.transaction.output_data.columns.insert(
                    col_index + 1, KEY_CONFIDENCE)
                offset = diff['start_pointer']
                group_pointer = diff['group_pointer']
                column_pointer = diff['column_pointer']
                for j, cell in enumerate(diff['ret_dict'][col]):
                    #TODO: This may be calculated just as j+offset
                    if not cell:
                        continue
                    actual_row = self.transaction.model_data.predict_set_map[
                        group_pointer][j + offset]
                    if not self.transaction.output_data.data_array[actual_row][
                            col_index] or self.transaction.output_data.data_array[
                                actual_row][col_index] == '':

                        if self.transaction.persistent_model_metadata.column_stats[
                                col][KEYS.DATA_TYPE] == DATA_TYPES.NUMERIC:
                            target_val = np.format_float_positional(
                                cell, precision=2)
                        else:
                            target_val = cell
                        self.transaction.output_data.data_array[actual_row][
                            col_index] = target_val
                        confidence = self.getConfidence(cell, confusion_matrix)
                        #randConfidence = random.uniform(0.85, 0.93)

                        self.transaction.output_data.data_array[
                            actual_row].insert(col_index + 1, confidence)
                    else:
                        self.transaction.output_data.data_array[
                            actual_row].insert(col_index + 1, 1.0)

        total_time = time.time() - self.train_start_time
        self.session.logging.info(
            'Trained: model {model_name} [OK], TOTAL TIME: {total_time:.2f} seconds'
            .format(model_name=model_name, total_time=total_time))

Example #19
def cross_validation(
    estimator,
    estimator_name,
    X,
    y,
    metrics,
) -> List[str]:
    """Run cross validation to the chosen estimator.

    :param estimator: object to which apply fit and predict, to obtain results.
    :param estimator_name: name of the chosen estimator.
    :param X: dataset values without the labels.
    :param y: dataset labels.
    :param metrics: metrics which will be applied to the estimator.
    :returns: a dictionary with the obtained results.

    """
    # pylint: disable=too-many-locals,invalid-name,redefined-outer-name
    DGPLOGGER.debug("Training %s...", estimator_name)
    DGPLOGGER.debug("Estimator: %s", str(estimator))
    result = {"estimator": estimator_name}
    result["fit_times"] = np.zeros((N_SPLITS, ))
    for metric_name in metrics.keys():
        result[f"train_{metric_name}"] = np.zeros((N_SPLITS, ))
        result[f"test_{metric_name}"] = np.zeros((N_SPLITS, ))

    for fold, (train_idx, test_idx) in enumerate(K_FOLD.split(X=X, y=y)):
        DGPLOGGER.info("Fold %d/%d", fold + 1, N_SPLITS)

        X_train, y_train = X[train_idx], y[train_idx]
        X_test, y_test = X[test_idx], y[test_idx]

        DGPLOGGER.debug("Estimator fit...")
        start_time = time.perf_counter()
        estimator.fit(X_train, y_train)
        fit_time = time.perf_counter() - start_time
        DGPLOGGER.debug("Estimator fit... Done in %.5f seconds", fit_time)

        result["fit_times"][fold] = fit_time
        DGPLOGGER.debug("Predicting train labels...")
        y_pred_train = estimator.predict(X_train)
        DGPLOGGER.debug("Predicting test labels...")
        y_pred_test = estimator.predict(X_test)

        for metric_name, (func, kwargs) in metrics.items():
            DGPLOGGER.debug("Obtaining train metric: %s...", metric_name)
            result[f"train_{metric_name}"][fold] = func(
                y_train, y_pred_train, **kwargs)
            DGPLOGGER.debug(
                "Obtaining train metric: %s... Done: %s",
                metric_name,
                result[f"train_{metric_name}"][fold],
            )
            DGPLOGGER.debug("Obtaining test metric: %s...", metric_name)
            result[f"test_{metric_name}"][fold] = func(y_test, y_pred_test,
                                                       **kwargs)
            DGPLOGGER.debug(
                "Obtaining test metric: %s... Done: %s",
                metric_name,
                result[f"test_{metric_name}"][fold],
            )

    DGPLOGGER.debug("Training %s... Done", estimator_name)

    output_list = [
        result.pop("estimator"),
        np.format_float_positional(result.pop("fit_times").mean(),
                                   precision=2,
                                   unique=False),
    ]
    for metric_name in sorted(metrics.keys()):
        output_list.append(
            np.format_float_positional(
                result[f"train_{metric_name}"].mean(),
                precision=6,
                unique=False,
            ))
        output_list.append(
            np.format_float_positional(result[f"train_{metric_name}"].std(),
                                       precision=6,
                                       unique=False))
        output_list.append(
            np.format_float_positional(result[f"test_{metric_name}"].mean(),
                                       precision=6,
                                       unique=False))
        output_list.append(
            np.format_float_positional(result[f"test_{metric_name}"].std(),
                                       precision=6,
                                       unique=False))

    return output_list
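
A hedged usage sketch, assuming the module-level N_SPLITS, K_FOLD and DGPLOGGER referenced above exist, and that estimator, X and y are a scikit-learn estimator and data arrays; it illustrates the (callable, kwargs) structure expected for metrics:

from sklearn.metrics import accuracy_score, f1_score

metrics = {
    'accuracy': (accuracy_score, {}),
    'f1': (f1_score, {'average': 'macro'}),
}
row = cross_validation(estimator, 'my-estimator', X, y, metrics)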
Example #20
    def test_dragon4(self):
        # these tests are adapted from Ryan Juckett's dragon4 implementation,
        # see dragon4.c for details.

        fpos32 = lambda x, **k: np.format_float_positional(np.float32(x), **k)
        fsci32 = lambda x, **k: np.format_float_scientific(np.float32(x), **k)
        fpos64 = lambda x, **k: np.format_float_positional(np.float64(x), **k)
        fsci64 = lambda x, **k: np.format_float_scientific(np.float64(x), **k)

        preckwd = lambda prec: {"unique": False, "precision": prec}

        assert_equal(fpos32("1.0"), "1.")
        assert_equal(fsci32("1.0"), "1.e+00")
        assert_equal(fpos32("10.234"), "10.234")
        assert_equal(fpos32("-10.234"), "-10.234")
        assert_equal(fsci32("10.234"), "1.0234e+01")
        assert_equal(fsci32("-10.234"), "-1.0234e+01")
        assert_equal(fpos32("1000.0"), "1000.")
        assert_equal(fpos32("1.0", precision=0), "1.")
        assert_equal(fsci32("1.0", precision=0), "1.e+00")
        assert_equal(fpos32("10.234", precision=0), "10.")
        assert_equal(fpos32("-10.234", precision=0), "-10.")
        assert_equal(fsci32("10.234", precision=0), "1.e+01")
        assert_equal(fsci32("-10.234", precision=0), "-1.e+01")
        assert_equal(fpos32("10.234", precision=2), "10.23")
        assert_equal(fsci32("-10.234", precision=2), "-1.02e+01")
        assert_equal(
            fsci64("9.9999999999999995e-08", **preckwd(16)), "9.9999999999999995e-08"
        )
        assert_equal(
            fsci64("9.8813129168249309e-324", **preckwd(16)), "9.8813129168249309e-324"
        )
        assert_equal(
            fsci64("9.9999999999999694e-311", **preckwd(16)), "9.9999999999999694e-311"
        )

        # test rounding
        # 3.1415927410 is closest float32 to np.pi
        assert_equal(fpos32("3.14159265358979323846", **preckwd(10)), "3.1415927410")
        assert_equal(
            fsci32("3.14159265358979323846", **preckwd(10)), "3.1415927410e+00"
        )
        assert_equal(fpos64("3.14159265358979323846", **preckwd(10)), "3.1415926536")
        assert_equal(
            fsci64("3.14159265358979323846", **preckwd(10)), "3.1415926536e+00"
        )
        # 299792448 is closest float32 to 299792458
        assert_equal(fpos32("299792458.0", **preckwd(5)), "299792448.00000")
        assert_equal(fsci32("299792458.0", **preckwd(5)), "2.99792e+08")
        assert_equal(fpos64("299792458.0", **preckwd(5)), "299792458.00000")
        assert_equal(fsci64("299792458.0", **preckwd(5)), "2.99792e+08")

        assert_equal(
            fpos32("3.14159265358979323846", **preckwd(25)),
            "3.1415927410125732421875000",
        )
        assert_equal(
            fpos64("3.14159265358979323846", **preckwd(50)),
            "3.14159265358979311599796346854418516159057617187500",
        )
        assert_equal(fpos64("3.14159265358979323846"), "3.141592653589793")

        # smallest numbers
        assert_equal(
            fpos32(0.5 ** (126 + 23), unique=False, precision=149),
            "0.00000000000000000000000000000000000000000000140129846432"
            "4817070923729583289916131280261941876515771757068283889791"
            "08268586060148663818836212158203125",
        )
        assert_equal(
            fpos64(0.5 ** (1022 + 52), unique=False, precision=1074),
            "0.00000000000000000000000000000000000000000000000000000000"
            "0000000000000000000000000000000000000000000000000000000000"
            "0000000000000000000000000000000000000000000000000000000000"
            "0000000000000000000000000000000000000000000000000000000000"
            "0000000000000000000000000000000000000000000000000000000000"
            "0000000000000000000000000000000000049406564584124654417656"
            "8792868221372365059802614324764425585682500675507270208751"
            "8652998363616359923797965646954457177309266567103559397963"
            "9877479601078187812630071319031140452784581716784898210368"
            "8718636056998730723050006387409153564984387312473397273169"
            "6151400317153853980741262385655911710266585566867681870395"
            "6031062493194527159149245532930545654440112748012970999954"
            "1931989409080416563324524757147869014726780159355238611550"
            "1348035264934720193790268107107491703332226844753335720832"
            "4319360923828934583680601060115061698097530783422773183292"
            "4790498252473077637592724787465608477820373446969953364701"
            "7972677717585125660551199131504891101451037862738167250955"
            "8373897335989936648099411642057026370902792427675445652290"
            "87538682506419718265533447265625",
        )

        # largest numbers
        assert_equal(
            fpos32(np.finfo(np.float32).max, **preckwd(0)),
            "340282346638528859811704183484516925440.",
        )
        assert_equal(
            fpos64(np.finfo(np.float64).max, **preckwd(0)),
            "1797693134862315708145274237317043567980705675258449965989"
            "1747680315726078002853876058955863276687817154045895351438"
            "2464234321326889464182768467546703537516986049910576551282"
            "0762454900903893289440758685084551339423045832369032229481"
            "6580855933212334827479782620414472316873817718091929988125"
            "0404026184124858368.",
        )
        # Warning: In unique mode only the integer digits necessary for
        # uniqueness are computed, the rest are 0. Should we change this?
        assert_equal(
            fpos32(np.finfo(np.float32).max, precision=0),
            "340282350000000000000000000000000000000.",
        )

        # test trailing zeros
        assert_equal(fpos32("1.0", unique=False, precision=3), "1.000")
        assert_equal(fpos64("1.0", unique=False, precision=3), "1.000")
        assert_equal(fsci32("1.0", unique=False, precision=3), "1.000e+00")
        assert_equal(fsci64("1.0", unique=False, precision=3), "1.000e+00")
        assert_equal(fpos32("1.5", unique=False, precision=3), "1.500")
        assert_equal(fpos64("1.5", unique=False, precision=3), "1.500")
        assert_equal(fsci32("1.5", unique=False, precision=3), "1.500e+00")
        assert_equal(fsci64("1.5", unique=False, precision=3), "1.500e+00")
        # gh-10713
        assert_equal(
            fpos64("324", unique=False, precision=5, fractional=False), "324.00"
        )
Example #21
def ztf_forced_photometry(ra,
                          decl,
                          jdstart=None,
                          jdend=None,
                          days=60,
                          send=True,
                          verbose=True):

    # Wget is required for the ZTF forced photometry request submission
    wget_installed = wget_check()
    if not wget_installed:
        return None

    #
    # Set dates
    #
    if jdend is None:
        jdend = Time(datetime.utcnow(), scale='utc').jd

    if jdstart is None:
        jdstart = jdend - days

    if ra is not None and decl is not None:

        # Check whether ra and decl are decimal degrees
        try:
            # These raise ValueError if the inputs are not numeric
            float(ra)
            float(decl)
            skycoord = SkyCoord(ra, decl, frame='icrs', unit='deg')

        # Else assume sexagesimal
        except Exception:
            skycoord = SkyCoord(ra,
                                decl,
                                frame='icrs',
                                unit=(u.hourangle, u.deg))

        # Convert to strings to keep the same precision; this makes matching
        # easier when submitting multiple jobs.
        jdend_str = np.format_float_positional(float(jdend), precision=6)
        jdstart_str = np.format_float_positional(float(jdstart), precision=6)
        ra_str = np.format_float_positional(float(skycoord.ra.deg),
                                            precision=6)
        decl_str = np.format_float_positional(float(skycoord.dec.deg),
                                              precision=6)

        log_file_name = random_log_file_name()  # Unique file name

        if verbose:
            print(f"Sending ZTF request for (R.A.,Decl)=({ra},{decl})")

        wget_command = f"wget --http-user={_ztfuser} --http-passwd={_ztfinfo} -O {log_file_name} \"https://ztfweb.ipac.caltech.edu/cgi-bin/requestForcedPhotometry.cgi?" + \
                       f"ra={ra_str}&" + \
                       f"dec={decl_str}&" + \
                       f"jdstart={jdstart_str}&" +\
                       f"jdend={jdend_str}&" + \
                       f"email={_ztffp_user_address}&userpass={_ztffp_user_password}\""

        if verbose:
            print(wget_command)

        if send:

            p = subprocess.Popen(wget_command,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 shell=True)
            stdout, stderr = p.communicate()

            if verbose:
                print(stdout.decode('utf-8'))

        return log_file_name

    else:

        if verbose:
            print("Missing necessary R.A. or declination.")
        return None
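The snippet above routes every coordinate and date through np.format_float_positional(..., precision=6) so the strings embedded in the request URL stay stable regardless of how Python happens to repr the float. A minimal sketch of the difference, using a made-up Julian date:

import numpy as np

jd = 2459000.123456789  # hypothetical value, for illustration only
print(str(jd))                                      # 2459000.123456789
print(np.format_float_positional(jd, precision=6))  # 2459000.123457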
Example #22
def format_raw(raw):
    output = np.format_float_positional(raw, precision=4, trim='-')
    # doesn't seem to trim properly
    if output[-1] == '.':
        output = output[:-1]
    return output
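For reference, numpy documents four trim modes for format_float_positional; '-' is specified to drop both trailing zeros and a trailing decimal point, which makes the explicit '.' check above a defensive guard. A small sketch of the modes:

import numpy as np

kw = dict(unique=False, precision=4)
print(np.format_float_positional(1.0, trim='k', **kw))  # 1.0000 (keep zeros and point)
print(np.format_float_positional(1.0, trim='.', **kw))  # 1.     (trim zeros, keep point)
print(np.format_float_positional(1.0, trim='0', **kw))  # 1.0    (keep one zero after the point)
print(np.format_float_positional(1.0, trim='-', **kw))  # 1      (trim zeros and point)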
Example #23
def pretty_print_decimal_places(lr):
    # format_float_positional already returns a string, so no str() wrapper is needed
    return np.format_float_positional(lr).split('.')[1]
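Note that with the default trim mode the positional output always contains a decimal point, so the split never raises, but for an integer-valued float the fractional part comes back empty. A quick sketch:

import numpy as np

print(np.format_float_positional(0.001))  # 0.001  -> split('.')[1] == '001'
print(np.format_float_positional(1.0))    # 1.     -> split('.')[1] == ''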
Example #24
def numforce_float(num) -> float:
    return enforce_float(
        np.format_float_positional(num, unique=True, trim='-', precision=10))
Example #25
 def predict(self):
     args = self.args
     if args.verbosity:
         print("Preparing model for prediction")
     self.model_id = self.experiment_config["experiment"]["name"]
     if not args.trials:
         args.trials = os.path.join(self.cache_dir, self.model_id, "predictions", "trials")
     if not args.scores:
         args.scores = os.path.join(self.cache_dir, self.model_id, "predictions", "scores")
     self.make_named_dir(os.path.dirname(args.trials))
     self.make_named_dir(os.path.dirname(args.scores))
     training_config = self.experiment_config["experiment"]
     feat_config = self.experiment_config["features"]
     if args.verbosity > 1:
         print("Using model parameters:")
         yaml_pprint(training_config)
         print()
     if args.verbosity > 1:
         print("Using feature extraction parameters:")
         yaml_pprint(feat_config)
         print()
     model = self.create_model(dict(training_config), skip_training=True)
     if args.verbosity > 1:
         print("Preparing model")
     labels = self.experiment_config["dataset"]["labels"]
     model.prepare(labels, training_config)
     checkpoint_dir = self.get_checkpoint_dir()
     if args.checkpoint:
         checkpoint_path = os.path.join(checkpoint_dir, args.checkpoint)
     elif "best_checkpoint" in self.experiment_config.get("prediction", {}):
         checkpoint_path = os.path.join(checkpoint_dir, self.experiment_config["prediction"]["best_checkpoint"])
     else:
         checkpoints = os.listdir(checkpoint_dir) if os.path.isdir(checkpoint_dir) else []
         if not checkpoints:
             print("Error: Cannot evaluate with a model that has no checkpoints, i.e. is not trained.")
             return 1
         if "checkpoints" in training_config:
             monitor_value = training_config["checkpoints"]["monitor"]
             monitor_mode = training_config["checkpoints"].get("mode")
         else:
             monitor_value = "epoch"
             monitor_mode = None
         checkpoint_path = os.path.join(checkpoint_dir, models.get_best_checkpoint(checkpoints, key=monitor_value, mode=monitor_mode))
     if args.verbosity:
         print("Loading model weights from checkpoint file '{}'".format(checkpoint_path))
     model.load_weights(checkpoint_path)
     if args.verbosity:
         print("\nEvaluating testset with model:")
         print(str(model))
         print()
     ds = "test"
     if args.verbosity > 2:
         print("Dataset config for '{}'".format(ds))
         yaml_pprint(training_config[ds])
     ds_config = dict(training_config, **training_config[ds])
     del ds_config["train"], ds_config["validation"]
     if args.verbosity and "dataset_logger" in ds_config:
         print("Warning: dataset_logger in the test datagroup has no effect.")
     datagroup_key = ds_config.pop("datagroup")
     datagroup = self.experiment_config["dataset"]["datagroups"][datagroup_key]
     utt2path_path = os.path.join(datagroup["path"], datagroup.get("utt2path", "utt2path"))
     utt2label_path = os.path.join(datagroup["path"], datagroup.get("utt2label", "utt2label"))
     utt2path = collections.OrderedDict(
         row[:2] for row in parse_space_separated(utt2path_path)
     )
     utt2label = collections.OrderedDict(
         row[:2] for row in parse_space_separated(utt2label_path)
     )
     utterance_list = list(utt2path.keys())
     if args.file_limit:
         utterance_list = utterance_list[:args.file_limit]
         if args.verbosity > 3:
             print("Using utterance ids:")
             yaml_pprint(utterance_list)
     int2label = self.experiment_config["dataset"]["labels"]
     label2int, OH = make_label2onehot(int2label)
     def label2onehot(label):
         return OH[label2int.lookup(label)]
     labels_set = set(int2label)
     paths = []
     paths_meta = []
     for utt in utterance_list:
         label = utt2label[utt]
         if label not in labels_set:
             continue
         paths.append(utt2path[utt])
         paths_meta.append((utt, label))
     if args.verbosity:
         print("Extracting test set features for prediction")
     features = self.extract_features(
         feat_config,
         "test",
         trim_audio=False,
         debug_squeeze_last_dim=(ds_config["input_shape"][-1] == 1),
     )
     conf_json, conf_checksum = config_checksum(self.experiment_config, datagroup_key)
     features = tf_data.prepare_dataset_for_training(
         features,
         ds_config,
         feat_config,
         label2onehot,
         self.model_id,
         verbosity=args.verbosity,
         conf_checksum=conf_checksum,
     )
     # drop meta wavs required only for vad
     features = features.map(lambda *t: t[:3])
     if ds_config.get("persistent_features_cache", True):
         features_cache_dir = os.path.join(self.cache_dir, "features")
     else:
         features_cache_dir = "/tmp/tensorflow-cache"
     features_cache_path = os.path.join(
         features_cache_dir,
         self.experiment_config["dataset"]["key"],
         ds,
         feat_config["type"],
         conf_checksum,
     )
     self.make_named_dir(os.path.dirname(features_cache_path), "features cache")
     if not os.path.exists(features_cache_path + ".md5sum-input"):
         with open(features_cache_path + ".md5sum-input", "w") as f:
             print(conf_json, file=f, end='')
         if args.verbosity:
             print("Writing features into new cache: '{}'".format(features_cache_path))
     else:
         if args.verbosity:
             print("Loading features from existing cache: '{}'".format(features_cache_path))
     features = features.cache(filename=features_cache_path)
     if args.verbosity:
         print("Gathering all utterance ids from features dataset iterator")
     # Gather utterance ids, this also causes the extraction pipeline to be evaluated
     utterance_ids = []
     i = 0
     if args.verbosity > 1:
         print(now_str(date=True), "- 0 samples done")
     for _, _, uttids in features.as_numpy_iterator():
         for uttid in uttids:
             utterance_ids.append(uttid.decode("utf-8"))
             i += 1
             if args.verbosity > 1 and i % 10000 == 0:
                 print(now_str(date=True), "-", i, "samples done")
     if args.verbosity > 1:
         print(now_str(date=True), "- all", i, "samples done")
     if args.verbosity:
         print("Features extracted, writing target and non-target language information for each utterance to '{}'.".format(args.trials))
     with open(args.trials, "w") as trials_f:
         for utt, target in utt2label.items():
             for lang in int2label:
                 print(lang, utt, "target" if target == lang else "nontarget", file=trials_f)
     if args.verbosity:
         print("Starting prediction with model")
     predictions = model.predict(features.map(lambda *t: t[0]))
     if args.verbosity > 1:
         print("Done predicting, model returned predictions of shape {}. Writing them to '{}'.".format(predictions.shape, args.scores))
     num_predictions = 0
     with open(args.scores, "w") as scores_f:
         print(*int2label, file=scores_f)
         for utt, pred in zip(utterance_ids, predictions):
             pred_scores = [np.format_float_positional(x, precision=args.score_precision) for x in pred]
             print(utt, *pred_scores, sep=args.score_separator, file=scores_f)
             num_predictions += 1
     if args.verbosity:
         print("Wrote {} prediction scores to '{}'.".format(num_predictions, args.scores))
Example #26
import timeit

import numpy as np
from scipy import optimize

start = timeit.default_timer()


# combined function
def f(x):
    return (x**2 + (np.sqrt(3) * x)**2)**3 - 4 * x**2 * (np.sqrt(3) * x)**2


# roots are labeled from x_0 to x_2 from left to right

# root of x_0 using bisection method
x0_root = optimize.bisect(f, -1, -0.4)
x0_root = np.format_float_positional(x0_root,
                                     precision=4,
                                     unique=False,
                                     fractional=False,
                                     trim='k')
y = np.sqrt(3) * float(x0_root)
int0 = (x0_root, y)
print("Intersection 1 is {}".format(int0))

# root of x_1 using newton method
x1_root = optimize.newton(f, -0.1, tol=1.48e-06)
# treat the root as exactly zero when it lies within +/-1e-4 of zero
if -0.0001 < x1_root < 0.0001:
    x1_root = 0
y = np.sqrt(3) * float(x1_root)
int1 = (x1_root, y)
print("Root of x_1 is {}".format(int1))

# root of x_2 using bisection method
Example #27
    def test_dragon4(self):
        # these tests are adapted from Ryan Juckett's dragon4 implementation,
        # see dragon4.c for details.

        fpos32 = lambda x, **k: np.format_float_positional(np.float32(x), **k)
        fsci32 = lambda x, **k: np.format_float_scientific(np.float32(x), **k)
        fpos64 = lambda x, **k: np.format_float_positional(np.float64(x), **k)
        fsci64 = lambda x, **k: np.format_float_scientific(np.float64(x), **k)

        preckwd = lambda prec: {'unique': False, 'precision': prec}

        assert_equal(fpos32('1.0'), "1.")
        assert_equal(fsci32('1.0'), "1.e+00")
        assert_equal(fpos32('10.234'), "10.234")
        assert_equal(fpos32('-10.234'), "-10.234")
        assert_equal(fsci32('10.234'), "1.0234e+01")
        assert_equal(fsci32('-10.234'), "-1.0234e+01")
        assert_equal(fpos32('1000.0'), "1000.")
        assert_equal(fpos32('1.0', precision=0), "1.")
        assert_equal(fsci32('1.0', precision=0), "1.e+00")
        assert_equal(fpos32('10.234', precision=0), "10.")
        assert_equal(fpos32('-10.234', precision=0), "-10.")
        assert_equal(fsci32('10.234', precision=0), "1.e+01")
        assert_equal(fsci32('-10.234', precision=0), "-1.e+01")
        assert_equal(fpos32('10.234', precision=2), "10.23")
        assert_equal(fsci32('-10.234', precision=2), "-1.02e+01")
        assert_equal(fsci64('9.9999999999999995e-08', **preckwd(16)),
                            '9.9999999999999995e-08')
        assert_equal(fsci64('9.8813129168249309e-324', **preckwd(16)),
                            '9.8813129168249309e-324')
        assert_equal(fsci64('9.9999999999999694e-311', **preckwd(16)),
                            '9.9999999999999694e-311')


        # test rounding
        # 3.1415927410 is closest float32 to np.pi
        assert_equal(fpos32('3.14159265358979323846', **preckwd(10)),
                            "3.1415927410")
        assert_equal(fsci32('3.14159265358979323846', **preckwd(10)),
                            "3.1415927410e+00")
        assert_equal(fpos64('3.14159265358979323846', **preckwd(10)),
                            "3.1415926536")
        assert_equal(fsci64('3.14159265358979323846', **preckwd(10)),
                            "3.1415926536e+00")
        # 299792448 is closest float32 to 299792458
        assert_equal(fpos32('299792458.0', **preckwd(5)), "299792448.00000")
        assert_equal(fsci32('299792458.0', **preckwd(5)), "2.99792e+08")
        assert_equal(fpos64('299792458.0', **preckwd(5)), "299792458.00000")
        assert_equal(fsci64('299792458.0', **preckwd(5)), "2.99792e+08")

        assert_equal(fpos32('3.14159265358979323846', **preckwd(25)),
                            "3.1415927410125732421875000")
        assert_equal(fpos64('3.14159265358979323846', **preckwd(50)),
                         "3.14159265358979311599796346854418516159057617187500")
        assert_equal(fpos64('3.14159265358979323846'), "3.141592653589793")


        # smallest numbers
        assert_equal(fpos32(0.5**(126 + 23), unique=False, precision=149),
                    "0.00000000000000000000000000000000000000000000140129846432"
                    "4817070923729583289916131280261941876515771757068283889791"
                    "08268586060148663818836212158203125")
        
        assert_equal(fpos64(5e-324, unique=False, precision=1074),
                    "0.00000000000000000000000000000000000000000000000000000000"
                    "0000000000000000000000000000000000000000000000000000000000"
                    "0000000000000000000000000000000000000000000000000000000000"
                    "0000000000000000000000000000000000000000000000000000000000"
                    "0000000000000000000000000000000000000000000000000000000000"
                    "0000000000000000000000000000000000049406564584124654417656"
                    "8792868221372365059802614324764425585682500675507270208751"
                    "8652998363616359923797965646954457177309266567103559397963"
                    "9877479601078187812630071319031140452784581716784898210368"
                    "8718636056998730723050006387409153564984387312473397273169"
                    "6151400317153853980741262385655911710266585566867681870395"
                    "6031062493194527159149245532930545654440112748012970999954"
                    "1931989409080416563324524757147869014726780159355238611550"
                    "1348035264934720193790268107107491703332226844753335720832"
                    "4319360923828934583680601060115061698097530783422773183292"
                    "4790498252473077637592724787465608477820373446969953364701"
                    "7972677717585125660551199131504891101451037862738167250955"
                    "8373897335989936648099411642057026370902792427675445652290"
                    "87538682506419718265533447265625")

        # largest numbers
        f32x = np.finfo(np.float32).max
        assert_equal(fpos32(f32x, **preckwd(0)),
                    "340282346638528859811704183484516925440.")
        assert_equal(fpos64(np.finfo(np.float64).max, **preckwd(0)),
                    "1797693134862315708145274237317043567980705675258449965989"
                    "1747680315726078002853876058955863276687817154045895351438"
                    "2464234321326889464182768467546703537516986049910576551282"
                    "0762454900903893289440758685084551339423045832369032229481"
                    "6580855933212334827479782620414472316873817718091929988125"
                    "0404026184124858368.")
        # Warning: In unique mode only the integer digits necessary for
        # uniqueness are computed, the rest are 0.
        assert_equal(fpos32(f32x),
                    "340282350000000000000000000000000000000.")

        # Further tests of zero-padding vs rounding in different combinations
        # of unique, fractional, precision, min_digits
        # precision can only reduce digits, not add them.
        # min_digits can only extend digits, not reduce them.
        assert_equal(fpos32(f32x, unique=True, fractional=True, precision=0),
                    "340282350000000000000000000000000000000.")
        assert_equal(fpos32(f32x, unique=True, fractional=True, precision=4),
                    "340282350000000000000000000000000000000.")
        assert_equal(fpos32(f32x, unique=True, fractional=True, min_digits=0),
                    "340282346638528859811704183484516925440.")
        assert_equal(fpos32(f32x, unique=True, fractional=True, min_digits=4),
                    "340282346638528859811704183484516925440.0000")
        assert_equal(fpos32(f32x, unique=True, fractional=True,
                                    min_digits=4, precision=4),
                    "340282346638528859811704183484516925440.0000")
        assert_raises(ValueError, fpos32, f32x, unique=True, fractional=False,
                                          precision=0)
        assert_equal(fpos32(f32x, unique=True, fractional=False, precision=4),
                    "340300000000000000000000000000000000000.")
        assert_equal(fpos32(f32x, unique=True, fractional=False, precision=20),
                    "340282350000000000000000000000000000000.")
        assert_equal(fpos32(f32x, unique=True, fractional=False, min_digits=4),
                    "340282350000000000000000000000000000000.")
        assert_equal(fpos32(f32x, unique=True, fractional=False,
                                  min_digits=20),
                    "340282346638528859810000000000000000000.")
        assert_equal(fpos32(f32x, unique=True, fractional=False,
                                  min_digits=15),
                    "340282346638529000000000000000000000000.")
        assert_equal(fpos32(f32x, unique=False, fractional=False, precision=4),
                    "340300000000000000000000000000000000000.")
        # test that unique rounding is preserved when precision is supplied
        # but no extra digits need to be printed (gh-18609)
        a = np.float64.fromhex('-1p-97')
        assert_equal(fsci64(a, unique=True), '-6.310887241768095e-30')
        assert_equal(fsci64(a, unique=False, precision=15),
                     '-6.310887241768094e-30')
        assert_equal(fsci64(a, unique=True, precision=15),
                     '-6.310887241768095e-30')
        assert_equal(fsci64(a, unique=True, min_digits=15),
                     '-6.310887241768095e-30')
        assert_equal(fsci64(a, unique=True, precision=15, min_digits=15),
                     '-6.310887241768095e-30')
        # add/remove digits in unique mode with unbiased rounding
        assert_equal(fsci64(a, unique=True, precision=14),
                     '-6.31088724176809e-30')
        assert_equal(fsci64(a, unique=True, min_digits=16),
                     '-6.3108872417680944e-30')
        assert_equal(fsci64(a, unique=True, precision=16),
                     '-6.310887241768095e-30')
        assert_equal(fsci64(a, unique=True, min_digits=14),
                     '-6.310887241768095e-30')
        # test min_digits in unique mode with different rounding cases
        assert_equal(fsci64('1e120', min_digits=3), '1.000e+120')
        assert_equal(fsci64('1e100', min_digits=3), '1.000e+100')

        # test trailing zeros
        assert_equal(fpos32('1.0', unique=False, precision=3), "1.000")
        assert_equal(fpos64('1.0', unique=False, precision=3), "1.000")
        assert_equal(fsci32('1.0', unique=False, precision=3), "1.000e+00")
        assert_equal(fsci64('1.0', unique=False, precision=3), "1.000e+00")
        assert_equal(fpos32('1.5', unique=False, precision=3), "1.500")
        assert_equal(fpos64('1.5', unique=False, precision=3), "1.500")
        assert_equal(fsci32('1.5', unique=False, precision=3), "1.500e+00")
        assert_equal(fsci64('1.5', unique=False, precision=3), "1.500e+00")
        # gh-10713
        assert_equal(fpos64('324', unique=False, precision=5,
                                   fractional=False), "324.00")
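Example #28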
def out_clusters_pdf(data, d, sp_labels, km_labels, true_labels, k, K):
    """
    output a visualization of the result of the
    spectral clustering and k-means clustering
    algorithms, and the jaccard score for each.

    this function sometimes uses separate commands
    for 2d and 3d data.

    :param data: Data of (n*d) generaterd from sklearn
    :param d: dimension of data (2 or 3)
    :param sp_labels: lables based on spectral clustering
    :param km_labels: lables based on k-means clustering
    :param true_labels: Original lables based on make-blobs
    :param k: if Random is false number of Clusters based on make-blobs
    :param K: number of Clusters by user
    :return: NULL
    """
    # figure setup
    fig = plt.figure(figsize=plt.figaspect(0.5))

    # prepare the plot data
    x = data[:, 0]
    y = data[:, 1]
    if d == 3:
        z = data[:, 2]

    # create a subplot: spectral clustering
    if d == 2:
        plt.subplot(221)
        plt.scatter(x, y, c=sp_labels)
    else:
        ax = fig.add_subplot(2, 2, 1, projection='3d')
        ax.scatter3D(x, y, z, c=sp_labels)
    plt.title('Normalized Spectral Clustering')

    # create a subplot: kmeans clustering
    if d == 2:
        plt.subplot(222)
        plt.scatter(x, y, c=km_labels)
    else:
        ax = fig.add_subplot(2, 2, 2, projection='3d')
        ax.scatter3D(x, y, z, c=km_labels)

    plt.title('K-means')

    # prepare the plot text
    # jaccard scores for each algorithm's result
    sp_jaccard = jaccard_measure(sp_labels, true_labels)
    sp_jaccard_str = np.format_float_positional(sp_jaccard, precision=2)
    km_jaccard = jaccard_measure(km_labels, true_labels)
    km_jaccard_str = np.format_float_positional(km_jaccard, precision=2)
    # program parameters
    n = str(data.shape[0])
    K = str(K)
    k = str(k)

    # text to be displayed
    text = ("Data was generated from the values:\n" + "n = " + n + " , k = " +
            K + "\n" + "The k that was used for both algorithms was " +
            k + "\n" + "The jaccard measure for Spectral Clustering: " +
            sp_jaccard_str + "\n" + "The jaccard measure for K-means: " +
            km_jaccard_str)

    # add the text in place of a subplot
    fig.add_subplot(2, 2, (3, 4), frameon=False)
    plt.text(0.5, 0.5, text, ha='center', wrap=False)
    plt.axis('off')

    # save to pdf
    plt.savefig("clusters.pdf")
Example #29
def min(value):
    # note: shadows the built-in min(); the second positional argument is precision
    print(np.format_float_positional(np.min(np.array(value)), 2))
Example #30
def format_performance_info(performance_values):
    performance_text = ""
    for counter, value in enumerate(performance_values):
        # one line per value; the newline keeps successive entries from running together
        performance_text += str(counter) + ": " + np.format_float_positional(value, 2) + "\n"

    return performance_text
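A quick usage sketch of the helper above, with made-up scores:

print(format_performance_info([0.91234, 0.85678]))
# 0: 0.91
# 1: 0.86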
Example #31
    def test_dragon4(self):
        # these tests are adapted from Ryan Juckett's dragon4 implementation,
        # see dragon4.c for details.

        fpos32 = lambda x, **k: np.format_float_positional(np.float32(x), **k)
        fsci32 = lambda x, **k: np.format_float_scientific(np.float32(x), **k)
        fpos64 = lambda x, **k: np.format_float_positional(np.float64(x), **k)
        fsci64 = lambda x, **k: np.format_float_scientific(np.float64(x), **k)

        preckwd = lambda prec: {'unique': False, 'precision': prec}

        assert_equal(fpos32('1.0'), "1.")
        assert_equal(fsci32('1.0'), "1.e+00")
        assert_equal(fpos32('10.234'), "10.234")
        assert_equal(fpos32('-10.234'), "-10.234")
        assert_equal(fsci32('10.234'), "1.0234e+01")
        assert_equal(fsci32('-10.234'), "-1.0234e+01")
        assert_equal(fpos32('1000.0'), "1000.")
        assert_equal(fpos32('1.0', precision=0), "1.")
        assert_equal(fsci32('1.0', precision=0), "1.e+00")
        assert_equal(fpos32('10.234', precision=0), "10.")
        assert_equal(fpos32('-10.234', precision=0), "-10.")
        assert_equal(fsci32('10.234', precision=0), "1.e+01")
        assert_equal(fsci32('-10.234', precision=0), "-1.e+01")
        assert_equal(fpos32('10.234', precision=2), "10.23")
        assert_equal(fsci32('-10.234', precision=2), "-1.02e+01")
        assert_equal(fsci64('9.9999999999999995e-08', **preckwd(16)),
                            '9.9999999999999995e-08')
        assert_equal(fsci64('9.8813129168249309e-324', **preckwd(16)),
                            '9.8813129168249309e-324')
        assert_equal(fsci64('9.9999999999999694e-311', **preckwd(16)),
                            '9.9999999999999694e-311')


        # test rounding
        # 3.1415927410 is closest float32 to np.pi
        assert_equal(fpos32('3.14159265358979323846', **preckwd(10)),
                            "3.1415927410")
        assert_equal(fsci32('3.14159265358979323846', **preckwd(10)),
                            "3.1415927410e+00")
        assert_equal(fpos64('3.14159265358979323846', **preckwd(10)),
                            "3.1415926536")
        assert_equal(fsci64('3.14159265358979323846', **preckwd(10)),
                            "3.1415926536e+00")
        # 299792448 is closest float32 to 299792458
        assert_equal(fpos32('299792458.0', **preckwd(5)), "299792448.00000")
        assert_equal(fsci32('299792458.0', **preckwd(5)), "2.99792e+08")
        assert_equal(fpos64('299792458.0', **preckwd(5)), "299792458.00000")
        assert_equal(fsci64('299792458.0', **preckwd(5)), "2.99792e+08")

        assert_equal(fpos32('3.14159265358979323846', **preckwd(25)),
                            "3.1415927410125732421875000")
        assert_equal(fpos64('3.14159265358979323846', **preckwd(50)),
                         "3.14159265358979311599796346854418516159057617187500")
        assert_equal(fpos64('3.14159265358979323846'), "3.141592653589793")


        # smallest numbers
        assert_equal(fpos32(0.5**(126 + 23), unique=False, precision=149),
                    "0.00000000000000000000000000000000000000000000140129846432"
                    "4817070923729583289916131280261941876515771757068283889791"
                    "08268586060148663818836212158203125")
        assert_equal(fpos64(0.5**(1022 + 52), unique=False, precision=1074),
                    "0.00000000000000000000000000000000000000000000000000000000"
                    "0000000000000000000000000000000000000000000000000000000000"
                    "0000000000000000000000000000000000000000000000000000000000"
                    "0000000000000000000000000000000000000000000000000000000000"
                    "0000000000000000000000000000000000000000000000000000000000"
                    "0000000000000000000000000000000000049406564584124654417656"
                    "8792868221372365059802614324764425585682500675507270208751"
                    "8652998363616359923797965646954457177309266567103559397963"
                    "9877479601078187812630071319031140452784581716784898210368"
                    "8718636056998730723050006387409153564984387312473397273169"
                    "6151400317153853980741262385655911710266585566867681870395"
                    "6031062493194527159149245532930545654440112748012970999954"
                    "1931989409080416563324524757147869014726780159355238611550"
                    "1348035264934720193790268107107491703332226844753335720832"
                    "4319360923828934583680601060115061698097530783422773183292"
                    "4790498252473077637592724787465608477820373446969953364701"
                    "7972677717585125660551199131504891101451037862738167250955"
                    "8373897335989936648099411642057026370902792427675445652290"
                    "87538682506419718265533447265625")

        # largest numbers
        assert_equal(fpos32(np.finfo(np.float32).max, **preckwd(0)),
                    "340282346638528859811704183484516925440.")
        assert_equal(fpos64(np.finfo(np.float64).max, **preckwd(0)),
                    "1797693134862315708145274237317043567980705675258449965989"
                    "1747680315726078002853876058955863276687817154045895351438"
                    "2464234321326889464182768467546703537516986049910576551282"
                    "0762454900903893289440758685084551339423045832369032229481"
                    "6580855933212334827479782620414472316873817718091929988125"
                    "0404026184124858368.")
        # Warning: In unique mode only the integer digits necessary for
        # uniqueness are computed, the rest are 0. Should we change this?
        assert_equal(fpos32(np.finfo(np.float32).max, precision=0),
                    "340282350000000000000000000000000000000.")

        # test trailing zeros
        assert_equal(fpos32('1.0', unique=False, precision=3), "1.000")
        assert_equal(fpos64('1.0', unique=False, precision=3), "1.000")
        assert_equal(fsci32('1.0', unique=False, precision=3), "1.000e+00")
        assert_equal(fsci64('1.0', unique=False, precision=3), "1.000e+00")
        assert_equal(fpos32('1.5', unique=False, precision=3), "1.500")
        assert_equal(fpos64('1.5', unique=False, precision=3), "1.500")
        assert_equal(fsci32('1.5', unique=False, precision=3), "1.500e+00")
        assert_equal(fsci64('1.5', unique=False, precision=3), "1.500e+00")
Example #32
def map_df_to_str(df: pd.DataFrame) -> pd.DataFrame:
    # note: DataFrame.applymap is deprecated since pandas 2.1; DataFrame.map is the replacement
    return df.applymap(lambda x: np.format_float_positional(x, trim="-")
                       if isinstance(x, float) else x).astype(str)
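A hedged usage sketch with a throwaway DataFrame, illustrating why the positional formatter is used here: plain str() falls back to scientific notation for small floats, while format_float_positional keeps them positional:

import numpy as np
import pandas as pd

df = pd.DataFrame({"x": [0.0000001, 2.5], "name": ["a", "b"]})
print(df["x"].astype(str).tolist())     # ['1e-07', '2.5']
print(map_df_to_str(df)["x"].tolist())  # ['0.0000001', '2.5']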