Example #1
def results_retrieval(desc, splt):
    res = dill.load(
        open("results/" + desc + "_retrieval_" + splt['name'] + ".p", "rb"))
    print("%s - mAP 10K queries " % (blue(desc.upper())))
    n_q = float(len(res.keys()))
    heads = ['']
    mAP = dict.fromkeys(['e', 'h', 't'])
    for k in mAP:
        mAP[k] = dict.fromkeys(res[0][k])
        for psize in mAP[k]:
            mAP[k][psize] = 0

    for qid in res:
        for k in mAP:
            for psize in mAP[k]:
                mAP[k][psize] += res[qid][k][psize]['ap']

    results = []
    for k in ['e', 'h', 't']:
        heads = ['Noise'] + sorted(mAP[k])
        r = []
        for psize in sorted(mAP[k]):
            r.append(mAP[k][psize] / n_q)
        results.append([ft[k]] + r)

    res = np.array(results)[:, 1:].astype(np.float32)
    results.append(['mean'] + list(np.mean(res, axis=0)))
    print(tb(results, headers=heads))
Example #2
def process_fits(recipients, params=None):

    # Read the HEALPix sky map and the FITS header.
    skymap, header = hp.read_map(params['skymap_fits'], h=True, verbose=False)

    # Print and save some values from the FITS header.
    header = dict(header)
    params['time'] = Time(header['DATE-OBS'], format='isot', scale='utc')
    time = Time.now()
    params['Distance'] = str(header['DISTMEAN']) + ' +/- ' + str(
        header['DISTSTD'])
    header['GraceID'] = params['GraceID']
    with open('./' + params['GraceID'] + '.dat', 'w') as f:
        data_p = []
        tableheaders = ['PARAMETER', 'VALUE']
        for prm in interesting_parameters:
            if prm in list(params.keys()):
                data_p.append((prm, params[prm]))
        print(tb(data_p, headers=tableheaders, tablefmt='fancy_grid'))
        f.write(tb(data_p, headers=tableheaders, tablefmt='html'))

    # Making a pie chart of the type of event for the email
    labels = ['BNS', 'NSBH', 'BBH', 'MassGap', 'Terrestrial']
    sizes = [float(params[label]) * 100 for label in labels]
    labels = ['%s (%.1f %%)' % (lab, pct) for lab, pct in zip(labels, sizes)]
    fig1, ax1 = plt.subplots()
    patches, texts = ax1.pie(sizes, startangle=90)
    ax1.legend(patches, labels, loc="best")
    ax1.axis('equal')  # Equal aspect ratio ensures that the pie is drawn as a circle.
    plt.savefig('piechart_%s.png' % params['GraceID'])

    prob, probfull, timetill90, m = prob_observable(skymap,
                                                    header,
                                                    time,
                                                    plot=plotting)

    params['skymap_array'] = m

    if timetill90 == -99:
        print("HET can't observe the source.")
        return
    else:
        print("Source has a {:.1f}% chance of being observable now.".format(
            100 * prob))
        print("Integrated probability over 24 hours (ignoring the sun) "
              "is {:.1f}%.".format(100 * probfull))
        print('{:.1f} hours until the 90% probability region can be observed.'.format(
            timetill90))
        send_notifications(params, timetill90, text=True, email=False)
        get_galaxies.write_catalog(params, 'MANGROVE')
        mincontour = get_LST.get_LST(targf='galaxies%s_%s.dat' %
                                     ('MANGROVE', params['GraceID']))
        make_phaseii.make_phaseii('LSTs_{}.out'.format(params['GraceID']))
        send_notifications(params, timetill90)
Example #3
def results_verification(desc, splt):
    v = {'balanced': 'auc', 'imbalanced': 'ap'}
    res = dill.load(
        open("results/" + desc + "_verification_" + splt['name'] + ".p", "rb"))
    for r in v:
        print("%s - %s variant (%s) " %
              (blue(desc.upper()), r.capitalize(), v[r]))
        heads = ["Noise", "Inter", "Intra"]
        results = []
        for t in ['e', 'h', 't']:
            results.append(
                [ft[t], res[t]['inter'][r][v[r]], res[t]['intra'][r][v[r]]])
        print(tb(results, headers=heads))
Example #4
    def show(self):
        text = []
        text.append('\nError normalised (Y - F)')
        text.append(tb([self.norm_error]))

        text.append('\nError (Y_ - F_)')
        text.append(tb([self.error]))

        # text.append('Input data: X')
        # text.append(tb(np.array(self.datas[:, :self.dim_integral[2]])))
        #
        # text.append('\nInput data: Y')
        # text.append(tb(np.array(self.datas[:, self.dim_integral[2]:self.dim_integral[3]])))
        #
        # text.append('\nX normalised:')
        # text.append(tb(np.array(self.data[:, :self.dim_integral[2]])))
        #
        # text.append('\nY normalised:')
        # text.append(tb(np.array(self.data[:, self.dim_integral[2]:self.dim_integral[3]])))
        #
        # text.append('\nmatrix B:')
        # text.append(tb(np.array(self.B)))
        #
        # # text.append('\nmatrix A:')
        # # text.append(tb(np.array(self.A)))
        #
        # text.append('\nmatrix Lambda:')
        # text.append(tb(np.array(self.Lamb)))
        #
        # for j in range(len(self.Psi)):
        #     s = '\nmatrix Psi%i:' % (j + 1)
        #     text.append(s)
        #     text.append(tb(np.array(self.Psi[j])))
        #
        # text.append('\nmatrix a:')
        # text.append(tb(self.a.tolist()))
        #
        # for j in range(len(self.Fi)):
        #     s = '\nmatrix F%i:' % (j + 1)
        #     text.append(s)
        #     text.append(tb(np.array(self.Fi[j])))
        #
        # text.append('\nmatrix c:')
        # text.append(tb(np.array(self.c)))
        #
        # text.append('\nY rebuilt normalized :')
        # text.append(tb(np.array(self.F)))
        #
        # text.append('\nY rebuilt :')
        # text.append(tb(self.F_.tolist()))
        return '\n'.join(text)
Example #6
def show_table(table):
    """Print a table with tabulate 0.7.2.

    The first row of ``table`` is used as the header row.
    For more information about this module visit:
    https://pypi.python.org/pypi/tabulate/0.7.2.
    """
    print(tb(table[1:], table[0], "grid"))
    return 'Thanks tabulate'
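A possible call to show_table, shown for orientation only; the sample rows are invented and the `tb` alias is imported as in the other examples:

from tabulate import tabulate as tb

sample = [
    ["Name", "Score"],   # first row is consumed as the header
    ["Alice", 91],
    ["Bob", 87],
]
show_table(sample)       # prints a grid-formatted table and returns 'Thanks tabulate'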
Example #7
def display_results(VICTORY_LIST, GAME_PLAYERS):
    header = ["Winner1", "Winner2", "Winner3", "Winner4", "Winner5"]
    m_o_h = []
    for i in VICTORY_LIST:
        m_o_h.append(GAME_PLAYERS[i].player_name)
    m_o_h = [m_o_h]
    print(tb(m_o_h, header, tablefmt="grid"))
Example #8
    def rc(self):
        # Selecting the columns from the DataFrame
        data_df = self.sub_data[['Order', 'Name', 'RC']]
        data_df.sort_values(by=['RC'], inplace=True, ascending=False)

        # Returning tabulated data with the column headings
        # Reference link - https://pypi.org/project/tabulate/
        return tb(data_df, headers=["Roll Order", "Name", "RC"], tablefmt='grid', showindex='never')
Example #9
def to_table(df, tablefmt, filename, str_img):
    """
    Export dataframe to a table file with a specific format
    (see tabulate doc for more information: 
        https://pypi.org/project/tabulate/):
        - "plain"
        - "simple"
        - "grid"
        - "fancy_grid"
        - "pipe"
        - "orgtbl"
        - "jira"
        - "presto"
        - "psql"
        - "rst"
        - "mediawiki"
        - "moinmoin"
        - "youtrack"
        - "html"
        - "latex"
        - "latex_raw"
        - "latex_booktabs"
        - "textile"
        
    @param df (dataframe): dataframe with stats and img
    @param tablefmt (str): format of the table
    @param filename (str): path and name of the output file
    @param str_img (str): image string passed through to img_str for each row
    """
    df = df.apply(lambda x: img_str(x, str_img), axis=1)

    with open(filename, 'w') as f:
        f.write(tb(df, headers="keys", showindex=False, tablefmt=tablefmt))
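For orientation, a minimal standalone sketch of the same export step, leaving out the img_str row transformation used above; the frame, column names, and output filename are invented:

import pandas as pd
from tabulate import tabulate as tb

df = pd.DataFrame({"stat": ["mean", "std"], "value": [0.5, 0.1]})
with open("stats_table.html", "w") as f:
    # write the DataFrame as an HTML table; any tablefmt from the list above works here
    f.write(tb(df, headers="keys", showindex=False, tablefmt="html"))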
Example #10
def results_retrieval(desc, splt, more_info=False):
    res = dill.load(
        open(os.path.join("results", desc + "_retrieval_" + splt['name'] + ".p"), "rb"))
    if more_info:
        print("%s - mAP 10K queries " % (blue(desc.upper())))
    n_q = float(len(res))
    heads = ['']
    mAP = dict.fromkeys(['e', 'h', 't'])
    for k in mAP:
        mAP[k] = dict.fromkeys(res[0][k])
        for psize in mAP[k]:
            mAP[k][psize] = 0

    for qid in res:
        for k in mAP:
            for psize in mAP[k]:
                mAP[k][psize] += res[qid][k][psize]['ap']

    results = []
    for k in ['e', 'h', 't']:
        heads = ['Noise'] + sorted(mAP[k])
        r = []
        for psize in sorted(mAP[k]):
            r.append(mAP[k][psize] / n_q)
        results.append([ft[k]] + r)

    res = np.array(results)[:, 1:].astype(np.float32)
    results.append(['mean'] + list(np.mean(res, axis=0)))
    if more_info:
        print(tb(results, headers=heads))
    mAP = np.asarray(results[-1][1:]).mean()
    print('Mean Average Precision is {:f}'.format(mAP))
Example #11
def analyzer_printer(i, ign_val=70):

    exp = []

    print("\n\n")

    exp.append(["Datatype", df[i].dtype])

    exp.append(["Total value", len(df[i])])

    exp.append(["Total Null", df[i].isnull().sum()])

    exp.append(["Total Uniques", df[i].nunique()])

    # exp.append(['Minimum value', min(df[i])])

    # exp.append(['Maximum value', max(df[i])])

    exp.append([
        "Percent Uniques\n[Round Figure]",
        round(df[i].nunique() / len(df[i]) * 100)
    ])

    exp.append([
        "Might be categorical",
        "Yes" if round(df[i].nunique() / len(df[i]) * 100) <= 10 else "No",
    ])

    exp.append([
        "Column might be ignored\n[based on uniqueness]",
        "Yes"
        if round(df[i].nunique() / len(df[i]) * 100) >= ign_val else "No",
    ])

    print(tb(exp, headers=["Column name", i], tablefmt="fancy_grid"))
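A usage sketch for analyzer_printer; it reads the module-level DataFrame df, so the toy frame below is invented and assumes pandas and the tb alias are imported as in the snippet:

import pandas as pd

# analyzer_printer expects a module-level DataFrame named `df`
df = pd.DataFrame({"category": ["a", "b", "a", None, "b"],
                   "value": [1.0, 2.5, 3.1, 4.0, 2.2]})

analyzer_printer("category")           # prints a summary table for one column
analyzer_printer("value", ign_val=80)  # flag the column as ignorable only above 80% uniqueness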
Example #12
    def math(self):
        # Selecting the columns from the DataFrame
        data_df = self.sub_data[['Order', 'Name', 'Math']]
        data_df.sort_values(by=['Math'], inplace=True, ascending=False)

        # Returning tabulated data with the column headings
        return tb(data_df, headers=["Roll Order", "Name", "Math"], tablefmt='grid', showindex='never')
Example #13
def results_matching(desc, splt):
    res = dill.load(
        open("results/" + desc + "_matching_" + splt['name'] + ".p", "rb"))
    mAP = {'e': 0, 'h': 0, 't': 0}
    k_mAP = 0
    heads = [ft['e'], ft['h'], ft['t'], 'mean']
    for seq in res:
        for t in ['e', 'h', 't']:
            for idx in range(1, 6):
                mAP[t] += res[seq][t][idx]['ap']
                k_mAP += 1
    k_mAP = k_mAP / 3.0
    print("%s - mAP " % (blue(desc.upper())))

    results = [mAP['e'] / k_mAP, mAP['h'] / k_mAP, mAP['t'] / k_mAP]
    results.append(sum(results) / float(len(results)))
    print(tb([results], headers=heads))
    print("\n")
Example #14
def counter(html):
    tags = []
    res = {}
    soup = BS(html, 'html.parser')
    for tag in soup.findAll():
        tags.append(tag.name)
    uniq = list(set(tags))
    for tag in uniq:
        res[tag] = tags.count(tag)
    sort = sorted(res.items(), key=lambda x: (x[1], x[0]))
    restb = tb(sort, headers=['Tags', 'Numbers'], tablefmt='psql')
    print(restb)
    return res, restb
Example #15
def results_verification(desc, splt, more_info=False):
    v = {'imbalanced': 'ap'}
    res = dill.load(
        open(os.path.join("results", desc + "_verification_" + splt['name'] + ".p"), "rb"))
    for r in v:
        if more_info:
            print("%s - %s variant (%s) " % (blue(desc.upper()), r.capitalize(), v[r]))
        heads = ["Noise", "Inter", "Intra"]
        results = []
        for t in ['e', 'h', 't']:
            results.append([ft[t], res[t]['inter'][r][v[r]], res[t]['intra'][r][v[r]]])
        if more_info:
            print(tb(results, headers=heads))
    mAP = np.asarray(list(map(lambda x: x[1:], results))).mean()
    print('Mean Average Precision is {:f}'.format(mAP))
Example #16
def counter(html):
    """
    Count the tags in an HTML page and provide the sorted counts
    in table format.
    """
    tags = []
    res = {}
    soup = BS(html, 'html.parser')
    for tag in soup.findAll():
        tags.append(tag.name)
    uniq = list(set(tags))
    for tag in uniq:
        res[tag] = tags.count(tag)
    sort = sorted(res.items(), key=lambda x: (x[1], x[0]))
    restb = tb(sort, headers=['Tags', 'Numbers'], tablefmt='psql')
    print(restb)
    return res, restb
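A usage sketch for counter; the HTML fragment is invented, and BS/tb are assumed to be the usual BeautifulSoup and tabulate aliases used above:

from bs4 import BeautifulSoup as BS
from tabulate import tabulate as tb

html = "<html><body><p>one</p><p>two</p><a href='#'>link</a></body></html>"
counts, table_text = counter(html)
# counts == {'a': 1, 'body': 1, 'html': 1, 'p': 2}; table_text is the psql-style table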
Example #17
def show_agreement_types():

    agmt_name = input('What agreement? ')
    sql = """
    select
        AgreementTypes.Title
        ,AgreementTypes.ID
        from
            CircleOne..AgreementTypes
        where
            1=1
            and AgreementTypes.Title like '%{}%'
    """

    cxn = engine.connect()
    p = pd.read_sql_query(sql.format(agmt_name), cxn)
    cxn.close()
    print(tb(p, headers='keys'))
Example #18
def tabulate():
    try:
        request_dict = request.get_json()
        jsonstr = request_dict['jsonStr']
        jsonstr = json.dumps(jsonstr)
        df = pd.read_json(eval(jsonstr), orient='split')
        headers = 'keys'
        tableformat = 'orgtbl'
        tabulated_df = tb(df, headers=headers, tablefmt=tableformat)
        response = app.response_class(response=tabulated_df,
                                      status=200,
                                      mimetype='application/json')
    except Exception:
        exception = ExceptionHelpers.format_exception(sys.exc_info())
        response = app.response_class(response=exception,
                                      status=400,
                                      mimetype='application/json')
    return response
Example #19
    def marks(self, class_call=None):
        self.class_call = class_call
        
        # Selecting the columns to be displayed
        data_df = pd.DataFrame(self.stu_data, columns = ['Order', 'Name', 'RC', 'Listening', 'Writing', 'Math'])
        
        # Searching for the entire or part of student name in the 'Name' column of the DataFrame using the name entered by the user
        data_df = data_df[data_df['Name'].str.contains(self.stu_name)] # Reference link - https://davidhamann.de/2017/06/26/pandas-select-elements-by-string/

        if data_df.empty:  # Reference link - https://pandas.pydata.org/pandas-docs/version/0.18/generated/pandas.DataFrame.empty.html
            return 'No student found!'

        # Checking to see if data is requested from another class or for tabulating and displaying the DataFrame
        elif class_call is None:
            return tb(data_df, headers=["Roll Order", "Name", "RC", "Listening", "Writing", "Math"], tablefmt='grid', showindex='never')
        
        # If data requested from another class then DataFrame is sent directly without tabulating it
        else:
            return data_df
Example #20
def results_matching(desc, splt, more_info=False):
    res = dill.load(
        open(os.path.join("results", desc + "_matching_" + splt['name'] + ".p"), "rb"))
    mAP = {'e': 0, 'h': 0, 't': 0}
    k_mAP = 0
    heads = [ft['e'], ft['h'], ft['t'], 'mean']
    for seq in res:
        for t in ['e', 'h', 't']:
            for idx in range(1, 6):
                mAP[t] += res[seq][t][idx]['ap']
                k_mAP += 1
    k_mAP = k_mAP / 3.0
    if more_info:
        print("%s - mAP " % (blue(desc.upper())))

    results = [mAP['e'] / k_mAP, mAP['h'] / k_mAP, mAP['t'] / k_mAP]
    results.append(sum(results) / float(len(results)))
    if more_info:
        print(tb([results], headers=heads))
    print('Mean Average Precision is {:f}'.format(results[-1]))
    print("\n")
Example #21
# Correlation matrix
toyota.corr()

np.mean(toyota)
toyota['Price'].mean()
toyota['Price'].median()
toyota['Price'].mode()
toyota['Price'].var()
toyota['Price'].std()

print(toyota.describe())
descriptive = toyota.describe()

from tabulate import tabulate as tb
print(tb(descriptive, toyota.columns))

######### boxplots ###########

plt.boxplot(toyota.Price)
plt.xticks([1], ['Price'])
plt.boxplot(toyota.Age_08_04)
plt.xticks([1], ['Age'])
plt.boxplot(toyota.KM)
plt.xticks([1], ['KM'])
Example #22
#!/usr/bin/python3
import sys
sys.path.append('../')
import argparse
from exchange.exchange import create_exchange
from tabulate import tabulate as tb
import pprint

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='account')
    parser.add_argument('-e', help='exchange, eg: binance, binance_margin')
    args = parser.parse_args()
    # print(args)
    if not (args.e):
        parser.print_help()
        exit(1)

    exchange = create_exchange(args.e)
    if not exchange:
        print("exchange error!")
        exit(1)

    account = exchange.get_account()
    print("account info:")
    #pprint.pprint(account)
    print(tb(account['balances']))
Example #23
# Correlation matrix
Computer_Data.corr()

np.mean(Computer_Data)
Computer_Data['price'].mean()
Computer_Data['price'].median()
Computer_Data['price'].mode()
Computer_Data['price'].var()
Computer_Data['price'].std()

print(Computer_Data.describe())
descriptive = Computer_Data.describe()

from tabulate import tabulate as tb
print(tb(descriptive, Computer_Data.columns))

######### boxplots ###########

import seaborn as sns

plt.boxplot(Computer_Data.price)
plt.xticks([1], ['price'])
plt.boxplot(Computer_Data.speed)
plt.xticks([1], ['speed'])
plt.boxplot(Computer_Data.hd)
plt.xticks([1], ['hd'])
Example #24
def _printiso(tsd,
              date_format=None,
              sep=',',
              float_format='%g',
              showindex="never",
              headers="keys",
              tablefmt="csv"):
    """Separate so can use in tests."""
    sys.tracebacklimit = 1000

    if isinstance(tsd, (pd.DataFrame, pd.Series)):
        if isinstance(tsd, pd.Series):
            tsd = pd.DataFrame(tsd)

        if len(tsd.columns) == 0:
            tsd = pd.DataFrame(index=tsd.index)

        # Not perfectly true, but likely will use showindex for indices
        # that are not time stamps.
        if showindex is True:
            if not tsd.index.name:
                tsd.index.name = 'UniqueID'
        else:
            if not tsd.index.name:
                tsd.index.name = 'Datetime'

        print_index = True
        if tsd.index.is_all_dates is True:
            if tsd.index.name is None:
                tsd.index.name = 'Datetime'
            # Someone made the decision about the name
            # This is how I include time zone info by tacking on to the
            # index.name.
            elif 'datetime' not in tsd.index.name.lower():
                tsd.index.name = 'Datetime'
        else:
            # This might be overkill, but tstoolbox is for time-series.
            # Revisit if necessary.
            print_index = False

        if tsd.index.name == 'UniqueID':
            print_index = False

        if showindex in ['always', 'default']:
            print_index = True

    elif isinstance(tsd, (int, float, tuple, pd.np.ndarray)):
        tablefmt = None

    if tablefmt in ["csv", "tsv", "csv_nos", "tsv_nos"]:
        sep = {
            "csv": ",",
            "tsv": "\t",
            "csv_nos": ",",
            "tsv_nos": "\t"
        }[tablefmt]
        if isinstance(tsd, pd.DataFrame):
            try:
                tsd.to_csv(sys.stdout,
                           float_format=float_format,
                           date_format=date_format,
                           sep=sep,
                           index=print_index)
                return
            except IOError:
                return
        else:
            fmt = simple_separated_format(sep)
    else:
        fmt = tablefmt

    if fmt is None:
        print(str(list(tsd))[1:-1])
    elif tablefmt in ['csv_nos', 'tsv_nos']:
        print(
            tb(tsd, tablefmt=fmt, showindex=showindex,
               headers=headers).replace(' ', ''))
    else:
        print(tb(tsd, tablefmt=fmt, showindex=showindex, headers=headers))
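A usage sketch for _printiso; it assumes the snippet's own imports (sys, pandas as pd, and tabulate's tabulate/simple_separated_format) and a pandas version that still provides Index.is_all_dates (removed in pandas 2.0). The frame is invented:

import pandas as pd

frame = pd.DataFrame({"flow": [1.2, 3.4]},
                     index=pd.date_range("2020-01-01", periods=2, freq="D"))

_printiso(frame)                     # CSV on stdout with a 'Datetime' index column
_printiso(frame, tablefmt="orgtbl")  # same data rendered as an org-mode table via tabulate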
Example #25
sns.heatmap(heat1, xticklabels=bank_conti.columns, yticklabels=bank_conti.columns, annot=True)


# Scatter plot between the variables along with histograms
sns.pairplot(bank_conti)

# Usage of lambda with the apply function:
# apply() runs a custom operation on each column, and lambda is just
# another way to apply a function to each value without an explicit for loop.
bank.isnull().sum()


from tabulate import tabulate as tb
print(tb(descriptive, bank.columns))

bank.apply(lambda x: x.mean())
bank.mean()

bank.dtypes
bank.columns

job_dum = pd.get_dummies(bank.job, drop_first=True)

df_dummies = pd.get_dummies(bank, columns=['job', 'marital', 'education', 'default', 'housing',
                                           'loan', 'contact', 'month', 'poutcome', 'y'],
                            drop_first=True)
bank = df_dummies
bank_total_dum = pd.get_dummies(bank_ori, columns=['job', 'marital', 'education', 'default', 'housing',
                                                   'loan', 'contact', 'month', 'poutcome', 'y'],
                                drop_first=False)


#Removing special characters from the dataframe
Example #26
    def show(self):
        text = []

        text.append('Input data: X')
        text.append(tb(np.array(self.datas[:, :self.dim_integral[2]])))

        text.append('\nInput data: Y')
        text.append(tb(np.array(self.datas[:, self.dim_integral[2]:self.dim_integral[3]])))

        text.append('\nX normalised:')
        text.append(tb(np.array(self.data[:, :self.dim_integral[2]])))

        text.append('\nY normalised:')
        text.append(tb(np.array(self.data[:, self.dim_integral[2]:self.dim_integral[3]])))

        text.append('\nmatrix B:')
        text.append(tb(np.array(self.B)))

        # text.append('\nmatrix A:')
        # text.append(tb(np.array(self.A)))

        text.append('\nmatrix Lambda:')
        text.append(tb(np.array(self.Lamb)))

        for j in range(len(self.Psi)):
            s = '\nmatrix Psi%i:' % (j + 1)
            text.append(s)
            text.append(tb(np.array(self.Psi[j])))

        text.append('\nmatrix a:')
        text.append(tb(self.a.tolist()))

        for j in range(len(self.Fi)):
            s = '\nmatrix F%i:' % (j + 1)
            text.append(s)
            text.append(tb(np.array(self.Fi[j])))

        text.append('\nmatrix c:')
        text.append(tb(np.array(self.c)))

        text.append('\nY rebuilt normalised:')
        text.append(tb(np.array(self.F)))

        text.append('\nY rebuilt:')
        text.append(tb(self.F_.tolist()))

        text.append('\nNormalised residual (max) (Y - F)')
        text.append(tb([self.norm_error]))

        text.append('\nNormalised residual (avg) (Y - F)')
        text.append(tb([self.norm_error_a]))

        text.append('\nResidual (max) (Y_ - F_)')
        text.append(tb([self.error]))

        text.append('\nResidual (avg) (Y_ - F_)')
        text.append(tb([self.error_a]))

        return '\n'.join(text)
Example #27
def _printiso(
        tsd,
        date_format=None,
        sep=',',
        float_format='%g',
        showindex='never',
        headers='keys',
        tablefmt='csv',
):
    """Separate so can use in tests."""
    sys.tracebacklimit = 1000

    if isinstance(tsd, (pd.DataFrame, pd.Series)):
        if isinstance(tsd, pd.Series):
            tsd = pd.DataFrame(tsd)

        if tsd.columns.empty:
            tsd = pd.DataFrame(index=tsd.index)

        # Not perfectly true, but likely will use showindex for indices
        # that are not time stamps.
        if showindex is True:
            if not tsd.index.name:
                tsd.index.name = 'UniqueID'
        else:
            if not tsd.index.name:
                tsd.index.name = 'Datetime'

        print_index = True
        if tsd.index.is_all_dates is True:
            if not tsd.index.name:
                tsd.index.name = 'Datetime'
            # Someone made the decision about the name
            # This is how I include time zone info by tacking on to the
            # index.name.
            elif 'datetime' not in tsd.index.name.lower():
                tsd.index.name = 'Datetime'
        else:
            # This might be overkill, but tstoolbox is for time-series.
            # Revisit if necessary.
            print_index = False

        if tsd.index.name == 'UniqueID':
            print_index = False

        if showindex in ['always', 'default']:
            print_index = True

    elif isinstance(tsd, (int, float, tuple, pd.np.ndarray)):
        tablefmt = None

    if tablefmt in ['csv', 'tsv', 'csv_nos', 'tsv_nos']:
        sep = {'csv': ',',
               'tsv': '\t',
               'csv_nos': ',',
               'tsv_nos': '\t'}[tablefmt]
        if isinstance(tsd, pd.DataFrame):
            try:
                tsd.to_csv(sys.stdout,
                           float_format=float_format,
                           date_format=date_format,
                           sep=sep,
                           index=print_index)
                return
            except IOError:
                return
        else:
            fmt = simple_separated_format(sep)
    else:
        fmt = tablefmt

    if fmt is None:
        print(str(list(tsd))[1:-1])
    elif tablefmt in ['csv_nos', 'tsv_nos']:
        print(tb(tsd,
                 tablefmt=fmt,
                 showindex=showindex,
                 headers=headers).replace(' ', ''))
    else:
        print(tb(tsd,
                 tablefmt=fmt,
                 showindex=showindex,
                 headers=headers))
Example #28
    def show(self):
        text = []

        text.append('Input data: X')
        text.append(tb(np.array(self.datas[:, :self.degf[2]])))

        text.append('\nInput data: Y')
        text.append(tb(np.array(self.datas[:, self.degf[2]:self.degf[3]])))

        text.append('\nX normalised:')
        text.append(tb(np.array(self.data[:, :self.degf[2]])))

        text.append('\nY normalised:')
        text.append(tb(np.array(self.data[:, self.degf[2]:self.degf[3]])))

        text.append('\nmatrix B:')
        text.append(tb(np.array(self.B)))

        text.append('\nmatrix A:')
        text.append(tb(np.array(self.A)))

        text.append('\nmatrix Lambda:')
        text.append(tb(np.array(self.Lamb)))

        for j in range(len(self.Psi)):
            s = '\nmatrix Psi%i:' % (j + 1)
            text.append(s)
            text.append(tb(np.array(self.Psi[j])))

        text.append('\nmatrix a:')
        text.append(tb(self.a.tolist()))

        for j in range(len(self.Fi)):
            s = '\nmatrix F%i:' % (j + 1)
            text.append(s)
            text.append(tb(np.array(self.Fi[j])))

        text.append('\nmatrix c:')
        text.append(tb(np.array(self.c)))

        text.append('\nY rebuilt normalised:')
        text.append(tb(np.array(self.F)))

        text.append('\nY rebuilt:')
        text.append(tb(self.F_.tolist()))

        text.append('\nNormalised residual (max) (Y - F)')
        text.append(tb([self.norm_error]))

        text.append('\nNormalised residual (avg) (Y - F)')
        text.append(tb([self.norm_error_a]))

        text.append('\nResidual (max) (Y_ - F_)')
        text.append(tb([self.error]))

        text.append('\nResidual (avg) (Y_ - F_)')
        text.append(tb([self.error_a]))

        return '\n'.join(text)
Example #29
# Correlation matrix 
startup50.corr()

np.mean(startup50)
startup50['Profit'].mean() 
startup50['Profit'].median()
startup50['Profit'].mode()
startup50['Profit'].var()
startup50['Profit'].std()

print(startup50.describe())
descriptive = startup50.describe()

from tabulate import tabulate as tb
print(tb(descriptive, startup50.columns))

######### boxplots ###########

import seaborn as sns

plt.boxplot(startup50.Profit)
plt.xticks([1,], ['Profit'])
plt.boxplot(startup50.RandDSpend)
plt.xticks([1,], ['R&D Spend'])
plt.boxplot(startup50.Administration)
plt.xticks([1,], ['Administration'])
plt.boxplot(startup50.MarketingSpend)
plt.xticks([1,], ['MarketingSpend'])
plt.boxplot(startup50.State)
plt.xticks([1,], ['State'])
Example #30
inte = np.zeros(len(x[0]))
error = np.zeros(len(x[0]))
for i, limit in enumerate(x[0]):
    # numerical integration: quad returns the integral value and an error estimate
    inte[i] = quad(f, 0, limit)[0]

inte_def = np.arctan(x[0])
error = abs(inte_def - inte)
li = list()
#------------tabulating-----------
for ve, i_d, tan in zip(x[0], inte_def, inte):
    l_temp = [ve, i_d, tan]
    li.append(l_temp)

first_row = ["x-values", "arctan function", "Integral defined values"]
li.insert(0, first_row)
print(tb(li, tablefmt='psql', headers="firstrow"))

#-------------First plot---------
mpt.figure(1)
mpt.plot(x[0], inte, 'ro')
mpt.plot(x[0], inte_def, color='black', linewidth=2)
mpt.xlabel("x")
mpt.ylabel("arctan")
mpt.title(r"Plot of $\int_{0}^{x} dt/{1+t^{2}}$")
mpt.legend(('Integral value', 'Arctan'))
#------------Ending plot 1--------------

#-------------Second subplot----------
mpt.figure(2)
mpt.plot(x[0], error, 'ro')
mpt.yscale("log")
Example #31
    balances = exchange.get_all_balances()
    print(" %s balances info:" % args.exchange)
    #print(tb(balances))

    if exchange.kline_data_type == kl.KLINE_DATA_TYPE_LIST:
        closeseat = exchange.kline_idx_close
    else:
        closeseat = exchange.kline_key_close

    total_value = 0
    for item in balances:
        amount = max(get_balance_free(item), get_balance_frozen(item))
        if amount < 0:
            continue

        coin = get_balance_coin(item)
        if coin.upper() == args.basecoin.upper():
            value = amount
        else:
            #print(coin)
            symbol = creat_symbol(coin, args.basecoin)
            klines = exchange.get_klines_1min(symbol, size=1)
            price = float(klines[-1][closeseat])
            value = price * amount

        total_value += value
        item['value'] = value

    print(tb(balances))
    print("total value: %s  %s" % (total_value, args.basecoin))
Example #32
# print(Fore.RED+"[MENU]"
#       "\nItem 1 [Cheese Burger]: $6"
#       "\nItem 2 [Fries]: $3"
#       "\nItem 3 [Tenders]: $4"
#       "\nItem 4 [Soda]: $2")

mntbl = {
    'Item #': list(range(1, numOrderList + 1)),
    'Item Name': OrderList,
    'Item Price': ItemP
}
mntblDF = pd.DataFrame(mntbl, columns=['Item #', 'Item Name', 'Item Price'])

print(Fore.RED + "[MENU]")
print(tb(mntblDF, headers='keys', tablefmt='psql'))

#Make list for VALUES through user input
OrderQuant = []
for a in range(1, numOrderList + 1):
    print(Fore.BLACK + "How many of Item", a, "would you like to order?")
    quant = int(input("Please indicate the quantity here:"))
    OrderQuant.append(quant)

#Merge two lists above to make dictionary for order_n
merge = {OrderList[i]: OrderQuant[i] for i in range(len(OrderList))}

#Calculating subtotals

#Repeat order back to customer & Record sales in form of receipt
print(Fore.BLUE + "Thank you for dining with us.\nPlease confirm your order.")
Example #33
causas = {1: 'EMBRIAGUEZ O DROGA', 2: 'MAL REBASAMIENTO INVADIR CARRIL', 3: 'EXCESO VELOCIDAD',
          4: 'IMPERICIA E IMPRUDENCIA DEL CONDUCTOR', 5: 'IMPRUDENCIA DEL PEATÓN', 6: 'DAÑOS MECÁNICOS',
          7: 'NO RESPETA LAS SEÑALES DE TRÁNSITO', 8: 'FACTORES CLIMÁTICOS', 9: 'MAL ESTADO DE LA VÍA',
          10: 'OTRAS CAUSAS'}
zonas = {2: 'RURAL', 1: 'URBANA'}

data = []
with open('Datanew.csv', 'r') as f:
    for line in f:
        line = line.strip()
        provincia, canton, mes, dia, hora, clase, causa, zona, nh, nf, tv = line.split(',')
        l = [cantones[int(canton[1:])], zonas[int(zona)], clases[int(clase)], causas[int(causa)],
             meses[int(mes)], dias[int(dia)], horas[int(hora)], nh, nf, tv]
        data.append(l)

columnas = [
    'Canton', 'Zona', 'Clase', 'Causa', 'Mes', 'Dia', 'Hora', 'Heridos',
    'Fallecidos', 'Total'
]

d1 = pd.DataFrame(data, columns=columnas)
#print(tb(d1,tablefmt='grid',stralign='center',showindex=False,headers=columnas))
#d2=d1.groupby(['Clase'])
with open('Data.txt', 'w') as f2:
    f2.write(
        tb(d1,
           tablefmt='grid',
           stralign='center',
           showindex=False,
           headers=columnas))
Example #34
    def show(self):
        text = []

        text.append('Input data: X')
        text.append(tb(np.array(self.datas[:, :self.degf[2]])))

        text.append('\nInput data: Y')
        text.append(tb(np.array(self.datas[:, self.degf[2]:self.degf[3]])))

        text.append('\nNormalised X:')
        text.append(tb(np.array(self.data[:, :self.degf[2]])))

        text.append('\nNormalised Y:')
        text.append(tb(np.array(self.data[:, self.degf[2]:self.degf[3]])))

        text.append('\nmatrix A:')
        text.append(tb(np.array(self.A)))

        text.append('\nmatrix B:')
        text.append(tb(np.array(self.B)))

        text.append('\nmatrix Lambda:')
        text.append(tb(np.array(self.Lamb)))

        for j in range(len(self.Psi)):
            s = '\nmatrix Psi%i:' % (j + 1)
            text.append(s)
            text.append(tb(np.array(self.Psi[j])))

        text.append('\nmatrix a:')
        text.append(tb(self.a.tolist()))

        for j in range(len(self.Fi)):
            s = '\nmatrix F%i:' % (j + 1)
            text.append(s)
            text.append(tb(np.array(self.Fi[j])))

        text.append('\nmatrix c:')
        text.append(tb(np.array(self.c)))

        text.append('\nY rebuilt normalized :')
        text.append(tb(np.array(self.F)))

        text.append('\nY rebuilt :')
        text.append(tb(self.F_.tolist()))

        text.append('\nError normalised (Y - F)')
        text.append(tb([self.norm_error]))

        text.append('\nError (Y - F)')
        text.append(tb([self.error]))

        return '\n'.join(text)