def load(self):
    self.parentregion = self.getParentRegion()
    self.sourceofdata = self.getSourceOfData()
    self.loadRegionalData()

    # Derive summary values from the regional dataframe
    location = self.parentregion
    confirmed = self.dfRegion[ColoumnName.Totalconfirmed.value].sum()
    dead = self.dfRegion[ColoumnName.Death.value].sum()
    recovered = self.dfRegion[
        ColoumnName.Cured_Discharged_Migrated.value].sum()
    confirmed_internal = self.dfRegion[
        ColoumnName.TotalConfirmedcases_IndianNational.value].sum()
    confirmed_external = self.dfRegion[
        ColoumnName.TotalConfirmedcases_ForeignNational.value].sum()
    # Guard against division by zero when there are no confirmed cases
    mortality_rate = dead / confirmed if confirmed else 0.0

    # Build the single summary row and assign the master dataframe
    rwdata = [self.dtestr, location, confirmed, dead, recovered,
              confirmed_internal, confirmed_external, mortality_rate]
    self.dfSummary = pd.DataFrame(data=[rwdata], columns=self.columns)
def parse_recommendations(raw_datas, recommendations_dict):
    columns = ['date_str', 'recommendation', 'ticker']
    df = pd.DataFrame(columns=columns)
    ticker = ''
    for raw_data in raw_datas['data']:
        data = raw_data.replace('\t', ' ')
        if ' ' in data:
            data_splitted = data.split(' ')
            date_str = parse_date(data_splitted[0])
            if ticker == '':
                raise SyntaxError(
                    f'Ticker must be defined before recommendation row, line "{data}".')
            recommendation = parse_recommendation_text(
                data_splitted[1], recommendations_dict)
            row = pd.DataFrame(
                np.array([[date_str, recommendation, ticker]]), columns=columns)
            if len(df) == 0:
                df = pd.concat([df, row], ignore_index=True)
            else:
                last_row = df.tail(1).iloc[0]
                if last_row['ticker'] == ticker and last_row['date_str'] >= date_str:
                    raise ValueError(
                        f'Ticker {ticker} has suspicious order of dates, line "{data}", previous date {last_row["date_str"]}')
                if last_row['ticker'] != ticker or last_row['recommendation'] != recommendation:
                    df = pd.concat([df, row], ignore_index=True)
        else:
            if ',' in data:
                raise ValueError(
                    f'Ticker {ticker} has erroneous line "{data}"')
            if data == '-END-':
                return df
            if data != '':
                ticker = data

    return df
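A minimal usage sketch for parse_recommendations, with hypothetical stand-ins for the two helpers it calls (parse_date and parse_recommendation_text are not shown above); the input data is made up:

import numpy as np
import pandas as pd

def parse_date(text):
    # hypothetical stand-in: the real helper presumably normalises the date string
    return text

def parse_recommendation_text(text, recommendations_dict):
    # hypothetical stand-in: map a raw token to a canonical recommendation
    return recommendations_dict.get(text, text)

raw_datas = {'data': ['ACME', '2020-01-02 buy', '2020-03-04 sell', '-END-']}
df = parse_recommendations(raw_datas, {'buy': 'BUY', 'sell': 'SELL'})
print(df)  # one row per change of recommendation for ACME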
Example 3
    def _plot_flux_variability_analysis(self, index, variables=None, title=None,
                                        width=None, height=None, palette=None, grid=None):
        if variables is None:
            variables = self.reference_fva.index[0:10]

        title = "Compare WT solution %i" % index if title is None else title

        wt_fva_res = self.reference_fva.loc[variables]
        strain_fva_res = self.nth_panel(index).loc[variables]
        # DataFrame.append was removed in pandas 2.0; collect rows and build once
        rows = []
        for reaction_id, row in wt_fva_res.iterrows():
            rows.append([row['lower_bound'], row['upper_bound'], "WT", reaction_id])

        for reaction_id, row in strain_fva_res.iterrows():
            rows.append([row['lower_bound'], row['upper_bound'], "Strain %i" % index, reaction_id])

        dataframe = pandas.DataFrame(rows, columns=["lb", "ub", "strain", "reaction"])

        plot = plotter.flux_variability_analysis(dataframe, grid=grid, width=width, height=height,
                                                 title=title, x_axis_label="Reactions", y_axis_label="Flux limits",
                                                 palette=palette)

        plotter.display(plot)
Example 4
    def save_matrices_to_csv(self, step=None):
        print("Saving matrix to CSV")
        basepath = 'matrices/'
        filepath = basepath + 'odm-{}'
        if step is not None:
            filepath = filepath + '-step{}'.format(step)
        filepath = filepath + '.csv'

        matrix_size = len(self.matrices['ESC1'])
        aggregate_matrix = np.zeros(shape=(matrix_size, matrix_size),
                                    dtype=int)

        labels = self.zones + ['Total']

        for matrix_type in self.matrices:
            filename = filepath.format(matrix_type)
            aggregate_matrix = np.add(aggregate_matrix,
                                      self.matrices[matrix_type],
                                      dtype=int)
            pandas.DataFrame(self.matrices[matrix_type],
                             index=labels,
                             columns=labels).to_csv(filename)

        self.matrices['AGG'] = aggregate_matrix
        filename = filepath.format('aggregate')
        pandas.DataFrame(aggregate_matrix, index=labels,
                         columns=labels).to_csv(filename)
Example 5
def create_confusion_matrix(self, test, prediction, columns):
    matrix = confusion_matrix(test, prediction)
    if columns is not None:
        self.confusion_matrix = pd.DataFrame(matrix, index=columns, columns=columns)
    else:
        self.confusion_matrix = pd.DataFrame(matrix)
    self.indexes = IndexesCalculator(self.confusion_matrix)
def util_create_scenery(df, historic_count, drop_columns):
    # Build one scenario per source row; DataFrame.append was removed in
    # pandas 2.0, so collect the pieces and concatenate once at the end.
    frames = []
    for x in range(0, 100):  # df['cnv_date'].size):
        final_data = util_scenario(df.iloc[x], df, x, historic_count, drop_columns)
        frames.append(pandas.DataFrame(data=final_data).T)
    return pandas.concat(frames)
Example 7
def ReadStructureFile(self, fpath):
    fname = 'POSCAR' if self.usePoscars else 'CONTCAR'
    with open(os.path.join(fpath, fname), "r") as f:
        lines = f.readlines()
    atomdict = dict(zip(lines[5].split(), list(map(int, lines[6].split()))))
    atomorder = lines[5].split()
    dim = self.maxMoleculeSize + (self.numberOfMetalAtomsInEachLayer * 2)
    struct = pd.DataFrame(index=range(0, dim), columns=['atom', 'x', 'y', 'z'])
    cho_order = []
    # Lines 2-4 hold the lattice vectors; build the 3x3 cell matrix
    currline = 2
    mat = np.zeros((3, 3))
    for i in range(3):
        mat[i, :] = list(map(float, lines[currline].split()))
        currline = currline + 1
    # Atom coordinates start at line 9
    currline = 9
    i = 0
    for atom in atomorder:
        linesToRead = atomdict[atom]
        if atom in ('C', 'H', 'O'):
            cho_order.append(atom)
            for line in range(linesToRead):
                words = lines[currline].split()
                v = np.array([float(k) for k in words[:3]])
                w = np.dot(mat, v)  # fractional -> Cartesian coordinates
                struct.iloc[i, 0:4] = atom, w[0], w[1], w[2]
                i = i + 1
                currline = currline + 1
        elif self.excludeMetal:
            currline = currline + linesToRead
        else:
            cho_order.append(atom)
            metalCoords = pd.DataFrame(index=range(0, self.atomMaxDict['M']), columns=range(0, 4))
            for line in range(linesToRead):
                words = lines[currline].split()
                v = np.array([float(k) for k in words[:3]])
                w = np.dot(mat, v)
                metalCoords.iloc[line, 0:4] = atom, w[0], w[1], w[2]
                currline = currline + 1
            # Keep only the topmost metal atoms (sorted by z, descending)
            p = metalCoords.sort_values(by=[3], ascending=False)
            for j in range(self.numberOfMetalAtomsInEachLayer * 2):
                struct.iloc[i, 0:4] = atom, p.iloc[j, 1], p.iloc[j, 2], p.iloc[j, 3]
                i = i + 1

    # Pad the remaining rows with dummy atoms
    while i < dim:
        struct.iloc[i, :] = 'X', 0.0, 0.0, 0.0
        i = i + 1
    return struct, cho_order, atomdict
Example 8
    def fill_workers_surveys(self):
        print("Generating workers surveys")

        row_list = []

        workers_columns = self.columns + ['WORK-TYPE']

        for place in self.surveys_to_generate:
            origin = place['from']
            prim_number = place['Setor primário']
            sec_number = place['Secundário']
            tert_number = place['Terciário']
            workplaces = self.generate_zone_workplaces(prim_number, sec_number, tert_number)
            for workplace in workplaces:
                d = dict.fromkeys(self.columns, '')
                d['ORG-LUG'] = origin
                d['DEST-LUG'] = workplace['zone']
                d['WORK-TYPE'] = workplace['type']
                row_list.append(d)

        random.shuffle(row_list)

        df = pandas.DataFrame(row_list, columns=workers_columns)

        with ExcelWriter(self.workers_filepath) as writer:
            df.to_excel(writer)
Example 9
def __init__(self, master=None, **kwargs):
    tk.Frame.__init__(self, master, **kwargs)
    self.pack()
    master.title('WordCounter v0.1')
    master.geometry("1000x600")
    # contents, titles and l are expected to be module-level globals
    self.request(contents)
    self.filter_it(titles)
    df_words = pd.DataFrame(l, columns=['Word'])
    plt.style.use('seaborn')
    figure = plt.Figure(figsize=(10, 5), dpi=100)
    ax = figure.add_subplot(111)
    canvas = FigureCanvasTkAgg(figure, self)
    canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
    # Top ten words by frequency
    df = df_words['Word'].value_counts(
        ascending=False)[:10].rename_axis('Word').reset_index(name='Counts')
    graph = df.plot.barh(x='Word', y='Counts', rot=0, color='#f23d3d', ax=ax)
    ax.set_xlabel("Word Count", weight='bold')
    ax.set_ylabel("Word", weight='bold')
    ax.set_title("Wordcounter", weight='bold')
    graph.invert_yaxis()
    plt.tight_layout()
    self.text1 = tk.Text(self, height=20, width=50)
    self.scroll = tk.Scrollbar(self, command=self.text1.yview)
    self.text1.configure(yscrollcommand=self.scroll.set)
    self.text1.insert(tk.END, titles, 'color')
    self.text1.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
    self.scroll.pack(side=tk.LEFT, fill=tk.Y)
def generate_metadata(local_path, local_project, local_filename):
    url = local_path + local_project + local_filename
    df = pd.read_excel(url)

    feature = []
    feature_description = []
    feature_group = []

    # The first data row holds each column's description
    for column_indexer in range(0, len(df.columns)):
        feature.append(df.columns[column_indexer])
        feature_description.append(df.iloc[0, column_indexer])
        feature_group.append('-')

    df_metadata = pd.DataFrame(
        list(zip(feature, feature_description, feature_group)),
        columns=['Feature Name', 'Feature Description', 'Feature Group'])

    url = local_path + local_project + '_metadata.xlsx'
    print(f'url (save excel): {url}')
    df_metadata.to_excel(url)

    print(df_metadata)
Example 11
def predict_nums(df, band):
    data = df[df.band == band]
    X = data['total']

    size = int(len(X) * 0.85)
    train, test = X[0:size], X[size:len(X)]
    test = np.append(test, 135)  # extra reference value appended for the prediction step

    # non-stationary series: the d=1 term of ARIMA(5, 1, 0) differences once
    history = [float(x) for x in train]
    predictions = []
    diff = []

    for t in range(len(test)):
        model = ARIMA(history, order=(5,1,0))
        model_fit = model.fit(disp=0)
        output = model_fit.forecast(alpha=1)
        yhat = output[0]
        predictions.append(yhat)
        obs = test[t]
        history.append(obs)
        diff.append(yhat - obs)

    df = pd.DataFrame(diff)
    diff = df.describe()
    return (int(predictions[-1]), int(diff[0].loc['25%']), int(diff[0].loc['75%']))
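This snippet uses the legacy statsmodels ARIMA interface (model.fit(disp=0)), which was removed in statsmodels 0.13. A minimal sketch of the same rolling one-step forecast against the current API; the synthetic series and roll-forward rule here are only illustrative:

import numpy as np
from statsmodels.tsa.arima.model import ARIMA  # current API (statsmodels >= 0.13)

history = list(np.random.default_rng(0).normal(size=50).cumsum())  # synthetic series
predictions = []
for _ in range(5):
    model_fit = ARIMA(history, order=(5, 1, 0)).fit()  # fit() no longer takes disp
    yhat = model_fit.forecast(steps=1)[0]  # one-step-ahead forecast
    predictions.append(yhat)
    history.append(yhat)  # roll forward on the forecast itself
print(predictions)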
Example 12
def EncodeUsingCM(self, structFile, outFileName, r_power=1, useCutoff=False,
                  cutoff_dist_angstrom=4.0, setValForMetalAtomicNumType=None):
    with open(structFile, 'rb') as f:
        allStructs = pickle.load(f)
    if setValForMetalAtomicNumType is not None:
        self.metalAtomicNumType = setValForMetalAtomicNumType
    structs = allStructs['structs']
    metals = allStructs['metal']
    species = allStructs['species']
    metalDescs = allStructs['metalDescs']
    dim = self.maxMoleculeSize + (self.numberOfMetalAtomsInEachLayer * 2)
    # One row per structure: metal, species, metal descriptors, then the CM vector
    df = pd.DataFrame(index=range(0, len(structs)), columns=range(0, dim + 2 + metalDescs.shape[1]))
    df_row = 0
    for struct in structs:
        cmvec = self.BuildCoulombMatrixFromStruct(struct, r_power, useCutoff, cutoff_dist_angstrom)
        df.iloc[df_row, 0] = metals[df_row]
        df.iloc[df_row, 1] = species[df_row]
        for i in range(metalDescs.shape[1]):
            df.iloc[df_row, i + 2] = metalDescs[df_row, i]
        # Write the Coulomb-matrix vector in reverse order
        l = 2 + metalDescs.shape[1]
        for k in range(dim - 1, -1, -1):
            df.iloc[df_row, l] = cmvec[k]
            l += 1
        df_row += 1
        if df_row % 100 == 0 and self.isVerbose:
            print(df_row)
    df.to_csv(outFileName)
Example 13
def GetBoBStruct(self, struct, cho_order, atomdict):
    structbob = pd.DataFrame(index=range(0, struct.shape[0]), columns=range(0, 6))
    # Ensure C, H and O are always present, even with a zero count
    for element in ('C', 'H', 'O'):
        if element not in atomdict:
            cho_order.append(element)
            atomdict[element] = 0
    row = 0
    j = 0
    for cho in cho_order:
        if cho in ('C', 'H', 'O'):
            for i in range(atomdict[cho]):
                structbob.iloc[row, :] = struct.iloc[j, 0], struct.iloc[j, 1], struct.iloc[j, 2], \
                                         struct.iloc[j, 3], 'reg', struct.iloc[j, 0]
                row = row + 1
                j = j + 1
            # Pad up to the per-element maximum with dummy atoms
            for i in range(self.atomMaxDict[cho] - atomdict[cho]):
                structbob.iloc[row, :] = cho, 0.0, 0.0, 0.0, 'pad', cho
                row = row + 1
        elif not self.excludeMetal:
            for i in range(self.numberOfMetalAtomsInEachLayer * 2):
                structbob.iloc[row, :] = 'W', struct.iloc[j, 1], struct.iloc[j, 2], \
                                         struct.iloc[j, 3], 'reg', struct.iloc[j, 0]
                row = row + 1
                j = j + 1
    structbob_sorted = structbob.sort_values(by=[0, 4], ascending=[True, False])
    return structbob_sorted
Example 14
def compile_data():
    with open("sp500tickers.pickle", "rb") as f:
        tickers = pickle.load(f)

    main_df = pd.DataFrame()

    for count, ticker in enumerate(tickers):
        df = pd.read_csv('stock_dfs/{}.csv'.format(ticker))
        df.set_index('Date', inplace=True)

        df.rename(columns={'Adj Close': ticker}, inplace=True)
        # the positional axis argument was removed in pandas 2.0
        df.drop(['Open', 'High', 'Low', 'Close', 'Volume'], axis=1, inplace=True)

        if main_df.empty:
            main_df = df
        else:
            # join on the shared Date index; merge() without keys ignores the index
            main_df = main_df.join(df, how='outer')

        gc.collect()

        if count % 10 == 0:
            print(count)

    print(main_df.head())
    main_df.to_csv('sp500_joined_closes.csv')
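Since each per-ticker frame is indexed by Date, join aligns on that index. A small self-contained illustration of the outer join used above, with made-up prices:

import pandas as pd

a = pd.DataFrame({'AAPL': [1.0, 2.0]}, index=pd.Index(['2020-01-01', '2020-01-02'], name='Date'))
b = pd.DataFrame({'MSFT': [3.0]}, index=pd.Index(['2020-01-02'], name='Date'))

# Outer join keeps every date from both frames, filling gaps with NaN
print(a.join(b, how='outer'))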
def generate_metadata(local_group, local_project):
    LOCAL_PATH = f'./data/{local_group}/{local_project}/'
    LOCAL_ORIGINAL_DATA = '_0_cat.xlsx'

    url = LOCAL_PATH + local_project + LOCAL_ORIGINAL_DATA
    print(f'url (original)  : {url}')
    df = pd.read_excel(url)

    df = df.drop(columns=['IPAddress', 'RecipientLastName', 'RecipientFirstName',
                          'RecipientEmail', 'ExternalReference', 'UserLanguage'])

    feature = []
    feature_description = []
    feature_group = []

    # The first data row holds each column's description
    for column_indexer in range(0, len(df.columns)):
        feature.append(df.columns[column_indexer])
        feature_description.append(df.iloc[0, column_indexer])
        feature_group.append('-')

    df_metadata = pd.DataFrame(
        list(zip(feature, feature_description, feature_group)),
        columns=['Feature Name', 'Feature Description', 'Feature Group'])

    url = LOCAL_PATH + local_project + '_metadata.xlsx'
    print(f'url (save excel): {url}')
    df_metadata.to_excel(url)

    print(df_metadata)
Example 16
    def Download_finviz(self):
        a = finvizObj()
        t0 = time()
        k = 0
        self.finviz_result = []
        for symb1 in self.symb:
            try:
                tmp = a.get_keys(symb1)
                tmp1 = pd.DataFrame([tmp], columns=tmp.keys())
                tmp1['symb'] = symb1
                tmp1['status'] = 1

                self.finviz_result.append(tmp1)
                print('TK' + str(k) + ' :: ' + symb1 +
                      ' key matrix from Finviz is loaded successfully')
            except Exception:
                print('TK' + str(k) + ' :: ' + symb1 +
                      ' Load error in finviz parser, Check symbol please')
                continue

            k = k + 1
        print(
            '_____________Key ratios from Finviz Download______________________'
        )
        print('All done Total time ' + str(int(time() - t0)) + ' Seconds')
def generate_metadata(local_group, local_project):
    LOCAL_PATH = f'./data/{local_group}/{local_project}/'
    LOCAL_ORIGINAL_DATA = '_0_cat.xlsx'

    url = LOCAL_PATH + local_project + LOCAL_ORIGINAL_DATA
    df = pd.read_excel(url)

    feature = []
    feature_description = []
    feature_group = []

    for column_indexer in range(0, len(df.columns)):
        feature.append(df.columns[column_indexer])
        feature_description.append(df.iloc[0, column_indexer])
        feature_group.append('-')

    df_metadata = pd.DataFrame(
        list(zip(feature, feature_description, feature_group)),
        columns=['Feature Name', 'Feature Description', 'Feature Group'])

    url = LOCAL_PATH + local_project + '_metadata.xlsx'
    print(f'url (save excel): {url}')
    df_metadata.to_excel(url)

    print(df_metadata)
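The generate_metadata variants above share one idea: the feature name comes from the column header and the description from the first data row. A minimal sketch of that extraction on an in-memory frame; the sample survey data is made up:

import pandas as pd

# Hypothetical survey export: row 0 carries the question text
df = pd.DataFrame({'Q1': ['How old are you?', '34', '27'],
                   'Q2': ['Favourite colour?', 'red', 'blue']})

df_metadata = pd.DataFrame(
    {'Feature Name': df.columns,
     'Feature Description': df.iloc[0].values,
     'Feature Group': '-'})
print(df_metadata)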
Example 18
def readFileSlot(self):
    self.message.setText("")
    global messageType
    self.path = QListWidgetItem(self.lstbox.currentItem()).text()
    self.showProgress = PopUpProgressBar()
    if self.path != '':
        self.showProgress.show()
        index = linkNames.index(self.path)
        self.parseData = Parser(str(links[index]))
        pickleName = self.parseData.parse(self.showProgress)
        self.testing = Testing(pickleName)
        self.results = self.testing.analyse(self.showProgress)
        self.outputResult = list()
        for result in self.results:
            self.outputResult.append(result[:-1])
        self.dataFrame = pd.DataFrame(
            self.outputResult, columns=['File', 'Confidence', 'Motion'])
        print(self.dataFrame)
        self.text = ""
        for result in self.results:
            self.text = result[0] + " is " + result[1] + "% " + result[2]
            self.textbox.addItem(self.text)
        self.showProgress.close()
        if self.text == "":
            messageType = 1
            self.systemMessage = PopUpMessageBox()
        else:
            self.hasOutput = True
    else:
        messageType = 2
        self.systemMessage = PopUpMessageBox()
Example 19
    def fill_students_surveys(self):
        print("Generating students surveys")

        row_list = []

        students_columns = self.columns + ['SCHOOL-TYPE']

        for place in self.surveys_to_generate:
            origin = place['from']
            number = place['Estudantes']
            
            schools = self.generate_zone_schools()
            
            for _ in range(number):
                d = dict.fromkeys(self.columns, '')
                d['ORG-LUG'] = origin
                school = random.choice(schools)
                d['DEST-LUG'] = school['zone']
                d['SCHOOL-TYPE'] = school['type']
                row_list.append(d)

        random.shuffle(row_list)

        df = pandas.DataFrame(row_list, columns=students_columns)

        with ExcelWriter(self.students_filepath) as writer:
            df.to_excel(writer)
def save_metadata_alternative(project_relative_root_path, local_project):
    url = project_relative_root_path + local_project + DATA_SOURCE_SUFFIX

    print(f'url (original)  : {url}')
    df = pd.read_excel(url)

    df = df.drop(columns=[
        'StartDate', 'EndDate', 'Status', 'IPAddress', 'RecipientLastName',
        'RecipientFirstName', 'RecipientEmail', 'ExternalReference',
        'LocationLatitude', 'LocationLongitude', 'DistributionChannel',
        'UserLanguage', 'RecordedDate', 'ResponseId'
    ])

    # rename - Duration (in seconds)
    df.rename(columns={'Duration (in seconds)': 'Duration'}, inplace=True)

    feature = []
    feature_label = []
    feature_description = []
    feature_group = []
    drop_down = []
    data_types = []
    chart_type = []

    for column_indexer in range(0, len(df.columns)):
        name = df.columns[column_indexer]
        description = df.iloc[0, column_indexer]

        data_types.append(df[name].dtypes.name)

        feature.append(name)
        feature_description.append(description)

        label, group, alias = get_label_group(name, description)
        feature_label.append(label)
        feature_group.append(group)
        drop_down.append(alias)
        chart_type.append('Single')

    # SAVE
    df_metadata = pd.DataFrame(
        list(zip(feature, feature_label, feature_group, feature_description, drop_down)),
        columns=['FeatureName', 'FeatureLabel', 'FeatureGroup', 'FeatureDescription', 'DropDown'])

    url = project_relative_root_path + local_project + METADATA_SUFFIX
    print(f'url (save {METADATA_SUFFIX}): {url}')
    print(df_metadata.shape)
    df_metadata.to_excel(url, index=False)

    return df
Example 21
def DCACodePicker(data, Keyword):
    # check whether data is a dataframe
    if isinstance(data, pd.DataFrame):
        # checks whether 'DCA_CODE' is a column heading
        if 'DCA_CODE' in data:
            # checks whether the user input a keyword
            if Keyword:
                # collect every row whose DCA CODE contains the keyword
                matches = []
                for index, row in data.iterrows():
                    currkeywords = row['DCA_CODE']
                    if Keyword in currkeywords:
                        matches.append(row)
                RelevantData = pd.DataFrame(matches).reset_index(drop=True)
                return RelevantData
            else:
                return 'No Search Term Supplied'
        else:
            return 'Incorrect Data'
    elif data == 'Invalid Date':
        return 'Invalid Date'
    else:
        return 'Unknown Error'
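A quick usage sketch with made-up data, showing the keyword filter and the empty-keyword branch:

import pandas as pd

# hypothetical sample data
frame = pd.DataFrame({'DCA_CODE': ['FIRE SAFETY', 'FLOOD', 'FIRE DOOR'],
                      'value': [1, 2, 3]})
print(DCACodePicker(frame, 'FIRE'))  # keeps rows 0 and 2
print(DCACodePicker(frame, ''))      # 'No Search Term Supplied'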
Example 22
def countTfbsInTissues(df):
    for tissueName in tissueNames:
        tfsInTissue[tissueName] = dict()

    df.apply(lambda row: addTfbsToTfCounts(row), axis=1)

    print("Creating dataframe")
    cols = ['tfName'] + list(tissueNames) + ['totalBS']
    rows = []
    for tfName in tfs:
        row = dict()
        row['tfName'] = tfName
        totalBS = 0
        for tissueName in tissueNames:
            amount = 0
            if tfName in tfsInTissue[tissueName]:
                amount = tfsInTissue[tissueName][tfName]
            row[tissueName] = amount
            totalBS += amount
        row['totalBS'] = totalBS
        rows.append(row)
    tfDf = pd.DataFrame(rows, columns=cols)
    tfDf['median'] = tfDf[list(tissueNames)].median(axis=1)
    tfDf['mean'] = tfDf[list(tissueNames)].mean(axis=1)
    return tfDf
def logData(self, i, x, candidate, meanVal, mse, secmeanVal, secmse,
            accratio, isaccepted):
    if self.df is None:
        self.collist = [
            'pred1_mean_cand', 'pred1_mean_x', 'pred1_mse_cand',
            'pred1_mse_x', 'pred1_mse_cov', 'pred2_mean_cand',
            'pred2_mean_x', 'pred2_mse_cand', 'pred2_mse_x',
            'pred2_mse_cov', 'accratio', 'isaccepted', 'evaluationSoFar'
        ]
        self.df = pd.DataFrame(index=range(self.n),
                               columns=self.collist +
                               ['x' + str(i) for i in range(self.dim)] +
                               ['cand' + str(i) for i in range(self.dim)])
    self.df.iloc[i, :5] = meanVal[0, 0], meanVal[1, 0], mse[0, 0], mse[1, 1], mse[0, 1]
    self.df.iloc[i, 5:10] = secmeanVal[0, 0], secmeanVal[1, 0], secmse[0, 0], secmse[1, 1], secmse[0, 1]
    self.df.iloc[i, 10] = accratio
    self.df.iloc[i, 11] = isaccepted
    self.df.iloc[i, 12] = self.numberOfFuncEvals
    self.df.iloc[i, len(self.collist):len(self.collist) + self.dim] = x
    self.df.iloc[i, len(self.collist) + self.dim:] = candidate
Example 24
def main():
    print('loading file')
    print('................')
    print('\n')
    dirname = os.path.dirname(__file__)
    filename = os.path.join(dirname, '../data/FormatoDeAlmacenamiento1.csv')
    df = pd.read_csv(filename, sep=';', header=None, na_values=" NaN")

    # extract the HH:MM:SS timestamp from the first column
    df2 = pd.DataFrame()
    df2[0] = df[0].str.extract(r'((?:[01]\d|2[0-3]):[0-5]\d:[0-5]\d)')
    df2[1] = df[1]
    print("grouping by minute")
    df2[0] = pd.DatetimeIndex(df2[0])
    df2.set_index(keys=0, inplace=True)
    ini = datetime.time(0, 18, 0)
    fin = datetime.time(23, 59, 0)
    df3 = df2[[1]].between_time(ini, fin)
    df3 = df3.groupby([1])[[1]].count()
    print(df3)
Example 25
def calcExpressionForEachTissue(tFactorDF):
    newDFRows = []
    tFactorDF.apply(lambda row: getUniquenessPerTF(row, newDFRows), axis=1)
    diffFactorDF = pd.DataFrame(newDFRows,
                                columns=['tfName', 'tissue', 'diffFactor'])
    # DataFrame.sort was removed in pandas 0.20; use sort_values instead
    diffFactorDF.sort_values('diffFactor', inplace=True, ascending=False)
    return diffFactorDF
Example 26
    def parsed_to_panel_dataframe(self, parsed):
        """
        Receives a dict of parsed records from which it builds the panel dataframe.
        Returns a list of dicts with the panel data.

        Parameters
        ----------
        parsed: dict of per-day records for one currency.
        """
        def create_multi_index_column(field_title):
            """Builds a multi-index tuple by splitting a field title."""
            tc, ars, coin, entity, channel, flow, hour = field_title.split("_")
            return (coin, entity, channel, flow, hour)

        df = pd.DataFrame(parsed.values()).set_index("indice_tiempo")
        df.sort_index(inplace=True)
        df.columns = pd.MultiIndex.from_tuples(
            [create_multi_index_column(col) for col in df.columns])
        df_panel = df.stack([-5, -4, -3, -2, -1], dropna=False).reset_index()
        df_panel.columns = [
            "indice_tiempo", "moneda", "entidad_bancaria", "canal", "flujo",
            "hora", "valor"
        ]
        # keep only strictly positive values
        df_panel["valor"] = df_panel["valor"].apply(lambda x: x
                                                    if x and x > 0 else None)
        panel_data = df_panel.to_dict(orient="records")

        return panel_data
Example 27
    def main(self):
        try:
            self.goto_room()
            finally_data = pd.DataFrame(columns=['期数', '金额', '收益'])  # round, stake, profit
            for i in range(20):
                print(finally_data)
                print(
                    '-----------------------------------------round:{}---------------------------------------'
                    .format(i + 1))
                self.send_email(text='''
                    Profit from the previous round: {}\n
                    About to start round: {}\n
                    '''.format(finally_data['收益'].sum(), i + 1))
                data = self.plan_process()
                # DataFrame.append was removed in pandas 2.0
                finally_data = pd.concat([finally_data, data])

            print(finally_data)

        except TimeoutException:
            print('TimeoutException; fix it and start again...')
            self.send_email(text='TimeoutException; fix it and start again.')
        except WebDriverException:
            print('WebDriverException; fix it and start again...')
            self.send_email(text='WebDriverException; fix it and start again.')
        finally:
            print('The miner has stopped; welcome back next time...')
Example 28
    def _clear_vrf_tab(self, vrf_dictionary):
        """Builds and writes the "Clear VRF's" worksheet."""
        clear_vrf_df = pd.DataFrame(
            self._check_vrf_against_entire_db(vrf_dictionary))

        clear_vrf_df.to_excel(
            self.writer, sheet_name='Clear-VRF', index=False,
            header=["Clear VRF's"])
Example 29
def save_metadata(project_relative_root_path, local_project):

    url = project_relative_root_path + local_project + DATA_SOURCE_SUFFIX

    print(f'url (original)  : {url}')
    df = pd.read_excel(url)

    df = df.drop(columns=[
        'StartDate', 'EndDate', 'Status', 'IPAddress', 'RecipientLastName',
        'RecipientFirstName', 'RecipientEmail', 'ExternalReference',
        'LocationLatitude', 'LocationLongitude', 'DistributionChannel',
        'UserLanguage', 'RecordedDate', 'ResponseId'
    ],
                 axis=1)

    # rename - Duration (in seconds)
    df.rename(columns={'Duration (in seconds)': 'Duration'}, inplace=True)

    feature = []
    feature_label = []
    feature_description = []
    feature_group = []

    for column_indexer in range(0, len(df.columns)):
        name = df.columns[column_indexer]
        description = df.iloc[0, column_indexer]
        feature.append(name)
        feature_description.append(description)

        if name == 'Progress' or name == 'Duration' or name == 'Finished':
            feature_group.append('Response')
            feature_label.append(name)

        elif match(r'Q\d+_', name):
            split_pattern = r'\? - '
            if search(split_pattern, description):
                # the description reads like "<group>? - <detail>"
                elements = description.split('? - ')
                feature_group.append(elements[0])
                feature_label.append(elements[0])
            else:
                feature_group.append('NA')
                feature_label.append(name)
        else:
            feature_group.append('NA')
            feature_label.append(name)

    df_metadata = pd.DataFrame(
        list(zip(feature, feature_label, feature_group, feature_description)),
        columns=['Feature Name', 'Feature Label', 'Feature Group', 'Feature Description'])

    url = project_relative_root_path + local_project + METADATA_SUFFIX
    print(f'url (save {METADATA_SUFFIX}): {url}')
    df_metadata.to_excel(url, index=False)

    return df
Example 30
    def plan_process(self):
        # Enter the room and wait silently for the current issue to finish
        global money
        self.check_next_issue()
        # Reset the win flag
        isprize = False
        # Reset the stake-progress pointer
        i = 0
        # Initialise the three bookkeeping lists
        issue_list = []
        money_list = []
        income_list = []
        # Loop until a win breaks us out
        while not isprize:
            # Three escalating alarm levels
            if i == 3:
                self.alarm_1()
            if i == 4:
                self.alarm_2()
            if i == 5:
                self.alarm_3()
                i = 0
            # Zero the income for this round's bookkeeping
            income = 0

            # Within 10-60 seconds: place the stake first, then reset money, click, stake, confirm
            money, shade = self.my_plan(indicator=i)
            print('This is bet number {}'.format(i + 1))

            # Advance the stake pointer
            i += 1
            # Record this round in the three bookkeeping lists
            issue_list.append(self.check_issue())
            money_list.append(money)
            income_list.append(0)
            # Wait for the round to close, then 20 more seconds for the draw
            self.check_next_issue()
            time.sleep(20)
            # Draw result: True if we won, False otherwise
            isprize = self.check_num(shade)

        # A win breaks the loop; tally the profit for this session

        statements = money * 1.93  # the payout odds for a winning round are 1.93
        # Per-round income: every losing round costs its stake
        for j in range(len(money_list)):
            income_list[j] = 0 - money_list[j]
        income_list[-1] = statements - money_list[-1]
        # Build the pandas summary
        data = pd.DataFrame(
            {
                '期数': issue_list,   # round number
                '金额': money_list,   # stake
                '收益': income_list,  # profit
            },
            columns=['期数', '金额', '收益'])
        time.sleep(5)
        return data