def sync_edge(self, line):
    """Synchronise one edge of the sync graph.

    Dispatches on how many whitespace-separated links appear in
    ``line['node']`` and ``line['link']``: one-to-many (either direction),
    plain one-to-one, or "straight" sync driven by a link column.

    :param line: mapping-like graph row with at least the keys ``node``,
        ``link``, ``args`` (JSON string), ``parse_funktion``,
        ``can_add_key`` and ``straight_link_col``.
    """
    args = json.loads(line['args'])
    print("sssss", args)
    if len(line['node'].split()) > 1:
        # many target nodes, a single source link
        self.one_to_many_sync(one_node=line['link'], many_node=line['node'],
                              args=args, one_node_to_many_link=False)
    elif len(line['link'].split()) > 1:
        # single node, many links.
        # Fixed: reuse the already-parsed ``args`` instead of calling
        # json.loads(line['args']) a second time.
        self.one_to_many_sync(one_node=line['node'], many_node=line['link'],
                              args=args, one_node_to_many_link=True)
    elif len(line['link'].split()) == 1 and len(line['node'].split()) == 1:
        # plain one-to-one sync between two sheets
        print(123, line['node'], line['link'], args)
        to_wks = util.read_sheet(util.get_sheet_id_from_link(line['link']))
        from_wks = util.read_sheet(util.get_sheet_id_from_link(line['node']))
        util.sync_by_colname(from_wks=from_wks, to_wks=to_wks, **args)
    else:
        # no usable link at all -> "straight" sync driven by a link column
        from_wks = util.read_sheet(util.get_sheet_id_from_link(line['node']))
        if line['parse_funktion']:
            # optional preprocessing of the source dataframe
            from_df = PARSE_FUNK[line['parse_funktion']](from_wks.get_as_df(),
                                                         NODE_DICT)
        else:
            from_df = from_wks.get_as_df()
        add_key = line["can_add_key"]
        print('s e')
        self.sync_straight(df=from_df, link_col=line['straight_link_col'],
                           args=args, add_key=add_key)
def one_to_many_sync(self, one_node, many_node, args=None, one_node_to_many_link=True):
    """Sync one sheet against every sheet listed in *many_node*.

    :param one_node: link of the single ("one") sheet.
    :param many_node: whitespace-separated links of the "many" sheets.
    :param args: extra keyword arguments forwarded to
        ``util.sync_by_colname``.  Fixed: the previous ``args={}`` default
        was a shared mutable object; ``None`` is now used as the sentinel.
    :param one_node_to_many_link: direction flag -- ``True`` copies from
        the one-sheet into each of the many sheets, ``False`` the reverse.
    """
    args = {} if args is None else args
    print(one_node)
    one_wks = util.read_sheet(util.get_sheet_id_from_link(one_node))
    for link in many_node.split():
        if not link:
            continue
        one_of_many_wks = util.read_sheet(util.get_sheet_id_from_link(link))
        print(args['to_values_colname'], one_node, many_node)
        if one_node_to_many_link:
            util.sync_by_colname(from_wks=one_wks, to_wks=one_of_many_wks, **args)
        else:
            util.sync_by_colname(from_wks=one_of_many_wks, to_wks=one_wks, **args)
def _read_config_file(config_file, sheet_names=config.SHEET_NAMES):
    """
    Read an Excel configuration file with initial and boundary conditions
    plus time series for sinks and sources.

    :param config_file: path to the Excel workbook
    :type config_file: str
    :param sheet_names: mapping of internal names to workbook sheet names
    :type sheet_names: dict
    """
    workbook = xlrd.open_workbook(config_file)
    C = {key: read_sheet(workbook, sheet)
         for key, sheet in sheet_names.items()}

    # The same transfer function f is applied to each of these arrays.
    for key in ('SOD', 'SDBO', 'SNH3', 'SNO2', 'SNO3', 'SDQO', 'STDS',
                'SGyA', 'SEC', 'STC', 'SPorg', 'SPdis', 'STSS', 'SSS',
                'SALK'):
        C[key] = f(C[key][0:, 1:], C['Caudales'][0:, 1:],
                   C['Caudales'][0, 1:])

    # Temperature to Kelvin; pH back to flow-weighted H+ concentration.
    C['ST'] = C['ST'][0:, 1:] + 273
    C['SpH'] = (10**(-1 * (C['SpH'][0:, 1:]))) * C['Caudales'][0:, 1:] / (
        C['Caudales'][0, 1:] + C['Caudales'][0:, 1:])
    return C
def transfer(self, id, new_group, key_column=None, value_column="Секция"):
    """Move a person's row from their current group sheet to *new_group*.

    Looks the person up in the main sheet by *id*, deletes their row from
    the old group's sheet, appends it to the new group's sheet, and finally
    rewrites the group cell in the main sheet.

    :param id: key value identifying the row in the main sheet (wrapped in
        a one-element list for ``util.get_row_by_keys``).
    :param new_group: name of the destination group sheet.
    :param key_column: column(s) to match *id* against; ``None`` leaves the
        helper's default lookup behaviour.
    :param value_column: main-sheet column holding the current group name.
    """
    main_df = self.wks.get_as_df()
    main_row = util.get_row_by_keys(main_df, key_value=[id], key_column=key_column)
    # name of the group the person currently belongs to
    old_wks_name = main_row[value_column].values[0]
    old_wks = util.read_sheet(
        util.affilation_wks(old_wks_name, self.graph_df)[0])
    new_wks = util.read_sheet(
        util.affilation_wks(new_group, self.graph_df)[0])
    # locate the person's row in the old group sheet by name + group
    # (column labels are in Russian: 'ФИО' = full name, 'Группа' = group)
    old_row = util.get_row_by_keys(old_wks.get_as_df(),
                                   main_row[['ФИО', 'Группа']].values[0],
                                   ['ФИО', 'Группа'])
    # +2 converts the 0-based dataframe index to the 1-based sheet row
    # while skipping the header row
    old_wks.delete_rows(int(old_row.index[0] + 2))
    util.add_row_down(wks=new_wks, values=list(old_row.values[0]))
    # +1: sheet columns are 1-based
    col = main_df.columns.get_loc(value_column) + 1
    self.change_cell(col=col, row=main_row.index[0] + 2, val=new_group)
def __init__(self, sheet_id: str, graph=None, pg_outh_pass='******'):
    """
    :param sheet_id: id of the sheet this instance will change
    :param graph: sheet with pair <name of sheet, id> used to open and
        edit relation sheets; a ``str`` is treated as a sheet key, any
        other non-None value as an already-loaded graph dataframe
    :param pg_outh_pass: oauth credentials file passed to pygsheets
    """
    self.sheet_id = sheet_id
    self.pg = pygsheets.authorize(outh_file=pg_outh_pass)
    self.wks = util.read_sheet(key=sheet_id, pygsheet=self.pg)
    self.df = self.wks.get_as_df()
    if graph is None:
        # TODO create graph
        # (graph_df / graph_id intentionally left unset, as before)
        return
    if isinstance(graph, str):
        self.graph_df = util.read_sheet(key=graph,
                                        pygsheet=self.pg).get_as_df()
        self.graph_id = graph
    else:
        self.graph_df = graph
        self.graph_id = None
def create_graph_line(self, Pass=1, node_short_name='', node_dict=None,
                      last_sink_time=None, parse_funktion='',
                      links_dict=None, straight_link_col='',
                      args='{"to_key_colname": "ФИО", "to_values_colname": []}',
                      convert_function='', can_add_key='', graph_id=None):
    """Insert a new edge-description row at the top of the graph sheet.

    Fixes over the previous revision:
    * ``node_dict`` / ``links_dict`` used shared mutable ``{}`` defaults;
    * ``last_sink_time=time.time()`` was evaluated once at import time,
      so every call recorded the module-load timestamp -- it is now taken
      per call.

    :param Pass: truthy value makes ``sync_all`` skip this line.
    :param node_short_name: short display name of the node.
    :param node_dict: mapping of node titles to links.
    :param last_sink_time: unix timestamp of the last sync; defaults to now.
    :param parse_funktion: key into ``PARSE_FUNK`` for optional preprocessing.
    :param links_dict: mapping of link titles to links.
    :param straight_link_col: column used for "straight" sync mode.
    :param args: JSON string of kwargs for ``util.sync_by_colname``.
    :param convert_function: optional converter name stored with the row.
    :param can_add_key: whether syncing may add new keys.
    :param graph_id: graph sheet key; defaults to ``self.graph_id``.
    """
    node_dict = {} if node_dict is None else node_dict
    links_dict = {} if links_dict is None else links_dict
    if last_sink_time is None:
        last_sink_time = time.time()
    graph_wks = util.read_sheet(key=(graph_id or self.graph_id))
    link_names, links = self.dict_to_title_and_link(links_dict)
    node_names, nodes = self.dict_to_title_and_link(node_dict)
    row_values = [Pass, node_short_name, node_names, nodes, last_sink_time,
                  parse_funktion, link_names, links, straight_link_col,
                  args, convert_function, can_add_key]
    graph_wks.insert_rows(1, number=1, values=row_values, inherit=False)
def open_protected_today(key, pg):
    """Re-protect a sheet so that only columns before today's date are locked.

    Deletes the existing protected range (id 1), then adds a fresh one
    covering rows 0-50 and all columns left of today's "%m-%d" column,
    editable only by the listed users.

    :param key: sheet key.
    :param pg: authorized pygsheets client exposing ``sh_batch_update``.
    :raises KeyError: if today's "%m-%d" column is absent from the sheet.
    """
    wks = util.read_sheet(key)
    df = wks.get_as_df()
    col = datetime.datetime.now().strftime("%m-%d")
    ind = df.columns.get_loc(col)
    # delete the old protected range; best-effort, it may not exist yet
    request = {
        "deleteProtectedRange": {
            "protectedRangeId": 1,
        }
    }
    try:
        pg.sh_batch_update(key, request, None, False)
    except Exception:
        # Fixed: a bare ``except:`` also swallowed KeyboardInterrupt and
        # SystemExit.  The delete remains deliberately best-effort.
        pass
    # set the new range
    gridrange = {
        "sheetId": 0,
        "startRowIndex": 0,
        "endRowIndex": 50,
        "startColumnIndex": 0,
        "endColumnIndex": ind,
    }
    editors = {
        "users": [
            "*****@*****.**",
        ]
    }
    request = {"addProtectedRange": {
        "protectedRange": {
            "protectedRangeId": 1,
            "range": gridrange,
            "editors": editors
        },
    }}
    pg.sh_batch_update(key, request, None, False)
value_column=['03-03']) print(cur_value, type(cur_value)) # print(cur_value.index[0],) g.change_cell_with_id(key_column=['ФИО'], id=['Лукманов А Р'], value_column=['03-10'], value='23') if 0: main_sheet = GDrive( sheet_id='174rn-nhlAkbvVF6cI6JMRAfXQGZWxPxi-iNprtHm9fU', graph='1MeAYhENafzQMoTKDG-VOLI308ih1wl7SjMLbOkwJU4M') #main_sheet.transfer(id='*****@*****.**', new_group='Футбол_Белоглазов_В_В_ФФ1', # key_column=['Почта'], value_column='Секция') df = util.read_sheet(name='Футбол_Белоглазов_В_В_ФФ1').get_as_df() print( main_sheet.find(key_column=['Почта'], id=['*****@*****.**'], value_column='Секция')) print('Тест Т Т' in df['ФИО'].values) #g = GDrive('1tV9pzE0Ds8-eW5btij6Wvn-c3xljBeTQ0ASh9cX-Kk4', # graph='1ZR38L8tFMdUTODVQ0cdz79jPKX4xpNn7R11S82u5Jyk') #g.edit_all_forms_in_graph_sell('link', 1) #g.create_protected_range(end_range=[50,5], editors_mails=['*****@*****.**']) #g.edit_col_lenth() #print(g.wks.get_values(start=(1,1), end=(3,5), value_render='FORMULA')) # GENERATE SUBSET # put main wks
def __init__(self, sheet_id, pg_outh_pass='other.json'):
    """
    :param sheet_id: key of the graph sheet describing the sync edges
    :param pg_outh_pass: path of the oauth credentials file for pygsheets.
        New backward-compatible parameter -- 'other.json' was previously
        hard-coded; the default preserves the old behaviour and matches
        the ``pg_outh_pass`` parameter of ``GDrive.__init__``.
    """
    self.pg = pygsheets.authorize(outh_file=pg_outh_pass)
    self.graph_wks = util.read_sheet(key=sheet_id, pygsheet=self.pg)
    self.graph_df = self.graph_wks.get_as_df()
    self.drive = None  # GDrive()
def open_node_by_name(self, node_name):
    """Open the worksheet of the graph node called *node_name*.

    :param node_name: value of the ``node_name`` column to look up.
    :return: the opened worksheet of the matching node.
    :raises LookupError: if no such node exists in the graph.
    """
    # NOTE(review): this reads ``self.graph`` while the constructor seen
    # elsewhere stores the dataframe as ``self.graph_df`` -- confirm which
    # attribute this class actually sets.
    row = util.get_row_by_keys(self.graph, key_column=["node_name"],
                               key_value=[node_name])
    # Fixed: was ``assert not row.empty, Exception(...)`` -- asserts are
    # stripped under ``python -O`` and the Exception instance served only
    # as the assert message; raise explicitly instead.
    if row.empty:
        raise LookupError(
            "Can't find link_name [{}] in graph".format(node_name))
    link = row["node"].values[0]
    return util.read_sheet(util.get_sheet_id_from_link(link))
# NOTE(review): this chunk begins mid-method -- the statements below are
# the tail of ``one_to_many_sync`` (its ``def`` line is outside this view)
# and duplicate the body of the same method seen earlier in the file.
many_links = many_node.split()
for link in many_links:
    if link:
        one_of_many_wks = util.read_sheet(util.get_sheet_id_from_link(link))
        print(args['to_values_colname'], one_node, many_node)
        if one_node_to_many_link:
            # copy from the single sheet into this one of the many
            util.sync_by_colname(from_wks=one_wks, to_wks=one_of_many_wks, **args)
        else:
            # copy from this one of the many back into the single sheet
            util.sync_by_colname(from_wks=one_of_many_wks, to_wks=one_wks, **args)

# Method of the Graph class (class header is outside this view).
def sync_all(self):
    """Run ``sync_edge`` for every graph row whose 'Pass' flag is falsy."""
    # NOTE(review): iterates the module-level ``graph``, not
    # ``self.graph_df`` -- this only works when the script below has run;
    # confirm whether ``self.graph_df`` was intended.
    for i, line in graph.iterrows():
        # NOTE(review): the '******' below looks like redaction damage in
        # the original source (probably once ``' pass=', line['Pass']``);
        # left byte-identical rather than guessed at.
        print('l i=', i, ' pass='******'Pass'])
        if line['Pass']:
            continue
        self.sync_edge(line)

if __name__ == "__main__":
    #graph = util.read_sheet('1MeAYhENafzQMoTKDG-VOLI308ih1wl7SjMLbOkwJU4M').get_as_df()
    #graph = util.read_sheet('1q8z_9QDwSia1IMo7qvDdH2cui0-D5My0xkClopMWcqw').get_as_df()
    # TEST
    graph = util.read_sheet('1sqMrcxNvbLna3BHbzQEj-W3ivVBmUPQQ4aPQhp1kW5M').get_as_df()
    #graph = util.read_sheet('1ZR38L8tFMdUTODVQ0cdz79jPKX4xpNn7R11S82u5Jyk').get_as_df()
    g = Graph(graph)
    g.sync_all()