def _post(
    request: Response,
    claims: Dict[str, Any],
    model_type: str,
    model_type_singular: str,
    model_id: str,
    org_id: str,
):
    """Extend post for transfers.

    Thin wrapper that forwards all arguments unchanged to the
    module-level ``post`` handler — presumably a hook point for
    transfer-specific behavior (TODO confirm with the router that
    registers it). Returns nothing; any result of ``post`` is discarded.
    """
    post(request, claims, model_type, model_type_singular, model_id, org_id)
def execute_right_script(list_of_selected_deployments, rs_name, bearer_token):
    """Run the RightScript *rs_name* on every backup instance of the
    selected deployments.

    Returns the list of HTTP status codes, one per instance POSTed to;
    an empty list when the script href cannot be resolved.
    """
    logging.info("Executing right script on selected deployments")
    headers = {
        'X-API-Version': '1.5',
        'Content-Type': 'application/json',
        'Authorization': 'Bearer ' + bearer_token
    }
    instance_hrefs = bck_ip(list_of_selected_deployments, headers)
    status_codes = []
    # Resolve the script href once; bail out early if lookup failed.
    rs_href = right_script(rs_name, headers)
    if rs_href == "":
        return status_codes
    payload = {"right_script_href": rs_href}
    for instance_href in instance_hrefs:
        run_url = instance_href + '/run_executable'
        result = api.post(run_url, payload, headers)
        status_codes.append(result.status_code)
    return status_codes
def execute_right_script(list_of_selected_deployments, rs_name, bearer_token):
    """Run the RightScript *rs_name* on every backup instance of the
    selected deployments.

    Returns the list of API response objects, one per instance POSTed
    to; an empty list when the script href cannot be resolved.
    """
    # BUG FIX: replaced debug print() calls with logging, hoisted the
    # loop-invariant right_script() lookup out of the loop, and guarded
    # against an empty script href before issuing any POSTs.
    logging.info("Executing right script %s on selected deployments", rs_name)
    headers = {
        'X-API-Version': '1.5',
        'Content-Type': 'application/json',
        'Authorization': 'Bearer ' + bearer_token
    }
    list_of_ins_href = bck_ip(list_of_selected_deployments, headers)
    list_of_rs_response = []
    # The script href does not change per instance — resolve it once.
    rs_href = right_script(rs_name, headers)
    if rs_href == "":
        logging.warning("RightScript %s not found; nothing executed", rs_name)
        return list_of_rs_response
    data = {"right_script_href": rs_href}
    for bck_ins_href in list_of_ins_href:
        url = bck_ins_href + '/run_executable'
        response = api.post(url, data, headers)
        list_of_rs_response.append(response)
    return list_of_rs_response
def _log_backup_result(logger, cluster_id, database_id, scheduled, state,
                       size=None, duration=None, info=None):
    """Best-effort insert of one row into 'backup/logging'; never raises.

    Optional fields (size, duration, info) are included only when given,
    matching the payload shapes the original inline blocks produced.
    """
    data = {
        'cluster_id': cluster_id,
        'database_id': database_id,
        'scheduled': scheduled,
        'timecreated': str(dt.datetime.now()),
        'state': state,
    }
    if size is not None:
        data['size'] = size
    if duration is not None:
        data['duration'] = duration
    if info is not None:
        data['info'] = info
    try:
        response = api.post('backup/logging', data)
        logger.debug('Inserted backup logging: %s' % response)
    except Exception as e:
        # A logging failure must never abort the backup run itself.
        logger.error('Cannot insert backup log data: %s' % e)


def dump(logger, scheduled, cluster, cluster_id, dbname, database_id, backupdir):
    '''Dump database of one cluster.

    Daily and manual schedules take a fresh pg_dump (from the standby
    node when one is reachable, otherwise from the cluster node); other
    schedules copy the latest daily dump instead.  Every outcome,
    success or failure, is recorded via api.post('backup/logging', ...).
    '''
    # Determine full path and name for backupfile.
    backupfile = backup.get_backupfile(logger, backupdir, cluster,
                                       scheduled, dbname)
    logger.info('Backup file: %s' % backupfile)

    if scheduled != backup.manual:
        # Maintenance backup files.
        logger.info('Starting backup maintenance')
        backup.backup_maintenance(logger, cluster, scheduled, dbname, backupdir)
        logger.info('Finished backup maintenance')

    # On daily/manual make a real dump. Others, copy only file.
    if scheduled == backup.daily or scheduled == backup.manual:
        # Determine standby node from cluster.
        # BUG FIX: the original unpacked standby['data'] OUTSIDE the try,
        # so a failed api.get left `standby` undefined (NameError).
        standby = None
        try:
            standby = api.get('standby/' + str(cluster_id))
            logger.debug('Standby node: %s' % standby['data'])
            standby = standby['data']
        except Exception as e:
            logger.error('Cannot determine standby node: %s' % e)
            standby = None
        # If standby node is down or if standalone topology, dump from
        # the cluster node itself.
        if not standby:
            standby = cluster

        # Backup credentials. os.getenv never raises, so the original
        # `except os.error` (and its unreachable print) was dead code.
        backup_user = os.getenv('PGMB_EDBUSER', 'postgres')
        backup_dbport = os.getenv('PGMB_EDBPORT', 5432)

        command = 'pg_dump -U%s -h %s -p %s -Fc -d %s -f %s' % (
            backup_user, standby, backup_dbport, dbname, backupfile)
        logger.debug(command)
        command = shlex.split(command)
        start = dt.datetime.now()
        try:
            ps = subprocess.Popen(command, shell=False,
                                  stdin=subprocess.PIPE,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
        except OSError as err:
            _log_backup_result(logger, cluster_id, database_id, scheduled,
                               state=False, info=str(err))
            # BUG FIX: the original fell through to ps.communicate() on
            # an undefined `ps` after a Popen failure; bail out instead.
            return
        stdoutdata, stderrdata = ps.communicate()
        # pg_dump is considered successful when stderr is empty.
        state = stderrdata is None or len(stderrdata) == 0
        statinfo = os.stat(backupfile)
        dumpsize = statinfo.st_size
        seconds = (dt.datetime.now() - start).total_seconds()
        _log_backup_result(logger, cluster_id, database_id, scheduled,
                           state=state, size=dumpsize, duration=int(seconds))
    else:
        # Get daily latest backup and copy it for this schedule.
        dailybackupfile = backup.get_oldest_backupfile(
            logger, cluster, backup.daily, dbname, backupdir)
        if dailybackupfile is not None:
            try:
                start = dt.datetime.now()
                shutil.copyfile(dailybackupfile, backupfile)
                dumpsize = os.stat(backupfile).st_size
                seconds = (dt.datetime.now() - start).total_seconds()
                _log_backup_result(logger, cluster_id, database_id,
                                   scheduled, state=True, size=dumpsize,
                                   duration=int(seconds))
            except (IOError, shutil.Error) as err:
                # BUG FIX: log str(err), not the raw exception object.
                _log_backup_result(logger, cluster_id, database_id,
                                   scheduled, state=False, info=str(err))
        else:
            _log_backup_result(logger, cluster_id, database_id, scheduled,
                               state=False, info='Cannot find backupfile')
def authn_to_RS(RS_URL, rs_token):
    """Exchange a RightScale refresh token for a bearer token.

    POSTs the refresh-token grant to ``<RS_URL>api/oauth2`` and returns
    whatever api.post yields (the token response).
    """
    # BUG FIX: the original referenced a module-global `headers` that is
    # never defined in this file — a NameError at call time. Build the
    # file's standard API 1.5 headers locally instead.
    headers = {
        'X-API-Version': '1.5',
        'Content-Type': 'application/json'
    }
    data = {'grant_type': 'refresh_token', 'refresh_token': rs_token}
    OA_URL = RS_URL + 'api/oauth2'
    br_token = api.post(OA_URL, data, headers)
    return br_token