def main_workflow(engine, dlpx_obj):
    """
    This function is where we create our main workflow.
    Use the @run_async decorator to run this function asynchronously.
    The @run_async decorator allows us to run against multiple Delphix
    Engines simultaneously.

    :param engine: Dictionary of engines
    :type engine: dictionary
    :param dlpx_obj: Virtualization Engine session object
    :type dlpx_obj: lib.GetSession.GetSession
    """
    try:
        # Setup the connection to the Delphix Engine
        dlpx_obj.serversess(engine['ip_address'], engine['username'],
                            engine['password'])
    except DlpxException as e:
        print_exception('ERROR: Engine {} encountered an error while '
                        'rewinding {}:\n{}\n'.format(engine['hostname'],
                                                     arguments['--target'], e))
        sys.exit(1)
    thingstodo = ["thingtodo"]
    try:
        with dlpx_obj.job_mode(single_thread):
            while len(dlpx_obj.jobs) > 0 or len(thingstodo) > 0:
                if len(thingstodo) > 0:
                    rewind_database(dlpx_obj, arguments['--vdb'],
                                    arguments['--timestamp'],
                                    arguments['--timestamp_type'])
                    thingstodo.pop()
                # Get all the jobs, then inspect them.
                i = 0
                for j in list(dlpx_obj.jobs.keys()):
                    job_obj = job.get(dlpx_obj.server_session,
                                      dlpx_obj.jobs[j])
                    print_debug(job_obj)
                    print_info('{}: Rewind of {}: {}'.format(
                        engine['hostname'], arguments['--vdb'],
                        job_obj.job_state))
                    if job_obj.job_state in ['CANCELED', 'COMPLETED', 'FAILED']:
                        # If the job is in a non-running state, remove it
                        # from the running jobs list.
                        del dlpx_obj.jobs[j]
                    elif job_obj.job_state == 'RUNNING':
                        # If the job is in a running state, increment the
                        # running job count.
                        i += 1
                print_info('{}: {:d} jobs running.'.format(
                    engine['hostname'], i))
                # If we have running jobs, pause before repeating the checks.
                if len(dlpx_obj.jobs) > 0:
                    sleep(float(arguments['--poll']))
    except (DlpxException, RequestError, JobError, HttpError) as e:
        print_exception('Error in dx_rewind_vdb on engine {}:\n{}'.format(
            engine['hostname'], e))
        sys.exit(1)
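
# The @run_async decorator referenced in the docstring above is not shown in
# this excerpt. A minimal sketch of how such a decorator can be written with
# threading.Thread follows; it is an assumed implementation for illustration,
# not necessarily the one shipped with these scripts.
from threading import Thread
from functools import wraps

def run_async(func):
    """
    Run the wrapped function in its own thread and return the Thread object
    so the caller can join() on it later.
    """
    @wraps(func)
    def async_func(*args, **kwargs):
        func_hl = Thread(target=func, args=args, kwargs=kwargs)
        func_hl.start()
        return func_hl
    return async_func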
def main_workflow(engine):
    """
    This function actually runs the jobs.
    Use the @run_async decorator to run this function asynchronously.
    This allows us to run against multiple Delphix Engines simultaneously.

    engine: Dictionary of engines
    """
    try:
        # Setup the connection to the Delphix Engine
        dx_session_obj.serversess(engine['ip_address'], engine['username'],
                                  engine['password'])
    except DlpxException as e:
        print_exception('\nERROR: Engine {} encountered an error while '
                        '{}:\n{}\n'.format(engine['hostname'],
                                           arguments['--target'], e))
        sys.exit(1)
    thingstodo = ["thingtodo"]
    try:
        with dx_session_obj.job_mode(single_thread):
            while len(dx_session_obj.jobs) > 0 or len(thingstodo) > 0:
                if len(thingstodo) > 0:
                    if arguments['--add']:
                        add_group(arguments['--group_name'])
                    elif arguments['--delete']:
                        delete_group(arguments['--group_name'])
                    elif arguments['--list']:
                        list_groups()
                    thingstodo.pop()
                # Get all the jobs, then inspect them.
                i = 0
                for j in list(dx_session_obj.jobs.keys()):
                    job_obj = job.get(dx_session_obj.server_session,
                                      dx_session_obj.jobs[j])
                    print_debug(job_obj)
                    print_info('{}: Group: {}'.format(engine['hostname'],
                                                      job_obj.job_state))
                    if job_obj.job_state in ['CANCELED', 'COMPLETED', 'FAILED']:
                        # If the job is in a non-running state, remove it
                        # from the running jobs list.
                        del dx_session_obj.jobs[j]
                    elif job_obj.job_state == 'RUNNING':
                        # If the job is in a running state, increment the
                        # running job count.
                        i += 1
                print_info('{}: {:d} jobs running.'.format(
                    engine['hostname'], i))
                # If we have running jobs, pause before repeating the checks.
                if len(dx_session_obj.jobs) > 0:
                    sleep(float(arguments['--poll']))
    except (HttpError, RequestError, JobError, DlpxException) as e:
        print_exception('ERROR: Could not complete group '
                        'operation: {}'.format(e))
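
# A hedged usage sketch of fanning main_workflow() out across several engines.
# 'dx_engines' is a hypothetical dictionary of engine definitions (keyed by
# hostname), and the sketch assumes main_workflow() is decorated with
# @run_async so that each call returns a started Thread, as in the decorator
# sketch earlier in this section.
threads = []
for engine_name, engine in dx_engines.items():
    threads.append(main_workflow(engine))
for each in threads:
    # Wait for every engine's workflow to finish before exiting.
    each.join()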
def main_workflow(engine):
    """
    This function actually runs the jobs.
    Use the @run_async decorator to run this function asynchronously.
    This allows us to run against multiple Delphix Engines simultaneously.

    engine: Dictionary of engines
    """
    jobs = {}
    try:
        # Setup the connection to the Delphix Engine
        dx_session_obj.serversess(engine['ip_address'], engine['username'],
                                  engine['password'])
    except DlpxException as e:
        print_exception('\nERROR: Engine %s encountered an error while '
                        '%s:\n%s\n' % (engine['hostname'],
                                       arguments['--target'], e))
        sys.exit(1)
    thingstodo = ["thingtodo"]
    # Reset the running job count before we begin.
    i = 0
    with dx_session_obj.job_mode(single_thread):
        while len(jobs) > 0 or len(thingstodo) > 0:
            if len(thingstodo) > 0:
                if arguments['--pw']:
                    update_ase_db_pw()
                # elif OPERATION:
                #     method_call
                thingstodo.pop()
            # Get all the jobs, then inspect them.
            i = 0
            for j in list(jobs.keys()):
                job_obj = job.get(dx_session_obj.server_session, jobs[j])
                print_debug(job_obj)
                print_info(engine["hostname"] + ": VDB Operations: " +
                           job_obj.job_state)
                if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
                    # If the job is in a non-running state, remove it from
                    # the running jobs list.
                    del jobs[j]
                else:
                    # If the job is in a running state, increment the
                    # running job count.
                    i += 1
            print_info(engine["hostname"] + ": " + str(i) + " jobs running.")
            # If we have running jobs, pause before repeating the checks.
            if len(jobs) > 0:
                sleep(float(arguments['--poll']))
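
# The same poll-and-prune loop appears in each main_workflow() variant in this
# section. A hypothetical helper that factors it out might look like the
# sketch below; wait_for_jobs() is not part of the original scripts, and it
# assumes the same job, print_info, and sleep imports the scripts already use.
def wait_for_jobs(server_session, jobs, poll_interval, label=''):
    """
    Poll a dictionary of job references until every job reaches a terminal
    state, sleeping poll_interval seconds between checks.
    """
    while len(jobs) > 0:
        running = 0
        for job_name in list(jobs.keys()):
            job_obj = job.get(server_session, jobs[job_name])
            print_info('{}: {}'.format(label, job_obj.job_state))
            if job_obj.job_state in ['CANCELED', 'COMPLETED', 'FAILED']:
                # Terminal state: drop the job from the running list.
                del jobs[job_name]
            else:
                running += 1
        print_info('{}: {:d} jobs running.'.format(label, running))
        if len(jobs) > 0:
            sleep(float(poll_interval))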
def main_workflow(engine):
    """
    This function actually runs the jobs.
    Use the @run_async decorator to run this function asynchronously.
    This allows us to run against multiple Delphix Engines simultaneously.

    engine: Dictionary of engines
    """
    jobs = {}
    try:
        # Setup the connection to the Delphix Engine
        dx_session_obj.serversess(engine['ip_address'], engine['username'],
                                  engine['password'])
    except DlpxException as e:
        print_exception('\nERROR: Engine {} encountered an error while '
                        '{}:\n{}\n'.format(engine['hostname'],
                                           arguments['--target'], e))
        sys.exit(1)
    thingstodo = ["thingtodo"]
    with dx_session_obj.job_mode(single_thread):
        while len(dx_session_obj.jobs) > 0 or len(thingstodo) > 0:
            if len(thingstodo) > 0:
                refresh_database(arguments['--vdb'],
                                 arguments['--timestamp'],
                                 arguments['--timestamp_type'])
                thingstodo.pop()
            # Get all the jobs, then inspect them.
            i = 0
            for j in list(dx_session_obj.jobs.keys()):
                job_obj = job.get(dx_session_obj.server_session,
                                  dx_session_obj.jobs[j])
                print_debug(job_obj)
                print_info('{}: Operations: {}'.format(engine['hostname'],
                                                       job_obj.job_state))
                if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
                    # If the job is in a non-running state, remove it from
                    # the running jobs list.
                    del dx_session_obj.jobs[j]
                elif job_obj.job_state == 'RUNNING':
                    # If the job is in a running state, increment the
                    # running job count.
                    i += 1
            print_info('{}: {:d} jobs running.'.format(engine['hostname'], i))
            # If we have running jobs, pause before repeating the checks.
            if len(dx_session_obj.jobs) > 0:
                sleep(float(arguments['--poll']))
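
# For context, a hedged illustration of the docopt-style 'arguments'
# dictionary this workflow reads from. The exact keys come from the script's
# usage string; the values below are made-up sample inputs, not defaults.
sample_arguments = {
    '--vdb': 'vdb_example',          # hypothetical VDB name
    '--timestamp_type': 'SNAPSHOT',  # selector type passed to refresh_database
    '--timestamp': 'LATEST',         # snapshot or point-in-time to refresh to
    '--poll': '10',                  # seconds to sleep between job checks
}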
def create_ora_sourceconfig(engine_name, port_num=1521):
    """
    Create an Oracle sourceconfig and link the dSource.

    :param engine_name: Name of the Delphix Engine to run against
    :param port_num: Listener port of the Oracle instance (default 1521)
    :return: None
    """
    create_ret = None
    env_obj = find_obj_by_name(dx_session_obj.server_session, environment,
                               arguments['--env_name'])
    try:
        sourceconfig_ref = find_obj_by_name(dx_session_obj.server_session,
                                            sourceconfig,
                                            arguments['--db_name']).reference
    except DlpxException:
        sourceconfig_ref = None
    repo_ref = find_dbrepo(dx_session_obj.server_session, 'OracleInstall',
                           env_obj.reference,
                           arguments['--db_install_path']).reference
    dsource_params = OracleSIConfig()
    connect_str = ('jdbc:oracle:thin:@' + arguments['--ip_addr'] + ':' +
                   str(port_num) + ':' + arguments['--db_name'])
    dsource_params.database_name = arguments['--db_name']
    dsource_params.unique_name = arguments['--db_name']
    dsource_params.repository = repo_ref
    dsource_params.instance = OracleInstance()
    dsource_params.instance.instance_name = arguments['--db_name']
    dsource_params.instance.instance_number = 1
    dsource_params.services = [{'type': 'OracleService',
                                'jdbcConnectionString': connect_str}]
    try:
        if sourceconfig_ref is None:
            create_ret = link_ora_dsource(sourceconfig.create(
                dx_session_obj.server_session, dsource_params),
                env_obj.primary_user)
        else:
            create_ret = link_ora_dsource(sourceconfig_ref,
                                          env_obj.primary_user)
        print_info('Created and linked the dSource {} with reference '
                   '{}.\n'.format(arguments['--db_name'], create_ret))
        link_job_ref = dx_session_obj.server_session.last_job
        link_job_obj = job.get(dx_session_obj.server_session, link_job_ref)
        while link_job_obj.job_state not in ["CANCELED", "COMPLETED", "FAILED"]:
            print_info('Waiting three seconds for the link job to complete '
                       'and the sync to begin.')
            sleep(3)
            link_job_obj = job.get(dx_session_obj.server_session,
                                   link_job_ref)
        # Add the snapsync job to the jobs dictionary.
        dx_session_obj.jobs[engine_name + 'snap'] = get_running_job(
            dx_session_obj.server_session, find_obj_by_name(
                dx_session_obj.server_session, database,
                arguments['--dsource_name']).reference)
        print_debug('Snapshot Job Reference: {}.\n'.format(
            dx_session_obj.jobs[engine_name + 'snap']))
    except (HttpError, RequestError) as e:
        print_exception('ERROR: Could not create the sourceconfig:\n'
                        '{}'.format(e))
        sys.exit(1)
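
# The JDBC string assembled above uses the Oracle thin-driver SID form,
# e.g. jdbc:oracle:thin:@10.0.1.20:1521:orcl (host and SID are made up).
# A str.format() equivalent of the concatenation used in the function:
connect_str = 'jdbc:oracle:thin:@{}:{}:{}'.format(
    arguments['--ip_addr'], port_num, arguments['--db_name'])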
def main_workflow(engine, dlpx_obj):
    """
    This function is where we create our main workflow.
    Use the @run_async decorator to run this function asynchronously.
    The @run_async decorator allows us to run against multiple Delphix
    Engines simultaneously.

    :param engine: Dictionary of engines
    :type engine: dictionary
    :param dlpx_obj: Virtualization Engine session object
    :type dlpx_obj: lib.GetSession.GetSession
    """
    try:
        # Setup the connection to the Delphix Engine
        dlpx_obj.serversess(engine['ip_address'], engine['username'],
                            engine['password'])
    except DlpxException as e:
        print_exception('ERROR: Engine {} encountered an error while '
                        '{}:\n{}\n'.format(engine['hostname'],
                                           arguments['--target'], e))
        sys.exit(1)
    thingstodo = ["thingtodo"]
    try:
        with dlpx_obj.job_mode(single_thread):
            while len(dlpx_obj.jobs) > 0 or len(thingstodo) > 0:
                if len(thingstodo) > 0:
                    if arguments['--type'] in ('linux', 'windows'):
                        env_name = arguments['--env_name']
                        host_user = arguments['--host_user']
                        pw = arguments['--pw']
                        ip_addr = arguments['--ip']
                        host_name = arguments['--connector_name']
                        if arguments['--type'] == 'linux':
                            toolkit_path = arguments['--toolkit']
                            create_linux_env(dlpx_obj, env_name, host_user,
                                             ip_addr, toolkit_path, pw)
                        else:
                            create_windows_env(dlpx_obj, env_name, host_user,
                                               ip_addr, pw, host_name)
                    elif arguments['--delete']:
                        delete_env(dlpx_obj, arguments['--delete'])
                    elif arguments['--refresh']:
                        refresh_env(dlpx_obj, arguments['--refresh'])
                    elif arguments['--update_ase_pw']:
                        update_ase_pw(dlpx_obj)
                    elif arguments['--update_ase_user']:
                        update_ase_username(dlpx_obj)
                    elif arguments['--list']:
                        list_env(dlpx_obj)
                    elif arguments['--update_host']:
                        update_host_address(dlpx_obj,
                                            arguments['--old_host_address'],
                                            arguments['--new_host_address'])
                    elif arguments['--enable']:
                        enable_environment(dlpx_obj, arguments['--env_name'])
                    elif arguments['--disable']:
                        disable_environment(dlpx_obj, arguments['--env_name'])
                    thingstodo.pop()
                # Get all the jobs, then inspect them.
                i = 0
                for j in list(dlpx_obj.jobs.keys()):
                    job_obj = job.get(dlpx_obj.server_session,
                                      dlpx_obj.jobs[j])
                    print_debug(job_obj)
                    print_info('{} Environment: {}'.format(
                        engine['hostname'], job_obj.job_state))
                    if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
                        # If the job is in a non-running state, remove it
                        # from the running jobs list.
                        del dlpx_obj.jobs[j]
                    elif job_obj.job_state == 'RUNNING':
                        # If the job is in a running state, increment the
                        # running job count.
                        i += 1
                print_info('{}: {:d} jobs running.'.format(
                    engine['hostname'], i))
                # If we have running jobs, pause before repeating the checks.
                if len(dlpx_obj.jobs) > 0:
                    sleep(float(arguments['--poll']))
    except (DlpxException, RequestError, JobError, HttpError) as e:
        print_exception('Error while creating the environment {}:\n{}'.format(
            arguments['--env_name'], e))
        sys.exit(1)
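
# Design note: the flag dispatch inside main_workflow() above could also be
# written as a lookup table. This is an illustrative sketch only (it would
# replace the elif chain inside the function, where dlpx_obj and arguments
# are in scope); the helper functions are the same ones called above.
operations = {
    '--delete': lambda: delete_env(dlpx_obj, arguments['--delete']),
    '--refresh': lambda: refresh_env(dlpx_obj, arguments['--refresh']),
    '--list': lambda: list_env(dlpx_obj),
    '--enable': lambda: enable_environment(dlpx_obj, arguments['--env_name']),
    '--disable': lambda: disable_environment(dlpx_obj, arguments['--env_name']),
}
for flag, operation in operations.items():
    if arguments.get(flag):
        operation()
        break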