# Example 1
def rewind_database(dlpx_obj, vdb_name, timestamp, timestamp_type='SNAPSHOT'):
    """
    Perform a rewind (rollback) of a VDB to a snapshot or point in time.

    dlpx_obj: Virtualization Engine session object
    vdb_name: VDB to be rewound
    timestamp: Point in time to rewind the VDB
    timestamp_type: The type of timestamp being used for the rewind
    """

    # list() is required: dict.keys() returns a non-indexable view object
    # in Python 3.
    engine_name = list(dlpx_obj.dlpx_engines.keys())[0]
    dx_timeflow_obj = DxTimeflow(dlpx_obj.server_session)
    container_obj = find_obj_by_name(dlpx_obj.server_session, database,
                                     vdb_name)
    # Sanity check to make sure our container object has a reference
    if container_obj.reference:
        try:
            if container_obj.virtual is not True:
                raise DlpxException('{} in engine {} is not a virtual object. '
                                    'Skipping.\n'.format(container_obj.name,
                                                         engine_name))
            elif container_obj.staging is True:
                # Fixed message: this branch rejects staging objects, but the
                # original text wrongly said "is a virtual object".
                raise DlpxException('{} in engine {} is a staging object. '
                                    'Skipping.\n'.format(container_obj.name,
                                                         engine_name))
            elif container_obj.runtime.enabled == "ENABLED":
                print_info('\nINFO: {} Rewinding {} to {}\n'.format(
                           engine_name, container_obj.name, timestamp))

        # This exception is raised if rewinding a vFiles VDB
        # since AppDataContainer does not have virtual, staging or
        # enabled attributes.
        except AttributeError:
            pass

        print_debug('{}: Type: {}'.format(engine_name, container_obj.type))

        # If the vdb is a Oracle type, we need to use a OracleRollbackParameters
        if str(container_obj.reference).startswith("ORACLE"):
            rewind_params = OracleRollbackParameters()
        else:
            rewind_params = RollbackParameters()
        rewind_params.timeflow_point_parameters = \
            dx_timeflow_obj.set_timeflow_point(container_obj, timestamp_type,
                                               timestamp)
        print_debug('{}: {}'.format(engine_name, str(rewind_params)))
        try:
            # Rewind the VDB and record the resulting job for this engine.
            database.rollback(dlpx_obj.server_session, container_obj.reference,
                              rewind_params)
            dlpx_obj.jobs[engine_name] = dlpx_obj.server_session.last_job
            print_info('VDB {} was rolled back.'.format(container_obj.name))
        except (RequestError, HttpError, JobError) as e:
            print_exception('ERROR: {} encountered an error on {}'
                            ' during the rewind process:\n{}'.format(
                                engine_name, container_obj.name, e))
    # Don't do anything if the database is disabled
    else:
        print_info('{}: {} is not enabled. Skipping sync.'.format(
            engine_name, container_obj.name))
# Example 2
def main_workflow(engine):
    """
    Run the user-management jobs against a single Delphix Engine.
    Use the @run_async decorator to run this function asynchronously.
    This allows us to run against multiple Delphix Engines simultaneously

    engine: Dictionary of engines
    """
    try:
        # Setup the connection to the Delphix Engine
        dx_session_obj.serversess(engine['ip_address'], engine['username'],
                                  engine['password'])

    except DlpxException as e:
        # Trailing space after "while " is required so the concatenated
        # message does not run the words together.
        print_exception('\nERROR: Engine {} encountered an error while '
                        '{}:\n{}\n'.format(engine['hostname'],
                                           arguments['--target'], e))
        sys.exit(1)

    thingstodo = ["thingtodo"]
    try:
        with dx_session_obj.job_mode(single_thread):
            while len(dx_session_obj.jobs) > 0 or len(thingstodo) > 0:
                if len(thingstodo) > 0:
                    if arguments['--add']:
                        add_user(arguments['--user_name'],
                                 arguments['--password'],
                                 arguments['--email'],
                                 arguments['--jsonly'])
                    elif arguments['--update']:
                        update_user(arguments['--user_name'],
                                    arguments['--password'],
                                    arguments['--email'],
                                    arguments['--jsonly'])
                    elif arguments['--delete']:
                        delete_user(arguments['--user_name'])
                    elif arguments['--list']:
                        list_users()
                    thingstodo.pop()
                # Get all the jobs, then inspect them. list() is required
                # because entries are deleted below while iterating; in
                # Python 3 mutating a dict during keys() iteration raises
                # RuntimeError.
                i = 0
                for j in list(dx_session_obj.jobs.keys()):
                    job_obj = job.get(dx_session_obj.server_session,
                                      dx_session_obj.jobs[j])
                    print_debug(job_obj)
                    print_info('{}: User: {}'.format(
                        engine['hostname'], job_obj.job_state))
                    if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
                        # If the job is in a non-running state, remove it
                        # from the running jobs list.
                        del dx_session_obj.jobs[j]
                    elif job_obj.job_state in 'RUNNING':
                        # If the job is in a running state, increment the
                        # running job count.
                        i += 1
                    print_info('{}: {:d} jobs running.'.format(
                               engine['hostname'], i))
                    # If we have running jobs, pause before repeating the
                    # checks.
                    if len(dx_session_obj.jobs) > 0:
                        sleep(float(arguments['--poll']))

    except (HttpError, RequestError, JobError, DlpxException) as e:
        print_exception('ERROR: Could not complete user '
                        'operation: {}'.format(e))
# Example 3
def main_workflow(engine):
    """
    Run the group-management jobs against a single Delphix Engine.
    Use the @run_async decorator to run this function asynchronously.
    This allows us to run against multiple Delphix Engines simultaneously

    engine: Dictionary of engines
    """
    try:
        # Setup the connection to the Delphix Engine
        dx_session_obj.serversess(engine["ip_address"], engine["username"],
                                  engine["password"])

    except DlpxException as e:
        # Trailing space after "while " keeps the concatenated message
        # readable.
        print_exception("\nERROR: Engine {} encountered an error while "
                        "{}:\n{}\n".format(engine["hostname"],
                                           arguments["--target"], e))
        sys.exit(1)

    thingstodo = ["thingtodo"]
    try:
        with dx_session_obj.job_mode(single_thread):
            while len(dx_session_obj.jobs) > 0 or len(thingstodo) > 0:
                if len(thingstodo) > 0:
                    if arguments["--add"]:
                        add_group(arguments["--group_name"])
                    elif arguments["--delete"]:
                        delete_group(arguments["--group_name"])
                    elif arguments["--list"]:
                        list_groups()
                    thingstodo.pop()
                # Get all the jobs, then inspect them. list() is required
                # because entries are deleted below while iterating; in
                # Python 3 mutating a dict during keys() iteration raises
                # RuntimeError.
                i = 0
                for j in list(dx_session_obj.jobs.keys()):
                    job_obj = job.get(dx_session_obj.server_session,
                                      dx_session_obj.jobs[j])
                    print_debug(job_obj)
                    print_info("{}: Group: {}".format(engine["hostname"],
                                                      job_obj.job_state))
                    if job_obj.job_state in [
                            "CANCELED", "COMPLETED", "FAILED"
                    ]:
                        # If the job is in a non-running state, remove it
                        # from the running jobs list.
                        del dx_session_obj.jobs[j]
                    elif job_obj.job_state in "RUNNING":
                        # If the job is in a running state, increment the
                        # running job count.
                        i += 1
                    print_info("{}: {:d} jobs running.".format(
                        engine["hostname"], i))
                    # If we have running jobs, pause before repeating the
                    # checks.
                    if len(dx_session_obj.jobs) > 0:
                        sleep(float(arguments["--poll"]))

    except (HttpError, RequestError, JobError, DlpxException) as e:
        print_exception("ERROR: Could not complete group "
                        "operation: {}".format(e))
# Example 4
def get_config(config_file_path):
    """
    Read the dxtools.conf file and return its engines keyed by hostname.

    config_file_path: Path to the JSON configuration file.
    """
    # First test to see that the file is there and we can open it.
    # "with" guarantees the handle is closed even if read() fails; catching
    # (IOError, OSError) instead of a bare except avoids masking unrelated
    # errors such as KeyboardInterrupt.
    try:
        with open(config_file_path) as config_fh:
            config_file = config_fh.read()
    except (IOError, OSError):
        print_error("Was unable to open " + config_file_path +
                    ". Please check the path and permissions, then try again.")
        sys.exit(1)

    # Now parse the file contents as json and turn them into a
    # python dictionary, throw an error if it isn't proper json.
    # json decode failures raise ValueError (JSONDecodeError subclasses it).
    try:
        config = json.loads(config_file)
    except ValueError:
        print_error("Was unable to read " + config_file_path +
                    " as json. Please check file in a json formatter and "
                    "try again.")
        sys.exit(1)

    # Create a dictionary of engines (removing the data node from the
    # dxtools.json, for easier parsing)
    delphix_engines = {}
    for each in config["data"]:
        delphix_engines[each["hostname"]] = each

    print_debug(delphix_engines)
    return delphix_engines
def get_config(config_file_path):
    """
    Read the dxtools.conf file and return its engines keyed by hostname.

    config_file_path: Path to the JSON configuration file.
    """
    # First test to see that the file is there and we can open it.
    # "with" guarantees the handle is closed even if read() fails; catching
    # (IOError, OSError) instead of a bare except avoids masking unrelated
    # errors such as KeyboardInterrupt.
    try:
        with open(config_file_path) as config_fh:
            config_file = config_fh.read()
    except (IOError, OSError):
        print_error("Was unable to open " + config_file_path +
                    ". Please check the path and permissions, then try again.")
        sys.exit(1)

    # Now parse the file contents as json and turn them into a
    # python dictionary, throw an error if it isn't proper json.
    # json decode failures raise ValueError (JSONDecodeError subclasses it).
    try:
        config = json.loads(config_file)
    except ValueError:
        print_error("Was unable to read " + config_file_path +
                    " as json. Please check file in a json formatter and "
                    "try again.")
        sys.exit(1)

    # Create a dictionary of engines (removing the data node from the
    # dxtools.json, for easier parsing)
    delphix_engines = {}
    for each in config['data']:
        delphix_engines[each['hostname']] = each

    print_debug(delphix_engines)
    return delphix_engines
# Example 6
def find_snapshot_by_database_and_time(engine, database_obj, snap_time):
    """
    Find a snapshot of a database by timestamp and return it.

    engine: Dictionary of engines from config file.
    database_obj: Database object to find the snapshot against.
    snap_time: Timestamp (prefix) of the snapshot to match.

    Raises DlpxException when zero or more than one snapshot matches.
    """
    snapshots = snapshot.get_all(
        dx_session_obj.server_session, database=database_obj.reference
    )
    # Match on the snap_time parameter. Previously this read the global
    # arguments["--timestamp"], leaving snap_time unused.
    matches = []
    for snapshot_obj in snapshots:
        if str(snapshot_obj.latest_change_point.timestamp).startswith(
            str(snap_time)
        ):
            matches.append(snapshot_obj)

    if len(matches) == 1:
        # Fixed a stray '"' that was embedded in this debug message.
        print_debug(
            "%s: Found one and only one match. This is good.\n%s"
            % (engine["hostname"], matches[0]),
            debug,
        )
        return matches[0]

    elif len(matches) > 1:
        print_debug(matches, debug)
        # Added the missing space between the two concatenated sentences.
        raise DlpxException(
            "%s: The time specified was not specific enough. "
            "More than one match found.\n" % (engine["hostname"])
        )
    else:
        raise DlpxException(
            "%s: No matches found for the time specified.\n" % (engine["hostname"])
        )
def create_authorization(dlpx_obj, role_name, target_type, target_name,
                         user_name):
    """
    Create an authorization granting a role on a target to a user.

    :param dlpx_obj: Virtualization Engine session object
    :type dlpx_obj: lib.GetSession.GetSession
    :param role_name: Name of the role
    :param target_type: Supports snapshot, group and database target types
    :param target_name: Name of the target
    :param user_name: User for the authorization
    """

    authorization_obj = Authorization()
    print_debug('Searching for {}, {} and {} references.\n'.format(
                role_name, target_name, user_name))
    try:
        authorization_obj.role = find_obj_by_name(
            dlpx_obj.server_session, role, role_name).reference
        authorization_obj.target = find_target_type(
            dlpx_obj, target_type, target_name).reference
        authorization_obj.user = find_obj_by_name(
            dlpx_obj.server_session, user, user_name).reference
        authorization.create(dlpx_obj.server_session, authorization_obj)
    except (RequestError, HttpError, JobError) as e:
        print_exception('An error occurred while creating authorization:\n'
                        '{}'.format(e))
    else:
        # print() function form: the Python 2 "print 'x'" statement is a
        # syntax error under Python 3. The success message is only emitted
        # when no exception was raised.
        print('Authorization successfully created for {}.'.format(user_name))
def rewind_database(dlpx_obj, vdb_name, timestamp, timestamp_type='SNAPSHOT'):
    """
    Perform a rewind (rollback) of a VDB to a snapshot or point in time.

    dlpx_obj: Virtualization Engine session object
    vdb_name: VDB to be rewound
    timestamp: Point in time to rewind the VDB
    timestamp_type: The type of timestamp being used for the rewind
    """

    # list() is required: dict.keys() returns a non-indexable view object
    # in Python 3.
    engine_name = list(dlpx_obj.dlpx_engines.keys())[0]
    dx_timeflow_obj = DxTimeflow(dlpx_obj.server_session)
    container_obj = find_obj_by_name(dlpx_obj.server_session, database,
                                     vdb_name)
    # Sanity check to make sure our container object has a reference
    if container_obj.reference:
        try:
            if container_obj.virtual is not True:
                raise DlpxException('{} in engine {} is not a virtual object. '
                                    'Skipping.\n'.format(container_obj.name,
                                                         engine_name))
            elif container_obj.staging is True:
                # Fixed message: this branch rejects staging objects, but the
                # original text wrongly said "is a virtual object".
                raise DlpxException('{} in engine {} is a staging object. '
                                    'Skipping.\n'.format(container_obj.name,
                                                         engine_name))
            elif container_obj.runtime.enabled == "ENABLED":
                print_info('\nINFO: {} Rewinding {} to {}\n'.format(
                           engine_name, container_obj.name, timestamp))

        # This exception is raised if rewinding a vFiles VDB
        # since AppDataContainer does not have virtual, staging or
        # enabled attributes.
        except AttributeError:
            pass

        print_debug('{}: Type: {}'.format(engine_name, container_obj.type))

        # If the vdb is a Oracle type, we need to use a OracleRollbackParameters
        if str(container_obj.reference).startswith("ORACLE"):
            rewind_params = OracleRollbackParameters()
        else:
            rewind_params = RollbackParameters()
        rewind_params.timeflow_point_parameters = \
            dx_timeflow_obj.set_timeflow_point(container_obj, timestamp_type,
                                               timestamp)
        print_debug('{}: {}'.format(engine_name, str(rewind_params)))
        try:
            # Rewind the VDB and record the resulting job for this engine.
            database.rollback(dlpx_obj.server_session, container_obj.reference,
                              rewind_params)
            dlpx_obj.jobs[engine_name] = dlpx_obj.server_session.last_job
            print_info('VDB {} was rolled back.'.format(container_obj.name))
        except (RequestError, HttpError, JobError) as e:
            print_exception('ERROR: {} encountered an error on {}'
                            ' during the rewind process:\n{}'.format(
                                engine_name, container_obj.name, e))
    # Don't do anything if the database is disabled
    else:
        print_info('{}: {} is not enabled. Skipping sync.'.format(
            engine_name, container_obj.name))
# Example 9
def find_snapshot_by_database_and_name(engine, database_obj, snap_name):
    """
    Find a snapshot of a database by name and return it.

    engine: Dictionary of engines from config file.
    database_obj: Database object to find the snapshot against.
    snap_name: Name (prefix) of the snapshot to match.

    Raises DlpxException when zero or more than one snapshot matches.
    """

    snapshots = snapshot.get_all(dx_session_obj.server_session,
                                 database=database_obj.reference)
    matches = []
    for snapshot_obj in snapshots:
        # Match on the snap_name parameter. Previously this read the global
        # arguments['--timestamp'], leaving snap_name unused.
        if str(snapshot_obj.name).startswith(snap_name):
            matches.append(snapshot_obj)

    for each in matches:
        print_debug(each.name, debug)

    if len(matches) == 1:
        print_debug(
            '%s: Found one and only one match. This is good.\n %s' %
            (engine['hostname'], matches[0]), debug)
        return matches[0]

    elif len(matches) > 1:
        raise DlpxException('%s: The name specified was not specific enough.'
                            ' More than one match found.\n' %
                            (engine['hostname'], ))

    else:
        # Fixed message: this function matches on name, not time.
        raise DlpxException('%s: No matches found for the name specified.\n' %
                            (engine['hostname']))
def create_authorization(dlpx_obj, role_name, target_type, target_name, user_name):
    """
    Create an authorization granting a role on a target to a user.

    :param dlpx_obj: Virtualization Engine session object
    :type dlpx_obj: lib.GetSession.GetSession
    :param role_name: Name of the role
    :param target_type: Supports snapshot, group and database target types
    :param target_name: Name of the target
    :param user_name: User for the authorization
    """

    authorization_obj = Authorization()
    print_debug(
        "Searching for {}, {} and {} references.\n".format(
            role_name, target_name, user_name
        )
    )
    try:
        authorization_obj.role = find_obj_by_name(
            dlpx_obj.server_session, role, role_name
        ).reference
        authorization_obj.target = find_target_type(
            dlpx_obj, target_type, target_name
        ).reference
        authorization_obj.user = find_obj_by_name(
            dlpx_obj.server_session, user, user_name
        ).reference
        authorization.create(dlpx_obj.server_session, authorization_obj)
    except (RequestError, HttpError, JobError) as e:
        print_exception(
            "An error occurred while creating authorization:\n" "{}".format(e)
        )
    else:
        # Only report success when no exception was raised; the original
        # printed the success message even after a failure.
        print("Authorization successfully created for {}.".format(user_name))
def find_snapshot_by_database_and_name(engine, database_obj, snap_name):
    """
    Find a snapshot of a database by name and return it.

    engine: Dictionary of engines from config file.
    database_obj: Database object to find the snapshot against.
    snap_name: Name (prefix) of the snapshot to match.

    Raises DlpxException when zero or more than one snapshot matches.
    """

    snapshots = snapshot.get_all(dx_session_obj.server_session,
                                 database=database_obj.reference)
    matches = []
    for snapshot_obj in snapshots:
        # Match on the snap_name parameter. Previously this read the global
        # arguments['--timestamp'], leaving snap_name unused.
        if str(snapshot_obj.name).startswith(snap_name):
            matches.append(snapshot_obj)

    for each in matches:
        print_debug(each.name, debug)

    if len(matches) == 1:
        print_debug('%s: Found one and only one match. This is good.\n %s' %
                    (engine['hostname'], matches[0]), debug)
        return matches[0]

    elif len(matches) > 1:
        raise DlpxException('%s: The name specified was not specific enough.'
                            ' More than one match found.\n' %
                            (engine['hostname'],))

    else:
        # Fixed message: this function matches on name, not time.
        raise DlpxException('%s: No matches found for the name specified.\n'
                            % (engine['hostname']))
def find_snapshot_by_database_and_time(engine, database_obj, snap_time):
    """
    Find a snapshot of a database by timestamp and return it.

    engine: Dictionary of engines from config file.
    database_obj: Database object to find the snapshot against.
    snap_time: Timestamp (prefix) of the snapshot to match.

    Raises DlpxException when zero or more than one snapshot matches.
    """
    snapshots = snapshot.get_all(dx_session_obj.server_session,
                                 database=database_obj.reference)
    matches = []

    for snapshot_obj in snapshots:
        # Match on the snap_time parameter. Previously this read the global
        # arguments['--timestamp'], leaving snap_time unused.
        if str(snapshot_obj.latest_change_point.timestamp).startswith(
                str(snap_time)):
            matches.append(snapshot_obj)

    if len(matches) == 1:
        # Fixed a stray '"' that was embedded in this debug message.
        print_debug('%s: Found one and only one match. This is good.\n%s' %
                    (engine['hostname'], matches[0]), debug)
        return matches[0]

    elif len(matches) > 1:
        print_debug(matches, debug)
        # Added the missing space between the two concatenated sentences.
        raise DlpxException('%s: The time specified was not specific enough. '
                            'More than one match found.\n' %
                            (engine['hostname']))
    else:
        raise DlpxException('%s: No matches found for the time specified.\n'
                            % (engine['hostname']))
def dx_obj_operation(dlpx_obj, vdb_name, operation):
    """
    Start, stop, enable, disable or force-disable a dSource or VDB.

    :param dlpx_obj: Virtualization Engine session object
    :type dlpx_obj: lib.GetSession.GetSession
    :param vdb_name: Name of the object to stop/start/enable/disable
    :type vdb_name: str
    :param operation: start, stop, enable, disable or force_disable
    :type operation: str
    """

    print_debug("Searching for {} reference.\n".format(vdb_name))
    # list() is required: dict.keys() returns a non-indexable view object
    # in Python 3.
    engine_name = list(dlpx_obj.dlpx_engines.keys())[0]
    vdb_obj = find_obj_by_name(dlpx_obj.server_session, source, vdb_name)
    try:
        if vdb_obj:
            if operation == "start":
                source.start(dlpx_obj.server_session, vdb_obj.reference)
            elif operation == "stop":
                source.stop(dlpx_obj.server_session, vdb_obj.reference)
            elif operation == "enable":
                source.enable(dlpx_obj.server_session, vdb_obj.reference)
            elif operation == "disable":
                source.disable(dlpx_obj.server_session, vdb_obj.reference)
            elif operation == "force_disable":
                # Force disable skips the pre-disable cleanup step.
                disable_params = SourceDisableParameters()
                disable_params.attempt_cleanup = False
                source.disable(dlpx_obj.server_session, vdb_obj.reference,
                               disable_params)
            dlpx_obj.jobs[engine_name] = dlpx_obj.server_session.last_job
    except (RequestError, HttpError, JobError, AttributeError) as e:
        print_exception("An error occurred while performing {} on {}:\n"
                        "{}".format(operation, vdb_name, e))
    else:
        # Only report success when no exception was raised; the original
        # printed the success message even after a failure.
        print("{} was successfully performed on {}.".format(operation,
                                                            vdb_name))
# Example 14
def vdb_operation(vdb_name, operation):
    """
    Start, stop, enable or disable a VDB.

    vdb_name: Name of the VDB to operate on.
    operation: One of 'start', 'stop', 'enable' or 'disable'.
    """
    print_debug('Searching for {} reference.\n'.format(vdb_name))

    vdb_obj = find_source_by_dbname(dx_session_obj.server_session, database,
                                    vdb_name)
    try:
        if vdb_obj:
            if operation == 'start':
                source.start(dx_session_obj.server_session, vdb_obj.reference)
            elif operation == 'stop':
                source.stop(dx_session_obj.server_session, vdb_obj.reference)
            elif operation == 'enable':
                source.enable(dx_session_obj.server_session,
                              vdb_obj.reference)
            elif operation == 'disable':
                source.disable(dx_session_obj.server_session,
                               vdb_obj.reference)
            # Track the resulting job for this engine.
            dx_session_obj.jobs[dx_session_obj.server_session.address] = \
                dx_session_obj.server_session.last_job

    # "except X, e" is Python 2-only syntax (SyntaxError under Python 3);
    # "as e" works on Python 2.6+ and 3.
    except (RequestError, HttpError, JobError, AttributeError) as e:
        print('An error occurred while performing {} on {}.:'
              '{}\n'.format(operation, vdb_name, e))
def main_workflow(engine, dlpx_obj):
    """
    Create and monitor the rewind job for a single Delphix Engine.
    Use the @run_async decorator to run this function asynchronously.
    The @run_async decorator allows us to run against multiple Delphix
    Engines simultaneously

    :param engine: Dictionary of engines
    :type engine: dictionary
    :param dlpx_obj: Virtualization Engine session object
    :type dlpx_obj: lib.GetSession.GetSession
    """

    try:
        # Setup the connection to the Delphix Engine
        dlpx_obj.serversess(engine['ip_address'], engine['username'],
                            engine['password'])
    except DlpxException as e:
        # Trailing space after "while " fixes the message previously reading
        # "whilerewinding".
        print_exception('ERROR: Engine {} encountered an error while '
                        'rewinding {}:\n{}\n'.format(engine['hostname'],
                                                     arguments['--target'], e))
        # Without a session there is nothing further we can do.
        sys.exit(1)

    thingstodo = ["thingtodo"]
    try:
        with dlpx_obj.job_mode(single_thread):
            while len(dlpx_obj.jobs) > 0 or len(thingstodo) > 0:
                if len(thingstodo) > 0:
                    rewind_database(dlpx_obj, arguments['--vdb'],
                                    arguments['--timestamp'],
                                    arguments['--timestamp_type'])
                    thingstodo.pop()

                # Get all the jobs, then inspect them. list() is required
                # because entries are deleted below while iterating; in
                # Python 3 mutating a dict during keys() iteration raises
                # RuntimeError.
                i = 0
                for j in list(dlpx_obj.jobs.keys()):
                    job_obj = job.get(dlpx_obj.server_session,
                                      dlpx_obj.jobs[j])
                    print_debug(job_obj)
                    print_info('{}: Refresh of {}: {}'.format(
                        engine['hostname'], arguments['--vdb'],
                        job_obj.job_state))
                    if job_obj.job_state in ['CANCELED', 'COMPLETED', 'FAILED']:
                        # If the job is in a non-running state, remove it
                        # from the running jobs list.
                        del dlpx_obj.jobs[j]
                    elif job_obj.job_state in 'RUNNING':
                        # If the job is in a running state, increment the
                        # running job count.
                        i += 1
                    print_info('{}: {:d} jobs running.'.format(
                        engine['hostname'], i))
                    # If we have running jobs, pause before repeating the
                    # checks.
                    if len(dlpx_obj.jobs) > 0:
                        sleep(float(arguments['--poll']))
    except (DlpxException, RequestError, JobError, HttpError) as e:
        print_exception('Error in dx_rewind_vdb: {}\n{}'.format(
            engine['hostname'], e))
        sys.exit(1)
# Example 16
def main_workflow(engine, dlpx_obj):
    """
    Create and monitor the rewind job for a single Delphix Engine.
    Use the @run_async decorator to run this function asynchronously.
    The @run_async decorator allows us to run against multiple Delphix
    Engines simultaneously

    :param engine: Dictionary of engines
    :type engine: dictionary
    :param dlpx_obj: Virtualization Engine session object
    :type dlpx_obj: lib.GetSession.GetSession
    """

    try:
        # Setup the connection to the Delphix Engine
        dlpx_obj.serversess(engine['ip_address'], engine['username'],
                            engine['password'])
    except DlpxException as e:
        # Trailing space after "while " fixes the message previously reading
        # "whilerewinding".
        print_exception('ERROR: Engine {} encountered an error while '
                        'rewinding {}:\n{}\n'.format(engine['hostname'],
                                                     arguments['--target'], e))
        # Without a session there is nothing further we can do.
        sys.exit(1)

    thingstodo = ["thingtodo"]
    try:
        with dlpx_obj.job_mode(single_thread):
            while len(dlpx_obj.jobs) > 0 or len(thingstodo) > 0:
                if len(thingstodo) > 0:
                    rewind_database(dlpx_obj, arguments['--vdb'],
                                    arguments['--timestamp'],
                                    arguments['--timestamp_type'])
                    thingstodo.pop()

                # Get all the jobs, then inspect them. list() is required
                # because entries are deleted below while iterating; in
                # Python 3 mutating a dict during keys() iteration raises
                # RuntimeError.
                i = 0
                for j in list(dlpx_obj.jobs.keys()):
                    job_obj = job.get(dlpx_obj.server_session,
                                      dlpx_obj.jobs[j])
                    print_debug(job_obj)
                    print_info('{}: Refresh of {}: {}'.format(
                        engine['hostname'], arguments['--vdb'],
                        job_obj.job_state))
                    if job_obj.job_state in ['CANCELED', 'COMPLETED', 'FAILED']:
                        # If the job is in a non-running state, remove it
                        # from the running jobs list.
                        del dlpx_obj.jobs[j]
                    elif job_obj.job_state in 'RUNNING':
                        # If the job is in a running state, increment the
                        # running job count.
                        i += 1
                    print_info('{}: {:d} jobs running.'.format(
                        engine['hostname'], i))
                    # If we have running jobs, pause before repeating the
                    # checks.
                    if len(dlpx_obj.jobs) > 0:
                        sleep(float(arguments['--poll']))
    except (DlpxException, RequestError, JobError, HttpError) as e:
        print_exception('Error in dx_rewind_vdb: {}\n{}'.format(
            engine['hostname'], e))
        sys.exit(1)
# Example 17
def main_workflow(engine):
    """
    Run the password-update job against a single Delphix Engine.
    Use the @run_async decorator to run this function asynchronously.
    This allows us to run against multiple Delphix Engines simultaneously

    engine: Dictionary of engines
    """
    jobs = {}

    try:
        # Setup the connection to the Delphix Engine
        dx_session_obj.serversess(engine['ip_address'], engine['username'],
                                  engine['password'])

    except DlpxException as e:
        # Trailing space after "while " keeps the concatenated message
        # readable.
        print_exception('\nERROR: Engine %s encountered an error while '
                        '%s:\n%s\n' %
                        (engine['hostname'], arguments['--target'], e))
        sys.exit(1)

    thingstodo = ["thingtodo"]
    # Reset the running job count before we begin.
    i = 0
    with dx_session_obj.job_mode(single_thread):
        while len(jobs) > 0 or len(thingstodo) > 0:
            if len(thingstodo) > 0:
                if arguments['--pw']:
                    update_ase_db_pw()

                # elif OPERATION:
                #     method_call

                thingstodo.pop()

            # Get all the jobs, then inspect them. list() is required since
            # jobs are deleted during iteration; mutating a dict while
            # iterating its keys() view raises RuntimeError in Python 3.
            i = 0
            for j in list(jobs.keys()):
                job_obj = job.get(dx_session_obj.server_session, jobs[j])
                print_debug(job_obj)
                print_info(engine["hostname"] + ": VDB Operations: " +
                           job_obj.job_state)

                if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
                    # If the job is in a non-running state, remove it from
                    # the running jobs list.
                    del jobs[j]
                else:
                    # If the job is in a running state, increment the
                    # running job count.
                    i += 1

            print_info(engine["hostname"] + ": " + str(i) + " jobs running. ")
            # If we have running jobs, pause before repeating the checks.
            if len(jobs) > 0:
                sleep(float(arguments['--poll']))
def main_workflow(engine):
    """
    This function actually runs the jobs.
    Use the @run_async decorator to run this function asynchronously.
    This allows us to run against multiple Delphix Engine simultaneously

    engine: Dictionary of engines
    """
    jobs = {}

    # Connect to this Delphix Engine before doing any work.
    try:
        dx_session_obj.serversess(engine['ip_address'], engine['username'],
                                  engine['password'])
    except DlpxException as e:
        print_exception('\nERROR: Engine {} encountered an error while'
                        '{}:\n{}\n'.format(engine['hostname'],
                        arguments['--target'], e))
        sys.exit(1)

    # Single sentinel entry: the operation below runs exactly once.
    thingstodo = ["thingtodo"]
    running_jobs = 0
    with dx_session_obj.job_mode(single_thread):
        while len(jobs) > 0 or len(thingstodo) > 0:
            if len(thingstodo) > 0:
                # Kick off the one operation this workflow performs.
                find_missing_archivelogs(engine['hostname'])
                thingstodo.pop()

            # Poll every job we started; prune the finished ones.
            running_jobs = 0
            for job_name in list(jobs.keys()):
                job_obj = job.get(dx_session_obj.server_session,
                                  jobs[job_name])
                print_debug(job_obj)
                print_info('{}: VDB Operations:{}\n'.format(
                    engine['hostname'], job_obj.job_state))

                if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
                    # Terminal state: forget this job.
                    del jobs[job_name]
                else:
                    # Still in flight.
                    running_jobs += 1

            print_info(engine["hostname"] + ": " + str(running_jobs) +
                       " jobs running. ")
            # Pause between polling cycles while work remains.
            if len(jobs) > 0:
                sleep(float(arguments['--poll']))
Ejemplo n.º 19
0
def main_workflow(engine):
    """
    This function actually runs the jobs.
    Use the @run_async decorator to run this function asynchronously.
    This allows us to run against multiple Delphix Engine simultaneously

    engine: Dictionary of engines
    """
    jobs = {}

    try:
        # Setup the connection to the Delphix Engine
        dx_session_obj.serversess(engine['ip_address'], engine['username'],
                                  engine['password'])

    except DlpxException as e:
        print_exception('\nERROR: Engine {} encountered an error while'
                        '{}:\n{}\n'.format(engine['hostname'],
                                           arguments['--target'], e))
        sys.exit(1)

    thingstodo = ["thingtodo"]
    with dx_session_obj.job_mode(single_thread):
        while len(dx_session_obj.jobs) > 0 or len(thingstodo) > 0:
            if len(thingstodo) > 0:

                if arguments['--list']:
                    list_jobs()
                thingstodo.pop()

            # get all the jobs, then inspect them
            i = 0
            # Iterate a copy of the keys: entries are deleted mid-loop
            # (also keeps this correct under Python 3 dict views).
            for j in list(dx_session_obj.jobs.keys()):
                job_obj = job.get(dx_session_obj.server_session,
                                  dx_session_obj.jobs[j])
                print_debug(job_obj)
                print_info('{}: Operations: {}'.format(engine['hostname'],
                                                       job_obj.job_state))
                if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
                    # If the job is in a non-running state, remove it from the
                    # running jobs list.
                    del dx_session_obj.jobs[j]
                elif job_obj.job_state in 'RUNNING':
                    # If the job is in a running state, increment the running
                    # job count. NOTE: 'in' on a str is a substring test,
                    # not an equality check.
                    i += 1

            # BUGFIX: report the running-job count once per polling cycle.
            # It previously sat inside the for loop and printed once per
            # job with a partially accumulated count.
            print_info('{}: {:d} jobs running.'.format(
                engine['hostname'], i))

            # If we have running jobs, pause before repeating the checks.
            if len(dx_session_obj.jobs) > 0:
                sleep(float(arguments['--poll']))
def create_windows_env(dlpx_obj, env_name, host_user, ip_addr,
                       pw=None, connector_name=None):
    """
    Create a Windows environment.

    dlpx_obj: Virtualization Engine session object
    env_name: The name of the environment
    host_user: The server account used to authenticate
    ip_addr: DNS name or IP address of the environment
    pw: Password of the user. Default: None (use SSH keys instead)
    connector_name: Optional name of an existing connector environment to
                    use as a proxy for this host
    """
    # list() keeps this working on both Python 2 lists and Python 3 views.
    engine_name = list(dlpx_obj.dlpx_engines.keys())[0]

    env_params_obj = HostEnvironmentCreateParameters()

    print_debug('Creating the environment with a password')

    env_params_obj.primary_user = {'type': 'EnvironmentUser',
                                   'name': host_user,
                                   'credential': {
                                       'type': 'PasswordCredential',
                                       'password': pw}}

    env_params_obj.host_parameters = {'type': 'WindowsHostCreateParameters',
                                      'host': {'address': ip_addr,
                                               'type': 'WindowsHost',
                                               'name': env_name,
                                               'connectorPort': 9100}}

    env_params_obj.host_environment = WindowsHostEnvironment()
    env_params_obj.host_environment.name = env_name

    if connector_name:
        env_obj = find_obj_by_name(dlpx_obj.server_session, environment,
                                   connector_name)
        if env_obj:
            # Route this host through the existing connector environment.
            env_params_obj.host_environment.proxy = env_obj.host
        elif env_obj is None:
            # BUGFIX: previously formatted arguments[--connector_name],
            # which applies unary minus to the local name and raises
            # TypeError at runtime; use the connector_name parameter.
            print('Host was not found in the Engine: {}'.format(
                connector_name))
            sys.exit(1)

    try:
        environment.create(dlpx_obj.server_session,
                           env_params_obj)
        dlpx_obj.jobs[engine_name] = dlpx_obj.server_session.last_job

    except (DlpxException, RequestError, HttpError) as e:
        print('\nERROR: Encountered an exception while creating the '
              'environment:\n{}'.format(e))
def main_workflow(engine):
    """
    This function actually runs the jobs.
    Use the @run_async decorator to run this function asynchronously.
    This allows us to run against multiple Delphix Engine simultaneously

    engine: Dictionary of engines
    """
    jobs = {}

    try:
        # Setup the connection to the Delphix Engine
        dx_session_obj.serversess(engine['ip_address'], engine['username'],
                                  engine['password'])

    except DlpxException as e:
        print_exception('\nERROR: Engine {} encountered an error while'
                        '{}:\n{}\n'.format(engine['hostname'],
                        arguments['--target'], e))
        sys.exit(1)

    thingstodo = ["thingtodo"]
    with dx_session_obj.job_mode(single_thread):
        while len(dx_session_obj.jobs) > 0 or len(thingstodo) > 0:
            if len(thingstodo) > 0:
                refresh_database(arguments['--vdb'],
                                 arguments['--timestamp'],
                                 arguments['--timestamp_type'])
                thingstodo.pop()

            # get all the jobs, then inspect them
            i = 0
            # Iterate a copy of the keys: entries are deleted mid-loop
            # (also keeps this correct under Python 3 dict views).
            for j in list(dx_session_obj.jobs.keys()):
                job_obj = job.get(dx_session_obj.server_session,
                                  dx_session_obj.jobs[j])
                print_debug(job_obj)
                print_info('{}: Operations: {}'.format(engine['hostname'],
                                                       job_obj.job_state))
                if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
                    # If the job is in a non-running state, remove it from the
                    # running jobs list.
                    del dx_session_obj.jobs[j]
                elif job_obj.job_state in 'RUNNING':
                    # If the job is in a running state, increment the running
                    # job count. NOTE: 'in' on a str is a substring test,
                    # not an equality check.
                    i += 1

            # BUGFIX: report the running-job count once per polling cycle.
            # It previously sat inside the for loop and printed once per
            # job with a partially accumulated count.
            print_info('{}: {:d} jobs running.'.format(
                engine['hostname'], i))

            # If we have running jobs, pause before repeating the checks.
            if len(dx_session_obj.jobs) > 0:
                sleep(float(arguments['--poll']))
Ejemplo n.º 22
0
def create_windows_env(engine, env_name, host_user, ip_addr,
                       pw=None, connector_name=None):
    """
    Create a Windows environment.

    engine: engine dictionary. NOTE(review): the body uses the module-level
            dlpx_obj session object rather than this parameter — confirm
            that dlpx_obj is initialized before this is called.
    env_name: The name of the environment
    host_user: The server account used to authenticate
    ip_addr: DNS name or IP address of the environment
    pw: Password of the user. Default: None (use SSH keys instead)
    connector_name: Optional name of an existing connector environment to
                    use as a proxy for this host
    """
    # list() keeps this working on both Python 2 lists and Python 3 views.
    engine_name = list(dlpx_obj.dlpx_engines.keys())[0]

    env_params_obj = HostEnvironmentCreateParameters()

    print_debug('Creating the environment with a password')

    env_params_obj.primary_user = {'type': 'EnvironmentUser',
                                   'name': host_user,
                                   'credential': {
                                       'type': 'PasswordCredential',
                                       'password': pw}}

    env_params_obj.host_parameters = {'type': 'WindowsHostCreateParameters',
                                      'host': {'address': ip_addr,
                                               'type': 'WindowsHost',
                                               'name': env_name,
                                               'connectorPort': 9100}}

    env_params_obj.host_environment = WindowsHostEnvironment()
    env_params_obj.host_environment.name = env_name

    if connector_name:
        env_obj = find_obj_by_name(dlpx_obj.server_session, environment,
                                   connector_name)
        if env_obj:
            env_params_obj.host_environment.proxy = env_obj.host
        elif env_obj is None:
            # BUGFIX: previously formatted arguments[--connector_name],
            # which applies unary minus to the local name and raises
            # TypeError at runtime; use the connector_name parameter.
            print('Host was not found in the Engine: {}'.format(
                connector_name))
            sys.exit(1)

    try:
        environment.create(dlpx_obj.server_session,
                           env_params_obj)
        dlpx_obj.jobs[engine_name] = dlpx_obj.server_session.last_job

    except (DlpxException, RequestError, HttpError) as e:
        print('\nERROR: Encountered an exception while creating the '
              'environment:\n{}'.format(e))
def find_database_by_name_and_group_name(engine, server, group_name,
                                         database_name):
    """Return the database named database_name within group_name, or None."""
    for candidate in find_all_databases_by_group_name(engine, server,
                                                      group_name):
        if candidate.name == database_name:
            print_debug('%s: Found a match %s' % (engine['hostname'],
                        str(candidate.reference)))
            return candidate

    # Nothing matched: log it and fall through, returning None.
    print_info('%s unable to find %s in %s' % (engine['hostname'],
                                               database_name, group_name))
Ejemplo n.º 24
0
def find_database_by_name_and_group_name(engine, server, group_name,
                                         database_name):
    """Look up a database by exact name inside the named group; None if absent."""
    hostname = engine["hostname"]
    all_dbs = find_all_databases_by_group_name(engine, server, group_name)
    for db in all_dbs:
        if db.name != database_name:
            continue
        print_debug(hostname + ": Found a match " + str(db.reference))
        return db

    # No database in the group carried the requested name.
    print_info(hostname + ': Unable to find "' + database_name +
               '" in ' + group_name)
def find_database_by_name_and_group_name(engine, server, group_name,
                                         database_name):
    """Search the group's databases for an exact name match; None if absent."""
    matching = [db for db in
                find_all_databases_by_group_name(engine, server, group_name)
                if db.name == database_name]
    if matching:
        print_debug(engine["hostname"] + ": Found a match " +
                    str(matching[0].reference))
        return matching[0]

    # No match: log and implicitly return None.
    print_info(engine["hostname"] + ": Unable to find \"" +
               database_name + "\" in " + group_name)
Ejemplo n.º 26
0
def find_database_by_name_and_group_name(engine, server, group_name,
                                         database_name):
    """Fetch the group's databases and return the one whose name matches."""
    found = None
    for entry in find_all_databases_by_group_name(engine, server, group_name):
        if entry.name == database_name:
            found = entry
            break

    if found is not None:
        print_debug('%s: Found a match %s' %
                    (engine['hostname'], str(found.reference)))
        return found

    # Nothing matched; report and return None implicitly.
    print_info('%s unable to find %s in %s' %
               (engine['hostname'], database_name, group_name))
def job_mode(server):
    """
    This function tells Delphix how to execute jobs, based on the
    single_thread variable at the beginning of the file

    server: Delphix server session object
    Returns the job-context manager to run jobs under.
    """
    # Synchronously (one at a time)
    if single_thread == True:
        job_m = job_context.sync(server)
        print_debug("These jobs will be executed synchronously")
    # Or asynchronously
    else:
        # BUGFIX: 'async' is a reserved keyword as of Python 3.7, so the
        # attribute access job_context.async no longer parses; getattr
        # invokes the same API method and is identical on Python 2.
        job_m = getattr(job_context, 'async')(server)
        print_debug("These jobs will be executed asynchronously")
    return job_m
Ejemplo n.º 28
0
def job_mode(server):
    """
    This function tells Delphix how to execute jobs, based on the
    single_thread variable at the beginning of the file

    server: Delphix server session object
    Returns the job-context manager to run jobs under.
    """
    # Synchronously (one at a time)
    if single_thread == True:
        job_m = job_context.sync(server)
        print_debug("These jobs will be executed synchronously")
    # Or asynchronously
    else:
        # BUGFIX: 'async' is a reserved keyword as of Python 3.7, so
        # 'job_context. async (server)' no longer parses; getattr invokes
        # the same API method and is identical on Python 2.
        job_m = getattr(job_context, 'async')(server)
        print_debug("These jobs will be executed asynchronously")
    return job_m
Ejemplo n.º 29
0
def job_wait():
    """
    Block the current thread/process until every job on the engine has
    reached a terminal state (CANCELED, COMPLETED or FAILED).
    """
    # Pull the jobs known to the server (the last 25, by default).
    for running_job in job.get_all(server):
        # Jobs already in a terminal state need no waiting.
        if running_job.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
            continue
        print_debug("Waiting for " + running_job.reference + " (currently: " +
                    running_job.job_state +
                    ") to finish running against the container")
        # Block until this job completes.
        job_context.wait(server, running_job.reference)
def job_wait():
    """
    Pause this thread/process until no job on the engine is still running.
    """
    terminal_states = ["CANCELED", "COMPLETED", "FAILED"]
    # Fetch the server's job list (the last 25, by default) and wait on
    # each job that has not yet finished.
    for jobobj in job.get_all(server):
        if jobobj.job_state in terminal_states:
            continue
        print_debug("Waiting for " + jobobj.reference + " (currently: " +
                    jobobj.job_state +
                    ") to finish running against the container")
        job_context.wait(server, jobobj.reference)
Ejemplo n.º 31
0
    def job_wait(self):
        """
        Block the calling thread/process until every job on the engine has
        reached a terminal state.

        No arguments
        """
        # Fetch the jobs known to the server (the last 25, by default).
        for pending in job.get_all(self.server_session):
            # Skip jobs that have already finished one way or another.
            if pending.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
                continue
            print_debug('\nDEBUG: Waiting for %s (currently: %s) to '
                        'finish running against the container.\n' %
                        (pending.reference, pending.job_state))
            # Block until this job completes.
            job_context.wait(self.server_session, pending.reference)
Ejemplo n.º 32
0
    def job_mode(self, single_thread=True):
        """
        This method tells Delphix how to execute jobs, based on the
        single_thread variable

        single_thread: Execute application synchronously (True) or
                       async (False)
                       Default: True
        Returns the job-context manager to run jobs under.
        """

        # Synchronously (one at a time)
        if single_thread is True:
            print_debug("These jobs will be executed synchronously")
            return job_context.sync(self.server_session)

        # Or asynchronously
        elif single_thread is False:
            print_debug("These jobs will be executed asynchronously")
            # BUGFIX: 'async' is a reserved keyword as of Python 3.7, so
            # job_context.async no longer parses; getattr invokes the same
            # API method and is identical on Python 2.
            # NOTE(review): a non-bool single_thread falls through both
            # branches and returns None — confirm callers pass a bool.
            return getattr(job_context, 'async')(self.server_session)
Ejemplo n.º 33
0
def find_dbrepo_by_environment_ref_and_name(engine, repo_type,
                                            f_environment_ref, f_name):
    '''
    Function to find database repository objects by environment reference and
    name, and return the object's reference as a string
    You might use this function to find MSSQL database repos.

    engine: Delphix engine dictionary (uses 'hostname' for logging)
    repo_type: 'MSSqlInstance', 'ASEInstance' or 'Unstructured Files'
    f_environment_ref: environment reference to search within
    f_name: repository name to match (MSSql/ASE types only)
    Raises DlpxException when no repository matches.
    '''

    print_debug(
        '%s: Searching objects in the %s class for one with the '
        'environment reference of %s and a name of %s.' %
        (engine['hostname'], repo_type, f_environment_ref, f_name), debug)

    # NOTE(review): obj_ref is assigned but never used.
    obj_ref = ''
    all_objs = repository.get_all(server, environment=f_environment_ref)

    for obj in all_objs:
        if (repo_type == 'MSSqlInstance' or repo_type == 'ASEInstance'):
            if (obj.type == repo_type and obj.name == f_name):
                print_debug(
                    '%s: Found a match %s' %
                    (engine['hostname'], str(obj.reference)), debug)
                return obj

        elif repo_type == 'Unstructured Files':
            # NOTE(review): install_type is not defined in this function, so
            # entering this branch raises NameError — presumably it should
            # compare against repo_type or f_name; confirm before fixing.
            if obj.value == install_type:
                print_debug(
                    '%s: Found a match %s' %
                    (engine['hostname'], str(obj.reference)), debug)
                return obj

    raise DlpxException('%s: No Repo match found for type %s\n' %
                        (engine['hostname'], repo_type))
Ejemplo n.º 34
0
def find_dbrepo_by_environment_ref_and_install_path(engine, server,
                                                    install_type,
                                                    f_environment_ref,
                                                    f_install_path):
    '''
    Function to find database repository objects by environment reference and
    install path, and return the object's reference as a string
    You might use this function to find Oracle and PostGreSQL database repos.

    engine: Delphix engine dictionary (uses 'hostname' for logging)
    server: Delphix server session used for the repository lookup
    install_type: 'PgSQLInstall' or 'OracleInstall'
    f_environment_ref: environment reference to search within
    f_install_path: installation path (PgSQL) / home (Oracle) to match
    Raises DlpxException when install_type is not a supported repo type.
    '''
    # BUGFIX: validate install_type before scanning. The unsupported-type
    # raise previously lived in the loop's else branch, so it fired on the
    # first repository object of a valid scan only when the type was bad,
    # and never fired at all when the environment had no repositories.
    if install_type not in ('PgSQLInstall', 'OracleInstall'):
        raise DlpxException('%s: No Repo match found for type %s.\n' %
                            (engine["hostname"], install_type))

    print_debug(
        '%s: Searching objects in the %s class for one with the '
        'environment reference of %s and an install path of %s' %
        (engine['hostname'], install_type, f_environment_ref, f_install_path),
        debug)

    for obj in repository.get_all(server, environment=f_environment_ref):
        if install_type == 'PgSQLInstall':
            if (obj.type == install_type
                    and obj.installation_path == f_install_path):
                print_debug(
                    '%s: Found a match %s' %
                    (engine['hostname'], str(obj.reference)), debug)
                return obj

        elif install_type == 'OracleInstall':
            if (obj.type == install_type
                    and obj.installation_home == f_install_path):
                # BUGFIX: debug message typo 'Fount' -> 'Found'
                print_debug(
                    '%s: Found a match %s' %
                    (engine['hostname'], str(obj.reference)), debug)
                return obj
    # No matching repository found: preserve the original implicit None.
Ejemplo n.º 35
0
def find_snapshot_by_database_and_time(engine, server, database_obj,
                                       snap_time):
    """
    Find snapshot object by database name and timestamp

    engine: Delphix engine dictionary (uses 'hostname' for logging)
    server: A Delphix engine session object.
    database_obj: The database reference to retrieve the snapshot
    snap_time: timestamp of the snapshot
    Raises DlpxException when zero or multiple snapshots match.
    """
    snapshots = snapshot.get_all(server, database=database_obj.reference)
    matches = []

    for snapshot_obj in snapshots:
        # A snapshot matches when either end of its change-point range
        # equals the requested time exactly.
        if (str(snapshot_obj.latest_change_point.timestamp) == snap_time or
                str(snapshot_obj.first_change_point.timestamp) == snap_time):

            matches.append(snapshot_obj)

    if len(matches) == 1:
        snap_match = get_obj_name(server, database, matches[0].container)
        print_debug(engine["hostname"] +
                    ": Found one and only one match. This is good.")
        print_debug(engine["hostname"] + ": " + snap_match)

        return matches[0]

    elif len(matches) > 1:
        # BUGFIX: matches is a list; concatenating it to a str raised
        # TypeError before the intended debug line could print.
        print_debug(engine["hostname"] + ": " + str(matches))
        raise DlpxException("The time specified was not specific enough."
                            " More than one match found.\n")
    else:
        raise DlpxException("No matches found for the time specified.\n")
def find_snapshot_by_database_and_time(engine, server, database_obj, snap_time):
    """
    Find snapshot object by database name and timestamp

    engine: Delphix engine dictionary (uses 'hostname' for logging)
    server: A Delphix engine session object.
    database_obj: The database reference to retrieve the snapshot
    snap_time: timestamp of the snapshot
    Raises DlpxException when zero or multiple snapshots match.
    """
    snapshots = snapshot.get_all(server, database=database_obj.reference)
    matches = []

    for snapshot_obj in snapshots:
        # A snapshot matches when either end of its change-point range
        # equals the requested time exactly.
        if str(snapshot_obj.latest_change_point.timestamp) == snap_time \
               or str(snapshot_obj.first_change_point.timestamp) == snap_time:

            matches.append(snapshot_obj)

    if len(matches) == 1:
        snap_match = get_obj_name(server, database, matches[0].container)
        print_debug(engine['hostname'] +
                    ': Found one and only one match. This is good.')
        print_debug(engine['hostname'] + ': ' + snap_match)

        return matches[0]

    elif len(matches) > 1:
        # BUGFIX: matches is a list; concatenating it to a str raised
        # TypeError before the intended debug line could print.
        print_debug(engine["hostname"] + ": " + str(matches))
        raise DlpxException('The time specified was not specific enough.'
                    ' More than one match found.\n')
    else:
        raise DlpxException('No matches found for the time specified.\n')
def find_dbrepo_by_environment_ref_and_install_path(engine, server,
                                                    install_type,
                                                    f_environment_ref,
                                                    f_install_path):
    '''
    Function to find database repository objects by environment reference and
    install path, and return the object's reference as a string
    You might use this function to find Oracle and PostGreSQL database repos.

    Raises DlpxException when install_type is not a supported repo type.
    '''
    # BUGFIX: validate install_type before scanning. The unsupported-type
    # raise previously lived in the loop's else branch, so it fired on the
    # first repository object, and never fired at all when the environment
    # had no repositories.
    if install_type not in ('PgSQLInstall', 'OracleInstall'):
        raise DlpxException('%s: No Repo match found for type %s.\n' %
                            (engine["hostname"], install_type))

    print_debug('%s: Searching objects in the %s class for one with the '
                'environment reference of %s and an install path of %s' %
                (engine['hostname'], install_type, f_environment_ref,
                f_install_path), debug)

    for obj in repository.get_all(server, environment=f_environment_ref):
        if install_type == 'PgSQLInstall':
            if (obj.type == install_type and
                obj.installation_path == f_install_path):
                print_debug('%s: Found a match %s' % (engine['hostname'],
                            str(obj.reference)), debug)
                return obj

        elif install_type == 'OracleInstall':
            if (obj.type == install_type and
                obj.installation_home == f_install_path):
                # BUGFIX: debug message typo 'Fount' -> 'Found'
                print_debug('%s: Found a match %s' % (engine['hostname'],
                            str(obj.reference)), debug)
                return obj
    # No matching repository found: preserve the original implicit None.
def find_repo_by_environment_ref(engine, repo_type, f_environment_ref,
                                 f_install_path=None):
    '''
    Locate an unstructured-file repository object inside the given
    environment, matching repo_type against either the object's name or its
    type, and return the matching repository object.
    Raises DlpxException when nothing matches.
    '''
    print_debug('\n%s: Searching objects in the %s class for one with the'
                'environment reference of %s\n' %
                (engine['hostname'], repo_type, f_environment_ref), debug)

    repo_objs = repository.get_all(dx_session_obj.server_session,
                                   environment=f_environment_ref)
    for repo_obj in repo_objs:
        # Name matches are reported without the debug flag, mirroring the
        # original behaviour.
        if repo_obj.name == repo_type:
            print_debug(engine['hostname'] + ': Found a match ' +
                        str(repo_obj.reference))
            return repo_obj
        if repo_obj.type == repo_type:
            print_debug('%s Found a match %s' % (engine['hostname'],
                        str(repo_obj.reference)), debug)
            return repo_obj

    raise DlpxException('%s: No Repo match found for type %s\n' % (
                        engine['hostname'], repo_type))
def find_dbrepo_by_environment_ref_and_name(engine, repo_type,
                                            f_environment_ref, f_name):
    '''
    Function to find database repository objects by environment reference and
    name, and return the object's reference as a string
    You might use this function to find MSSQL database repos.

    engine: Delphix engine dictionary (uses 'hostname' for logging)
    repo_type: 'MSSqlInstance', 'ASEInstance' or 'Unstructured Files'
    f_environment_ref: environment reference to search within
    f_name: repository name to match (MSSql/ASE types only)
    Raises DlpxException when no repository matches.
    '''

    print_debug('%s: Searching objects in the %s class for one with the '
                'environment reference of %s and a name of %s.' %
                (engine['hostname'], repo_type, f_environment_ref, f_name),
                debug)

    # NOTE(review): obj_ref is assigned but never used.
    obj_ref = ''
    all_objs = repository.get_all(server, environment=f_environment_ref)

    for obj in all_objs:
        if (repo_type == 'MSSqlInstance' or repo_type == 'ASEInstance'):
            if (obj.type == repo_type and obj.name == f_name):
                print_debug('%s: Found a match %s' % (engine['hostname'],
                            str(obj.reference)), debug)
                return obj

        elif repo_type == 'Unstructured Files':
            # NOTE(review): install_type is not defined in this function, so
            # entering this branch raises NameError — presumably it should
            # compare against repo_type or f_name; confirm before fixing.
            if obj.value == install_type:
                print_debug('%s: Found a match %s' % (engine['hostname'],
                            str(obj.reference)), debug)
                return obj

    raise DlpxException('%s: No Repo match found for type %s\n' %
                        (engine['hostname'], repo_type))
Ejemplo n.º 40
0
def find_snapshot_by_database_and_name(engine, server, database_obj,
                                       snap_name):
    """
    Return the snapshot of database_obj whose name starts with the
    requested timestamp, or None when zero or multiple snapshots match.

    engine: Delphix engine dictionary (uses 'hostname' for logging)
    server: Delphix server session
    database_obj: database whose snapshots are searched
    snap_name: snapshot name. NOTE(review): this parameter is unused; the
               prefix is taken from arguments["--timestamp"] — confirm
               which was intended.
    """
    snapshots = snapshot.get_all(server, database=database_obj.reference)
    matches = []
    for snapshot_obj in snapshots:
        if str(snapshot_obj.name).startswith(arguments["--timestamp"]):
            matches.append(snapshot_obj)

    if len(matches) == 1:

        print_debug(engine["hostname"] +
                    ": Found one and only one match. This is good.")
        # BUGFIX: matches[0] is a snapshot object; concatenating it to a
        # str raised TypeError before the debug line could print.
        print_debug(engine["hostname"] + ": " + str(matches[0]))

        return matches[0]

    elif len(matches) > 1:
        print_error("The name specified was not specific enough. "
                    "More than one match found.")

        for each in matches:
            print_debug(engine["hostname"] + ": " + each.name)
    else:
        print_error("No matches found for the time specified")
    # Reached for both zero and multiple matches; returns None implicitly.
    print_error("No matching snapshot found")
Ejemplo n.º 41
0
def find_repo_by_environment_ref(
    engine, repo_type, f_environment_ref, f_install_path=None
):
    """
    Look up an unstructured-file repository inside the given environment,
    matching repo_type against either the object's name or its type, and
    return the matching repository object.
    Raises DlpxException when nothing matches.
    """
    print_debug(
        "\n%s: Searching objects in the %s class for one with the"
        "environment reference of %s\n"
        % (engine["hostname"], repo_type, f_environment_ref),
        debug,
    )

    candidates = repository.get_all(
        dx_session_obj.server_session, environment=f_environment_ref
    )
    for candidate in candidates:
        # A name match is reported without the debug flag, mirroring the
        # original behaviour.
        if candidate.name == repo_type:
            print_debug(engine["hostname"] + ": Found a match " + str(candidate.reference))
            return candidate
        if candidate.type == repo_type:
            print_debug(
                "%s Found a match %s" % (engine["hostname"], str(candidate.reference)), debug
            )
            return candidate

    raise DlpxException(
        "%s: No Repo match found for type %s\n" % (engine["hostname"], repo_type)
    )
def vdb_operation(vdb_name, operation):
    """
    Function to start, stop, enable or disable a VDB

    vdb_name: Name of the VDB to operate on
    operation: one of 'start', 'stop', 'enable', 'disable'
    Raises DlpxException if the underlying API call fails.
    """
    print_debug('Searching for %s reference.\n' % (vdb_name))

    vdb_obj = find_obj_by_name(dx_session_obj.server_session, source, vdb_name)

    try:
        if vdb_obj:
            if operation == 'start':
                source.start(dx_session_obj.server_session, vdb_obj.reference)
            elif operation == 'stop':
                source.stop(dx_session_obj.server_session, vdb_obj.reference)
            elif operation == 'enable':
                source.enable(dx_session_obj.server_session, vdb_obj.reference)
            elif operation == 'disable':
                source.disable(dx_session_obj.server_session,
                               vdb_obj.reference)

    # BUGFIX: 'except Exc, e' is Python 2-only syntax; 'as' is valid on
    # Python 2.6+ and required on Python 3.
    except (RequestError, HttpError, JobError, AttributeError) as e:
        raise DlpxException('An error occurred while performing ' +
                            operation + ' on ' + vdb_name + '.:%s\n' % (e))
Ejemplo n.º 43
0
def update_jobs_dictionary(engine, server, jobs):
    """
    This function checks each job in the dictionary and updates its status or
    removes it if the job is complete.
    Return the number of jobs still running.

    engine: Delphix engine dictionary (uses 'hostname' for logging)
    server: Delphix server session used to query job status
    jobs: dict mapping a key to a job reference; completed entries are
          deleted in place
    """
    # Establish the running jobs counter, as we are about to update the count
    # from the jobs report.
    i = 0
    # get all the jobs, then inspect them
    # (Python 2: .keys() returns a list copy, so deleting inside the loop is
    # safe; under Python 3 this would need list(jobs.keys()).)
    for j in jobs.keys():
        job_obj = job.get(server, jobs[j])
        print_debug("%s: %s" % (engine["hostname"], str(job_obj)), debug)
        # NOTE(review): j.name assumes the dict keys are objects with a
        # .name attribute; elsewhere in this file jobs are keyed by
        # hostname strings, which would raise AttributeError here —
        # confirm the key type used by callers.
        print_info("%s: %s: %s" % (engine["hostname"], j.name, job_obj.job_state))

        if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
            # If the job is in a non-running state, remove it from the running
            # jobs list.
            del jobs[j]
        else:
            # If the job is in a running state, increment the running job count.
            i += 1
    return i
def update_jobs_dictionary(engine, server, jobs):
    """
    This function checks each job in the dictionary and updates its status or
    removes it if the job is complete.
    Return the number of jobs still running.

    engine: Delphix engine dictionary (uses 'hostname' for logging)
    server: Delphix server session used to query job status
    jobs: dict mapping a key to a job reference; completed entries are
          deleted in place
    """
    #Establish the running jobs counter, as we are about to update the count
    # from the jobs report.
    i = 0
    #get all the jobs, then inspect them
    # (Python 2: .keys() returns a list copy, so deleting inside the loop is
    # safe; under Python 3 this would need list(jobs.keys()).)
    for j in jobs.keys():
        job_obj = job.get(server, jobs[j])
        print_debug('%s: %s' % (engine['hostname'], str(job_obj)), debug)
        # NOTE(review): j.name assumes the dict keys are objects with a
        # .name attribute; elsewhere in this file jobs are keyed by
        # hostname strings, which would raise AttributeError here —
        # confirm the key type used by callers.
        print_info('%s: %s: %s' % (engine['hostname'], j.name,
                   job_obj.job_state))

        if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
            #If the job is in a non-running state, remove it from the running
            # jobs list.
            del jobs[j]
        else:
            #If the job is in a running state, increment the running job count.
            i += 1
    return i
def dx_obj_operation(dlpx_obj, vdb_name, operation):
    """
    Start, stop, enable, disable or force-disable a VDB or dSource.

    :param dlpx_obj: Virtualization Engine session object
    :type dlpx_obj: lib.GetSession.GetSession
    :param vdb_name: Name of the object to stop/start/enable/disable
    :type vdb_name: str
    :param operation: one of 'start', 'stop', 'enable', 'disable' or
                      'force_disable'
    :type operation: str
    """
    print_debug('Searching for {} reference.\n'.format(vdb_name))
    engine_name = dlpx_obj.dlpx_engines.keys()[0]
    vdb_obj = find_obj_by_name(dlpx_obj.server_session, source, vdb_name)
    try:
        if vdb_obj:
            session = dlpx_obj.server_session
            ref = vdb_obj.reference
            if operation == 'start':
                source.start(session, ref)
            elif operation == 'stop':
                source.stop(session, ref)
            elif operation == 'enable':
                source.enable(session, ref)
            elif operation == 'disable':
                source.disable(session, ref)
            elif operation == 'force_disable':
                # A forced disable skips cleanup on the target host.
                disable_params = SourceDisableParameters()
                disable_params.attempt_cleanup = False
                source.disable(session, ref, disable_params)
            # Track the job so callers can poll it to completion.
            dlpx_obj.jobs[engine_name] = dlpx_obj.server_session.last_job
    except (RequestError, HttpError, JobError, AttributeError) as e:
        print_exception('An error occurred while performing {} on {}:\n'
                        '{}'.format(operation, vdb_name, e))
Ejemplo n.º 46
0
def find_dbrepo_by_environment_ref_and_name(
    engine, repo_type, f_environment_ref, f_name
):
    """
    Find a database repository object by environment reference and name.

    engine: dictionary describing the Delphix Engine (uses 'hostname')
    repo_type: repository type to match, e.g. "MSSqlInstance",
               "ASEInstance" or "Unstructured Files"
    f_environment_ref: reference of the environment to search in
    f_name: repository name to match
    Return the matching repository object; raise DlpxException when no
    match is found.
    """

    print_debug(
        "%s: Searching objects in the %s class for one with the "
        "environment reference of %s and a name of %s."
        % (engine["hostname"], repo_type, f_environment_ref, f_name),
        debug,
    )

    # NOTE(review): obj_ref is assigned but never used.
    obj_ref = ""
    # NOTE(review): 'server' is not a parameter of this function -- it
    # relies on a module-level global; confirm it is set before calling.
    all_objs = repository.get_all(server, environment=f_environment_ref)

    for obj in all_objs:
        if repo_type == "MSSqlInstance" or repo_type == "ASEInstance":
            if obj.type == repo_type and obj.name == f_name:
                print_debug(
                    "%s: Found a match %s" % (engine["hostname"], str(obj.reference)),
                    debug,
                )
                return obj

        elif repo_type == "Unstructured Files":
            # NOTE(review): 'install_type' is undefined in this function,
            # so this branch likely raises NameError when reached -- it
            # probably should compare against f_name. TODO confirm.
            if obj.value == install_type:
                print_debug(
                    "%s: Found a match %s" % (engine["hostname"], str(obj.reference)),
                    debug,
                )
                return obj

    raise DlpxException(
        "%s: No Repo match found for type %s\n" % (engine["hostname"], repo_type)
    )
def find_snapshot_by_database_and_name(engine, server, database_obj, snap_name):
    """
    Find a snapshot of a database by (prefix of) its name.

    engine: dictionary describing the Delphix Engine (uses 'hostname')
    server: Delphix session used to query snapshots
    database_obj: database (container) object whose snapshots are searched
    snap_name: snapshot name, or unique name prefix, to match
    Return the matching snapshot object when exactly one snapshot matches;
    otherwise print an error and return None.
    """
    snapshots = snapshot.get_all(server, database=database_obj.reference)
    # BUG FIX: match against the snap_name parameter instead of the global
    # arguments['--timestamp'], so the function honors its own signature.
    matches = [snapshot_obj for snapshot_obj in snapshots
               if str(snapshot_obj.name).startswith(snap_name)]

    if len(matches) == 1:
        print_debug(engine["hostname"] +
                    ": Found one and only one match. This is good.")
        # BUG FIX: str() the snapshot object -- concatenating the raw
        # object to a string raised TypeError.
        print_debug(engine["hostname"] + ": " + str(matches[0]))
        return matches[0]

    elif len(matches) > 1:
        print_error("The name specified was not specific enough. "
                    "More than one match found.")
        for each in matches:
            print_debug(engine["hostname"] + ": " + each.name)
    else:
        print_error("No matches found for the time specified")
    print_error("No matching snapshot found")
Ejemplo n.º 48
0
def main(arguments):
    """
    Script entry point: establish the Delphix session, run the workflow on
    every configured engine and translate failures into exit codes.

    arguments: docopt-style dictionary of command-line options
    """
    # We want to be able to call on these variables anywhere in the script.
    global single_thread
    global usebackup
    global time_start
    global config_file_path
    global dx_session_obj
    global debug

    if arguments['--debug']:
        debug = True

    try:
        dx_session_obj = GetSession()
        logging_est(arguments['--logdir'])
        print_debug(arguments)
        time_start = time()
        engine = None
        single_thread = False
        config_file_path = arguments['--config']
        # Parse the dxtools.conf and put it into a dictionary
        dx_session_obj.get_config(config_file_path)

        # This is the function that will handle processing main_workflow for
        # all the servers.
        run_job()

        print_info('script took {:.2f} minutes to get this far.'.format(
            time_elapsed()))

    # Here we handle what we do when the unexpected happens
    except SystemExit as e:
        # This is what we use to handle our sys.exit(#)
        sys.exit(e)

    except HttpError as e:
        # We use this exception handler when our connection to Delphix fails.
        # BUG FIX: .format(e) must be applied to the message string; the
        # original applied it to print_exception()'s return value (None),
        # raising AttributeError inside the handler.
        print_exception('Connection failed to the Delphix Engine. '
                        'Please check the ERROR message:\n{}\n'.format(e))
        sys.exit(1)

    except JobError as e:
        # We use this exception handler when a job fails in Delphix so that
        # we have actionable data
        elapsed_minutes = time_elapsed()
        print_exception('A job failed in the Delphix Engine')
        print_info('{} took {:.2f} minutes to get this far:\n{}\n'.format(
            basename(__file__), elapsed_minutes, e))
        sys.exit(3)

    except KeyboardInterrupt:
        # We use this exception handler to gracefully handle ctrl+c exits
        print_debug("You sent a CTRL+C to interrupt the process")
        elapsed_minutes = time_elapsed()
        print_info('{} took {:.2f} minutes to get this far\n'.format(
            basename(__file__), elapsed_minutes))

    except:
        # Everything else gets caught here. Intentional catch-all at the
        # script boundary: the error is reported before exiting non-zero.
        print_exception(sys.exc_info()[0])
        elapsed_minutes = time_elapsed()
        print_info('{} took {:.2f} minutes to get this far\n'.format(
            basename(__file__), elapsed_minutes))
        sys.exit(1)
def set_timeflow_point(engine, server, container_obj):
    """
    Build the timeflow point parameters for the snapshot or time named by
    the global 'arguments' dictionary.

    engine: dictionary describing the Delphix Engine (uses 'hostname')
    server: Delphix session used to look up snapshots
    container_obj: database (container) object the point refers to
    Return a TimeflowPoint* parameters object with its container set.
    Raise DlpxException when the requested point cannot be resolved.
    """
    if arguments['--timestamp_type'].upper() == "SNAPSHOT":
        if arguments['--timestamp'].upper() == "LATEST":
            print_debug('%s: Using the latest Snapshot.' %
                        (engine['hostname']), debug)
            timeflow_point_parameters = TimeflowPointSemantic()
            timeflow_point_parameters.container = container_obj.reference
            timeflow_point_parameters.location = "LATEST_SNAPSHOT"

        elif arguments['--timestamp'].startswith("@"):
            # An "@"-prefixed value names a specific snapshot.
            print_debug('%s: Using a named snapshot' % (engine['hostname']),
                        debug)
            snapshot_obj = find_snapshot_by_database_and_name(
                engine, server, container_obj, arguments['--timestamp'])

            if snapshot_obj is not None:
                timeflow_point_parameters = TimeflowPointLocation()
                timeflow_point_parameters.timeflow = snapshot_obj.timeflow
                timeflow_point_parameters.location = \
                    snapshot_obj.latest_change_point.location
            else:
                raise DlpxException('%s: Was unable to use the specified '
                                    'snapshot %s for database %s\n' %
                                    (engine['hostname'],
                                     arguments['--timestamp'],
                                     container_obj.name))

        else:
            # Otherwise treat the value as a point in time and locate the
            # snapshot that covers it.
            print_debug('%s: Using a time-designated snapshot' %
                        (engine['hostname']), debug)
            snapshot_obj = find_snapshot_by_database_and_time(
                engine, server, container_obj, arguments['--timestamp'])

            if snapshot_obj is not None:
                timeflow_point_parameters = TimeflowPointTimestamp()
                timeflow_point_parameters.timeflow = snapshot_obj.timeflow
                timeflow_point_parameters.timestamp = \
                    snapshot_obj.latest_change_point.timestamp
            else:
                raise DlpxException('%s: Was unable to find a suitable time '
                                    ' for %s for database %s.\n' %
                                    (engine['hostname'],
                                     arguments['--timestamp'],
                                     container_obj.name))

    elif arguments['--timestamp_type'].upper() == "TIME":
        if arguments['--timestamp'].upper() == "LATEST":
            timeflow_point_parameters = TimeflowPointSemantic()
            timeflow_point_parameters.location = "LATEST_POINT"
        else:
            # BUG FIX: the original read "'of time' %s (engine['hostname'])",
            # which parses as "'of time' % s(...)" and raises NameError on
            # the undefined name 's' at runtime.
            raise DlpxException('%s: Only support a --timestamp value of '
                                '"latest" when used with timestamp_type '
                                'of time' % (engine['hostname']))

    else:
        raise DlpxException('%s is not a valid timestamp_type. Exiting\n' %
                            (arguments['--timestamp_type']))

    timeflow_point_parameters.container = container_obj.reference
    return timeflow_point_parameters
def main():
    """
    Entry point: connect to the configured Delphix Engines, run the main
    workflow and map each failure class onto a distinct exit code.
    """
    # These need to be reachable from anywhere in the script.
    global single_thread
    global debug

    time_start = time()
    single_thread = False

    try:
        dx_session_obj = GetSession()
        logging_est(arguments['--logdir'])
        print_debug(arguments)
        config_file_path = arguments['--config']
        # Load dxtools.conf into the session object.
        dx_session_obj.get_config(config_file_path)
        # Dispatch main_workflow against every configured engine.
        run_job(dx_session_obj, config_file_path)
        minutes = time_elapsed(time_start)
        print_info('script took {:.2f} minutes to get this far.'.format(
            minutes))

    except SystemExit as e:
        # Propagate explicit sys.exit(#) calls unchanged.
        sys.exit(e)

    except DlpxException as e:
        # An error raised by one of our own helper functions.
        print_exception('ERROR: Please check the ERROR message below:\n'
                        '{}'.format(e.message))
        sys.exit(2)

    except HttpError as e:
        # The connection to the Delphix Engine failed.
        print_exception('ERROR: Connection failed to the Delphix Engine. Please'
                        'check the ERROR message below:\n{}'.format(e.message))
        sys.exit(2)

    except JobError as e:
        # A Delphix job failed; surface the job details for action.
        print_exception('A job failed in the Delphix Engine:\n{}'.format(e.job))
        minutes = time_elapsed(time_start)
        print_exception('{} took {:.2f} minutes to get this far'.format(
            basename(__file__), minutes))
        sys.exit(3)

    except KeyboardInterrupt:
        # Exit gracefully on ctrl+c.
        print_debug('You sent a CTRL+C to interrupt the process')
        minutes = time_elapsed(time_start)
        print_info('{} took {:.2f} minutes to get this far'.format(
            basename(__file__), minutes))

    except:
        # Catch-all at the script boundary: report and exit non-zero.
        print_exception('{}\n{}'.format(sys.exc_info()[0],
                                        traceback.format_exc()))
        minutes = time_elapsed(time_start)
        print_info("{} took {:.2f} minutes to get this far".format(
            basename(__file__), minutes))
        sys.exit(1)
Ejemplo n.º 51
0
def main_workflow(engine, dlpx_obj):
    """
    This function is where we create our main workflow.
    Use the @run_async decorator to run this function asynchronously.
    The @run_async decorator allows us to run against multiple Delphix Engine
    simultaneously

    :param engine: Dictionary of engines
    :type engine: dictionary
    :param dlpx_obj: Virtualization Engine session object
    :type dlpx_obj: lib.GetSession.GetSession
    """

    try:
        # Setup the connection to the Delphix Engine
        dlpx_obj.serversess(engine['ip_address'], engine['username'],
                            engine['password'])
    except DlpxException as e:
        print_exception('ERROR: js_bookmark encountered an error authenticating'
                        ' to {} {}:\n{}\n'.format(engine['hostname'],
                                                  arguments['--target'], e))
        sys.exit(1)

    thingstodo = ["thingtodo"]
    try:
        with dlpx_obj.job_mode(single_thread):
            # Loop until the requested operation has been dispatched and all
            # resulting jobs reach a terminal state.
            while len(dlpx_obj.jobs) > 0 or len(thingstodo) > 0:
                if len(thingstodo) > 0:
                    if arguments['--create_bookmark']:
                        create_bookmark(dlpx_obj,
                                        arguments['--create_bookmark'],
                                        arguments['--data_layout'],
                                        arguments['--branch_name']
                                        if arguments['--branch_name']
                                        else None,
                                        arguments['--tags']
                                        if arguments['--tags'] else None,
                                        arguments['--description']
                                        if arguments['--description'] else None)
                    elif arguments['--delete_bookmark']:
                        delete_bookmark(dlpx_obj,
                                        arguments['--delete_bookmark'])
                    elif arguments['--update_bookmark']:
                        update_bookmark(dlpx_obj,
                                        arguments['--update_bookmark'])
                    elif arguments['--share_bookmark']:
                        share_bookmark(dlpx_obj,
                                       arguments['--share_bookmark'])
                    elif arguments['--unshare_bookmark']:
                        unshare_bookmark(dlpx_obj,
                                         arguments['--unshare_bookmark'])
                    elif arguments['--list_bookmarks']:
                        list_bookmarks(dlpx_obj,
                            arguments['--tags'] if arguments['--tags'] else None)
                    thingstodo.pop()
                # get all the jobs, then inspect them
                i = 0
                for j in dlpx_obj.jobs.keys():
                    job_obj = job.get(dlpx_obj.server_session,
                                      dlpx_obj.jobs[j])
                    print_debug(job_obj)
                    print_info('{}: Running JS Bookmark: {}'.format(
                        engine['hostname'], job_obj.job_state))
                    if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
                        # If the job is in a non-running state, remove it
                        # from the running jobs list.
                        del dlpx_obj.jobs[j]
                    elif job_obj.job_state in 'RUNNING':
                        # If the job is in a running state, increment the
                        # running job count.
                        i += 1
                # BUG FIX: report the count and poll once per pass over the
                # job list, not once per job -- the original printed and
                # slept inside the for loop (matching the other
                # main_workflow variants in this file).
                print_info('{}: {:d} jobs running.'.format(
                    engine['hostname'], i))
                # If we have running jobs, pause before repeating the checks.
                if len(dlpx_obj.jobs) > 0:
                    sleep(float(arguments['--poll']))
    except (DlpxException, RequestError, JobError, HttpError) as e:
        print_exception('Error in js_bookmark: {}\n{}'.format(
            engine['hostname'], e))
        sys.exit(1)
def main(arguments):
    """
    Script entry point: set globals, establish the Delphix session, run
    the workflow and convert failures into distinct exit codes.

    arguments: docopt-style dictionary of command-line options
    """
    # We want to be able to call on these variables anywhere in the script.
    global single_thread
    global usebackup
    global time_start
    global config_file_path
    global dx_session_obj
    global debug

    if arguments['--debug']:
        debug = True

    try:
        dx_session_obj = GetSession()
        logging_est(arguments['--logdir'])
        print_debug(arguments)
        time_start = time()
        single_thread = False
        config_file_path = arguments['--config']
        # Parse the dxtools.conf and put it into a dictionary
        dx_session_obj.get_config(config_file_path)

        # This is the function that will handle processing main_workflow for
        # all the servers.
        run_job()

        elapsed_minutes = time_elapsed()
        print_info('script took {:.2f} minutes to get this far.'.format(
            elapsed_minutes))

    # Here we handle what we do when the unexpected happens
    except SystemExit as e:
        """
        This is what we use to handle our sys.exit(#)
        """
        sys.exit(e)

    except HttpError as e:
        """
        We use this exception handler when our connection to Delphix fails
        """
        # NOTE(review): the adjacent string literals below concatenate with
        # no separator between "Engine" and "Please"; fix the literal if
        # user-facing output matters.
        print_exception('Connection failed to the Delphix Engine'
                        'Please check the ERROR message:\n{}'.format(e))
        sys.exit(1)

    except JobError as e:
        """
        We use this exception handler when a job fails in Delphix so that
        we have actionable data
        """
        elapsed_minutes = time_elapsed()
        print_exception('A job failed in the Delphix Engine')
        print_info('{} took {:.2f} minutes to get this far\n{}'.format(
                   basename(__file__), elapsed_minutes, e))
        sys.exit(3)

    except KeyboardInterrupt:
        """
        We use this exception handler to gracefully handle ctrl+c exits
        """
        print_debug("You sent a CTRL+C to interrupt the process")
        elapsed_minutes = time_elapsed()
        print_info('{} took {:.2f} minutes to get this far\n'.format(
                   basename(__file__), elapsed_minutes))

    except:
        """
        Everything else gets caught here
        """
        # Intentional catch-all at the script boundary; the error is
        # reported before exiting non-zero.
        print_exception(sys.exc_info()[0])
        elapsed_minutes = time_elapsed()
        print_info('{} took {:.2f} minutes to get this far\n'.format(
                   basename(__file__), elapsed_minutes))
        sys.exit(1)
def create_linux_env(dlpx_obj, env_name, host_user, ip_addr, toolkit_path,
                     pw=None):
    """
    Create a Linux environment on the Delphix Engine.

    dlpx_obj: Virtualization Engine session object
    env_name: name to give the new environment
    host_user: server account used to authenticate
    ip_addr: DNS name or IP address of the environment
    toolkit_path: path to the toolkit; must be writable by host_user
    pw: password for host_user; None means authenticate with SSH keys
    """
    engine_name = dlpx_obj.dlpx_engines.keys()[0]
    env_params_obj = HostEnvironmentCreateParameters()

    # Pick the credential type based on whether a password was supplied.
    if pw is None:
        print_debug('Creating the environment with SSH Keys')
        credential = {'type': 'SystemKeyCredential'}
    else:
        print_debug('Creating the environment with a password')
        credential = {'type': 'PasswordCredential', 'password': pw}

    env_params_obj.primary_user = {
        'type': 'EnvironmentUser',
        'name': host_user,
        'credential': credential,
    }
    env_params_obj.host_parameters = {
        'type': 'UnixHostCreateParameters',
        'host': {
            'address': ip_addr,
            'type': 'UnixHost',
            'name': env_name,
            'toolkitPath': toolkit_path,
        },
    }
    env_params_obj.host_environment = UnixHostEnvironment()
    env_params_obj.host_environment.name = env_name

    if arguments['--ase']:
        # Attach SAP ASE credentials when requested on the command line.
        ase_params = ASEHostEnvironmentParameters()
        env_params_obj.host_environment.ase_host_environment_parameters = \
            ase_params
        try:
            ase_params.db_user = arguments['--ase_user']
            ase_params.credentials = {
                'type': 'PasswordCredential',
                'password': arguments['--ase_pw'],
            }
        except KeyError:
            print_exception('The --ase_user and --ase_pw arguments are'
                            ' required with the --ase flag.\n')

    try:
        environment.create(dlpx_obj.server_session, env_params_obj)
        # Track the job so callers can poll it to completion.
        dlpx_obj.jobs[engine_name] = dlpx_obj.server_session.last_job
    except (DlpxException, RequestError, HttpError) as e:
        print('\nERROR: Encountered an exception while creating the '
              'environment:\n{}'.format(e))
    except JobError as e:
        print_exception('JobError while creating environment {}:\n{}'.format(
            e, e.message))
Ejemplo n.º 54
0
def main_workflow(engine):
    """
    This function actually runs the jobs.
    Use the @run_async decorator to run this function asynchronously.
    This allows us to run against multiple Delphix Engine simultaneously

    engine: Dictionary containing engine information (uses 'hostname',
            'ip_address', 'username', 'password')

    NOTE(review): relies on module-level globals not passed in --
    dx_session_obj, arguments, host_name, database_name, single_thread,
    debug, and (for the 'ase' branch) server. Confirm they are all set
    before this runs.
    """

    # Establish these variables as empty for use later
    environment_obj = None
    source_objs = None
    jobs = {}

    try:
        # Setup the connection to the Delphix Engine
        dx_session_obj.serversess(
            engine["ip_address"], engine["username"], engine["password"]
        )

        group_obj = find_obj_by_name(
            dx_session_obj.server_session, group, arguments["--target_grp"]
        )

        # Get the reference of the target environment.
        print_debug("Getting environment for %s\n" % (host_name), debug)

        # Get the environment object by the hostname
        environment_obj = find_obj_by_name(
            dx_session_obj.server_session, environment, host_name
        )

    except DlpxException as e:
        print (
            "\nERROR: Engine %s encountered an error while provisioning "
            "%s:\n%s\n" % (engine["hostname"], arguments["--target"], e)
        )
        sys.exit(1)

    print_debug(
        "Getting database information for %s\n" % (arguments["--source"]), debug
    )
    try:
        # Get the database reference we are copying from the database name
        database_obj = find_obj_by_name(
            dx_session_obj.server_session, database, arguments["--source"]
        )
    except DlpxException:
        # Source database not found: give up on this engine silently
        # (find_obj_by_name is expected to have reported the error).
        return

    # Sentinel list: one entry means "the provision call has not been
    # dispatched yet"; popped after dispatch.
    thingstodo = ["thingtodo"]
    # reset the running job count before we begin
    i = 0

    try:
        with dx_session_obj.job_mode(single_thread):
            # Loop until the provision has been dispatched and all resulting
            # jobs reach a terminal state.
            while len(jobs) > 0 or len(thingstodo) > 0:
                arg_type = arguments["--type"].lower()
                if len(thingstodo) > 0:

                    # Dispatch the provision call that matches --type.
                    if arg_type == "oracle":
                        create_oracle_si_vdb(
                            engine,
                            jobs,
                            database_name,
                            group_obj,
                            environment_obj,
                            database_obj,
                            arguments["--prerefresh"],
                            arguments["--postrefresh"],
                            arguments["--prerollback"],
                            arguments["--postrollback"],
                            arguments["--configure-clone"],
                        )

                    elif arg_type == "ase":
                        create_ase_vdb(
                            engine,
                            server,
                            jobs,
                            group_obj,
                            database_name,
                            environment_obj,
                            database_obj,
                        )

                    elif arg_type == "mssql":
                        create_mssql_vdb(
                            engine,
                            jobs,
                            group_obj,
                            database_name,
                            environment_obj,
                            database_obj,
                        )

                    elif arg_type == "vfiles":
                        create_vfiles_vdb(
                            engine,
                            jobs,
                            group_obj,
                            database_name,
                            environment_obj,
                            database_obj,
                            arguments["--prerefresh"],
                            arguments["--postrefresh"],
                            arguments["--prerollback"],
                            arguments["--postrollback"],
                            arguments["--configure-clone"],
                        )

                    thingstodo.pop()

                # get all the jobs, then inspect them
                i = 0
                for j in jobs.keys():
                    job_obj = job.get(dx_session_obj.server_session, jobs[j])
                    print_debug(job_obj, debug)
                    print_info(
                        engine["hostname"] + ": VDB Provision: " + job_obj.job_state
                    )

                    if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
                        # If the job is in a non-running state, remove it from
                        # the running jobs list.
                        del jobs[j]
                    else:
                        # If the job is in a running state, increment the
                        # running job count.
                        i += 1

                print_info("%s: %s jobs running." % (engine["hostname"], str(i)))

                # If we have running jobs, pause before repeating the checks.
                if len(jobs) > 0:
                    sleep(float(arguments["--poll"]))

    except (DlpxException, JobError) as e:
        print("\nError while provisioning %s:\n%s" % (database_name, e.message))
        sys.exit(1)
def main_workflow(engine, dlpx_obj):
    """
    This function is where we create our main workflow.
    Use the @run_async decorator to run this function asynchronously.
    The @run_async decorator allows us to run against multiple Delphix Engine
    simultaneously

    :param engine: Dictionary of engines
    :type engine: dictionary
    :param dlpx_obj: Virtualization Engine session object
    :type dlpx_obj: lib.GetSession.GetSession
    """

    try:
        # Setup the connection to the Delphix Engine
        dlpx_obj.serversess(engine['ip_address'], engine['username'],
                            engine['password'])
    except DlpxException as e:
        print_exception('ERROR: Engine {} encountered an error while' 
                        '{}:\n{}\n'.format(engine['hostname'],
                        arguments['--target'], e))
        sys.exit(1)

    thingstodo = ["thingtodo"]
    try:
        with dlpx_obj.job_mode(single_thread):
            # Loop until the requested operation has been dispatched and all
            # resulting jobs reach a terminal state.
            while len(dlpx_obj.jobs) > 0 or len(thingstodo) > 0:
                if len(thingstodo) > 0:
                    if (arguments['--type'] == 'linux' or
                            arguments['--type'] == 'windows'):
                        env_name = arguments['--env_name']
                        host_user = arguments['--host_user']
                        pw = arguments['--pw']
                        ip_addr = arguments['--ip']
                        host_name = arguments['--connector_name']
                        if arguments['--type'] == 'linux':
                            toolkit_path = arguments['--toolkit']
                            create_linux_env(dlpx_obj, env_name, host_user,
                                             ip_addr, toolkit_path, pw)
                        else:
                            create_windows_env(dlpx_obj, env_name, host_user,
                                               ip_addr, pw, host_name,)
                    elif arguments['--delete']:
                        delete_env(dlpx_obj, arguments['--delete'])
                    elif arguments['--refresh']:
                        refresh_env(dlpx_obj, arguments['--refresh'])
                    elif arguments['--update_ase_pw']:
                        update_ase_pw(dlpx_obj)
                    elif arguments['--update_ase_user']:
                        update_ase_username(dlpx_obj)
                    elif arguments['--list']:
                        list_env(dlpx_obj)
                    elif arguments['--update_host']:
                        update_host_address(dlpx_obj,
                                            arguments['--old_host_address'],
                                            arguments['--new_host_address'])
                    elif arguments['--enable']:
                        enable_environment(dlpx_obj, arguments['--env_name'])
                    elif arguments['--disable']:
                        disable_environment(dlpx_obj, arguments['--env_name'])
                    thingstodo.pop()
                # get all the jobs, then inspect them
                i = 0
                for j in dlpx_obj.jobs.keys():
                    job_obj = job.get(dlpx_obj.server_session, dlpx_obj.jobs[j])
                    print_debug(job_obj)
                    print_info('{} Environment: {}'.format(
                               engine['hostname'], job_obj.job_state))
                    if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
                        # If the job is in a non-running state, remove it
                        # from the running jobs list.
                        del dlpx_obj.jobs[j]
                    elif job_obj.job_state in 'RUNNING':
                        # If the job is in a running state, increment the
                        # running job count.
                        i += 1
                # BUG FIX: report the count and poll once per pass over the
                # job list, not once per job -- the original printed and
                # slept inside the for loop (matching the other
                # main_workflow variants in this file).
                print_info('{}: {:d} jobs running.'.format(
                    engine['hostname'], i))
                # If we have running jobs, pause before repeating the checks.
                if len(dlpx_obj.jobs) > 0:
                    sleep(float(arguments['--poll']))
    except (DlpxException, RequestError, JobError, HttpError) as e:
        print_exception('Error while creating the environment {}\n{}'.format(
            arguments['--env_name'], e))
        sys.exit(1)
Ejemplo n.º 56
0
def main():
    """
    Script entry point: establish the Delphix session, run main_workflow
    against every configured engine, and translate failures into exit codes.

    Exit codes: 1 for unexpected errors, 2 for Delphix/connection errors,
    3 for Delphix job failures.
    """
    # These globals are read by worker functions elsewhere in the module.
    global single_thread
    global debug

    time_start = time()
    single_thread = False

    try:
        dx_session_obj = GetSession()
        logging_est(arguments['--logdir'])
        print_debug(arguments)
        config_file_path = arguments['--config']
        # Parse the dxtools.conf and put it into a dictionary
        dx_session_obj.get_config(config_file_path)

        # This is the function that will handle processing main_workflow for
        # all the servers.
        run_job(dx_session_obj, config_file_path)

        elapsed_minutes = time_elapsed(time_start)
        print_info('script took {:.2f} minutes to get this far.'.format(
            elapsed_minutes))

    # Here we handle what we do when the unexpected happens
    except SystemExit as e:
        # Propagate explicit sys.exit(#) calls unchanged.
        sys.exit(e)

    except DlpxException as e:
        # An error occurred inside one of our library calls.
        # Format the exception directly: str(e) works on both Python 2
        # and 3, whereas the old e.message attribute was removed in 3.
        print_exception('ERROR: Please check the ERROR message below:\n'
                        '{}'.format(e))
        sys.exit(2)

    except HttpError as e:
        # The connection to the Delphix Engine failed.
        # Fixed: the original implicit string concatenation was missing a
        # space and printed "Pleasecheck".
        print_exception('ERROR: Connection failed to the Delphix Engine. '
                        'Please check the ERROR message below:\n{}'.format(e))
        sys.exit(2)

    except JobError as e:
        # A job failed in Delphix; surface the job details so the failure
        # is actionable.
        print_exception('A job failed in the Delphix Engine:\n{}'.format(e.job))
        elapsed_minutes = time_elapsed(time_start)
        print_exception('{} took {:.2f} minutes to get this far'.format(
            basename(__file__), elapsed_minutes))
        sys.exit(3)

    except KeyboardInterrupt:
        # Gracefully handle ctrl+c exits.
        print_debug('You sent a CTRL+C to interrupt the process')
        elapsed_minutes = time_elapsed(time_start)
        print_info('{} took {:.2f} minutes to get this far'.format(
            basename(__file__), elapsed_minutes))
    except Exception:
        # Everything else gets caught here. 'except Exception' rather than
        # a bare except so system-exiting exceptions (e.g. GeneratorExit)
        # are not silently swallowed.
        print_exception('{}\n{}'.format(sys.exc_info()[0],
                                        traceback.format_exc()))
        elapsed_minutes = time_elapsed(time_start)
        print_info("{} took {:.2f} minutes to get this far".format(
            basename(__file__), elapsed_minutes))
        sys.exit(1)
Ejemplo n.º 57
0
def main_workflow(engine, dlpx_obj):
    """
    This function is where we create our main workflow.
    Use the @run_async decorator to run this function asynchronously.
    The @run_async decorator allows us to run against multiple Delphix Engine
    simultaneously

    :param engine: Dictionary of engines
    :type engine: dictionary
    :param dlpx_obj: Virtualization Engine session object
    :type dlpx_obj: lib.GetSession.GetSession
    """

    try:
        # Setup the connection to the Delphix Engine
        dlpx_obj.serversess(engine['ip_address'], engine['username'],
                            engine['password'])
    except DlpxException as e:
        print_exception(
            'ERROR: js_bookmark encountered an error authenticating'
            ' to {} {}:\n{}\n'.format(engine['hostname'],
                                      arguments['--target'], e))
        sys.exit(1)

    # Single sentinel entry so the dispatch branch below runs exactly once
    # before the loop degenerates into pure job polling.
    thingstodo = ["thingtodo"]
    try:
        with dlpx_obj.job_mode(single_thread):
            while len(dlpx_obj.jobs) > 0 or len(thingstodo) > 0:
                if len(thingstodo) > 0:
                    # Dispatch exactly one bookmark operation based on the
                    # command-line arguments.
                    if arguments['--create_bookmark']:
                        create_bookmark(
                            dlpx_obj, arguments['--create_bookmark'],
                            arguments['--data_layout'],
                            arguments['--branch_name']
                            if arguments['--branch_name'] else None,
                            arguments['--tag'] if arguments['--tag'] else None,
                            arguments['--description']
                            if arguments['--description'] else None)
                    elif arguments['--delete_bookmark']:
                        delete_bookmark(dlpx_obj,
                                        arguments['--delete_bookmark'])
                    elif arguments['--update_bookmark']:
                        update_bookmark(dlpx_obj,
                                        arguments['--update_bookmark'])
                    elif arguments['--share_bookmark']:
                        share_bookmark(dlpx_obj, arguments['--share_bookmark'])
                    elif arguments['--unshare_bookmark']:
                        unshare_bookmark(dlpx_obj,
                                         arguments['--unshare_bookmark'])
                    elif arguments['--list_bookmarks']:
                        list_bookmarks(
                            dlpx_obj,
                            arguments['--tag'] if arguments['--tag'] else None)
                    thingstodo.pop()
                # get all the jobs, then inspect them
                i = 0
                # Snapshot the keys: we delete finished jobs inside the
                # loop, and mutating a dict while iterating its live
                # .keys() view raises RuntimeError on Python 3.
                for j in list(dlpx_obj.jobs.keys()):
                    job_obj = job.get(dlpx_obj.server_session,
                                      dlpx_obj.jobs[j])
                    print_debug(job_obj)
                    print_info('{}: Running JS Bookmark: {}'.format(
                        engine['hostname'], job_obj.job_state))
                    if job_obj.job_state in [
                            "CANCELED", "COMPLETED", "FAILED"
                    ]:
                        # If the job is in a non-running state, remove it
                        # from the running jobs list.
                        del dlpx_obj.jobs[j]
                    elif job_obj.job_state == 'RUNNING':
                        # If the job is in a running state, increment the
                        # running job count. Fixed: the original used
                        # `in 'RUNNING'`, a substring test that would also
                        # match states like 'RUN'; equality is intended.
                        i += 1
                    print_info('{}: {:d} jobs running.'.format(
                        engine['hostname'], i))
                    # If we have running jobs, pause before repeating the
                    # checks.
                    if len(dlpx_obj.jobs) > 0:
                        sleep(float(arguments['--poll']))
    except (DlpxException, RequestError, JobError, HttpError) as e:
        print_exception('Error in js_bookmark: {}\n{}'.format(
            engine['hostname'], e))
        sys.exit(1)
Ejemplo n.º 58
0
def main_workflow(engine, dlpx_obj):
    """
    This function is where we create our main workflow.
    Use the @run_async decorator to run this function asynchronously.
    The @run_async decorator allows us to run against multiple Delphix Engine
    simultaneously

    :param engine: Dictionary of engines
    :param dlpx_obj: Virtualization Engine session object
    """

    #Establish these variables as empty for use later
    environment_obj = None
    source_objs = None

    try:
        #Setup the connection to the Delphix Engine
        dlpx_obj.serversess(engine['ip_address'], engine['username'],
                                  engine['password'])
    except DlpxException as e:
        print_exception('\nERROR: Engine {} encountered an error while '
                        'provisioning {}:\n{}\n'.format(engine['hostname'],
                                                        arguments['--target'],
                                                        e))
        sys.exit(1)

    thingstodo = ["thingtodo"]
    try:
        with dlpx_obj.job_mode(single_thread):
            while (len(dlpx_obj.jobs) > 0 or len(thingstodo) > 0):
                if len(thingstodo) > 0:
                    if arguments['--create_branch']:
                        create_branch(dlpx_obj, arguments['--create_branch'],
                                      arguments['--container_name'],
                                      arguments['--template_name']
                                      if arguments['--template_name'] else None,
                                      arguments['--bookmark_name']
                                      if arguments['--bookmark_name'] else None)
                    elif arguments['--delete_branch']:
                        delete_branch(dlpx_obj, arguments['--delete_branch'])
                    elif arguments['--update_branch']:
                        update_branch(dlpx_obj, arguments['--update_branch'])
                    elif arguments['--activate_branch']:
                        activate_branch(dlpx_obj,
                                        arguments['--activate_branch'])
                    elif arguments['--list_branches']:
                        list_branches(dlpx_obj)
                    thingstodo.pop()
                # get all the jobs, then inspect them
                i = 0
                for j in dlpx_obj.jobs.keys():
                    job_obj = job.get(dlpx_obj.server_session,
                                      dlpx_obj.jobs[j])
                    print_debug(job_obj)
                    print_info('{}: Provisioning JS Branch: {}'.format(
                        engine['hostname'], job_obj.job_state))
                    if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
                        # If the job is in a non-running state, remove it
                        # from the running jobs list.
                        del dlpx_obj.jobs[j]
                    elif job_obj.job_state in 'RUNNING':
                        # If the job is in a running state, increment the
                        # running job count.
                        i += 1
                    print_info('{}: {:d} jobs running.'.format(
                        engine['hostname'], i))
                    # If we have running jobs, pause before repeating the
                    # checks.
                    if len(dlpx_obj.jobs) > 0:
                        sleep(float(arguments['--poll']))
    except (DlpxException, RequestError, JobError, HttpError) as e:
        print_exception('\nError in js_branch: {}\n{}'.format(
            engine['hostname'], e))