def main():
    """Ansible entry point: restore a Cohesity Oracle database backup.

    Builds the argument spec, authenticates, locates the source database
    snapshot, submits a recover job, then polls the restore task and exits
    with the final status.
    """
    # => Load the default arguments including those specific to the
    # => Cohesity Protection Jobs.
    argument_spec = cohesity_common_argument_spec()
    # NOTE: normalized bare `str`/`int` to the conventional Ansible string
    # form ('str'/'int'); AnsibleModule accepts both, behavior unchanged.
    argument_spec.update(
        dict(task_name=dict(type='str'),
             source_db=dict(type='str', required=True),
             source_server=dict(type='str', required=True),
             target_db=dict(type='str', required=True),
             target_server=dict(type='str', required=True),
             oracle_home=dict(type='str', required=True),
             oracle_base=dict(type='str', required=True),
             oracle_data=dict(type='str', required=True),
             channels=dict(type='str', required=False),
             control_file=dict(type='str', default=''),
             redo_log_path=dict(type='str', default=''),
             audit_path=dict(type='str', default=''),
             diag_path=dict(type='str', default=''),
             fra_path=dict(type='str', default=''),
             fra_size_mb=dict(type='int', default=2048),
             bct_file=dict(type='str', default=''),
             log_time=dict(type='str', default=''),
             view_name=dict(type='str', default=''),
             overwrite=dict(type='bool', default=False),
             no_recovery=dict(type='bool', default=False)))

    # => Create a new module object
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    global cohesity_client
    cohesity_client = get_cohesity_client(module)
    token = get__cohesity_auth__token(module)
    database_info = search_for_database(token, module)
    resp = create_recover_job(module, token, database_info)

    # Check for restore task status.
    task_id = resp['restoreTask']['performRestoreTaskState']['base']['taskId']
    status = check_for_status(module, task_id)

    # When restoring to a Cohesity view the target is the view name;
    # otherwise it is the target server.
    view_name = module.params.get('view_name')
    target, server = (view_name, 'view') if view_name else \
        (module.params.get('target_server'), 'server')
    msg = 'Successfully restored task to %s %s' % (server, target)
    if not status:
        # BUGFIX: was `status == False`; also fixed the "occured" typo.
        msg = 'Error occurred during task recovery.'
    module.exit_json(changed=status, output=msg)
    # BUGFIX: removed an unreachable second `module.exit_json(**results)`
    # (exit_json raises SystemExit) and the never-used `results` dict.
def main():
    """Ansible entry point: create, update or delete a Cohesity view."""
    # => Load the default arguments including those specific to the
    # => Cohesity view.  (BUGFIX: comment previously said "protection
    # => policy", but this module manages views.)
    argument_spec = cohesity_common_argument_spec()
    argument_spec.update(
        dict(
            # NOTE: normalized bare `str` to 'str' for consistency with the
            # other entries; AnsibleModule accepts both.
            name=dict(type='str', required=True),
            description=dict(type='str', default=''),
            state=dict(choices=['present', 'absent'], default='present'),
            storage_domain=dict(type='str', required=True),
            qos_policy=dict(type='str', default='Backup Target Low'),
            protocol=dict(type='str', default='All'),
            case_insensitive=dict(type='bool', required=True),
            object_key_pattern=dict(type='str', required=False),
            inline_dedupe_compression=dict(type='bool', default=False),
            security=dict(type='dict', required=False),
            quota=dict(type='dict', required=False),
            nfs_options=dict(type='dict', required=False),
            smb_options=dict(type='dict', required=False)
        )
    )

    # => Create a new module object
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    results = dict(
        changed=False,
        msg="Attempting to manage Cohesity view",
        state=module.params.get('state')
    )
    global cohesity_client
    # Tag all SDK requests with this module's user-agent.
    base_controller = BaseController()
    base_controller.global_headers['user-agent'] = 'cohesity-ansible/v2.3.4'
    cohesity_client = get_cohesity_client(module)
    view_exists, view_details = get_view_details(module)

    if module.check_mode:
        check_mode_results = dict(
            changed=False,
            msg="Check Mode: Cohesity view doesn't exist",
            name=""
        )
        if module.params.get('state') == "present":
            if view_exists:
                check_mode_results['msg'] =\
                    "Check Mode: Cohesity view is updated."
                check_mode_results['name'] = view_details.name
                check_mode_results['changed'] = True
            else:
                check_mode_results['msg'] =\
                    "Check Mode: Cohesity view doesn't exist." \
                    " This action would create a new Cohesity view"
                check_mode_results['changed'] = True
        else:
            if view_exists:
                # BUGFIX: the two implicitly-concatenated literals were
                # missing a separating space ("present.This action ...").
                check_mode_results['msg'] =\
                    "Check Mode: Cohesity view is present." \
                    " This action would delete the view."
                check_mode_results['name'] = view_details.name
                check_mode_results['changed'] = True
            else:
                check_mode_results['msg'] =\
                    "Check Mode: Cohesity view doesn't exist. No changes."
        module.exit_json(**check_mode_results)
    elif module.params.get('state') == "present":
        # NOTE(review): update_view/create_view are assumed to call
        # exit_json themselves; `results` is not refreshed here — confirm.
        if view_exists:
            update_view(module)
        else:
            create_view(module)
    elif module.params.get('state') == "absent":
        if view_exists:
            delete_view(module)
            results = dict(
                changed=True,
                msg="Cohesity view is deleted successfully",
                view_name=module.params.get('name')
            )
        else:
            results = dict(
                changed=False,
                msg="Cohesity view doesn't exist",
                view_name=module.params.get('name')
            )
    else:
        module.fail_json(msg="Invalid State selected: {}".format(
            module.params.get('state')), changed=False)
    module.exit_json(**results)
def main():
    """Ansible entry point: recover VMware virtual machines from a
    Cohesity backup, optionally to an alternate (restore-to-source)
    location with datastore/resource-pool/network/folder overrides.
    """
    # => Load the default arguments including those specific to the
    # => Cohesity Protection Jobs.
    argument_spec = cohesity_common_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type='str', required=True),
            state=dict(choices=['present', 'absent', 'started', 'stopped'],
                       default='present'),
            endpoint=dict(type='str', required=True),
            restore_to_source=dict(type='str', default=''),
            job_name=dict(type='str', default=''),
            backup_id=dict(type='int'),
            backup_timestamp=dict(type='str'),
            # => Currently, the only supported environments types are list in the choices
            # => For future enhancements, the below list should be consulted.
            # => 'SQL', 'View', 'Puppeteer', 'Pure', 'Netapp', 'HyperV', 'Acropolis', 'Azure'
            environment=dict(choices=['VMware'], default='VMware'),
            vm_names=dict(type='list'),
            wait_for_job=dict(type='bool', default=True),
            # NOTE(review): declared type 'str' with an int default — kept
            # as-is to avoid changing the module interface; confirm intent.
            wait_minutes=dict(type='str', default=20),
            datastore_id=dict(type='int'),
            datastore_name=dict(type='str', default=''),
            datastore_folder_id=dict(type='int'),
            interface_group_name=dict(type='str'),
            network_connected=dict(type='bool', default=True),
            network_id=dict(type='int'),
            network_name=dict(type='str'),
            power_state=dict(type='bool', default=True),
            prefix=dict(type='str'),
            resource_pool_id=dict(type='int'),
            resource_pool_name=dict(type='str', default=''),
            recovery_process_type=dict(type='str', default='InstantRecovery'),
            suffix=dict(type='str'),
            vm_folder_id=dict(type='int'),
            vm_folder_name=dict(type='str')))

    # => Create a new module object
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    results = dict(changed=False,
                   msg="Attempting to manage Protection Source",
                   state=module.params.get('state'))
    job_details = dict(
        token=get__cohesity_auth__token(module),
        endpoint=module.params.get('endpoint'),
        job_name=module.params.get('job_name'),
        environment=module.params.get('environment'),
    )
    # Restore task name is "<job_name>: <name>" when a job name is given.
    if module.params.get('job_name'):
        job_details['name'] = module.params.get(
            'job_name') + ": " + module.params.get('name')
    else:
        job_details['name'] = module.params.get('name')
    if module.params.get('backup_id'):
        job_details['jobRunId'] = module.params.get('backup_id')
    if module.params.get('backup_timestamp'):
        job_details['backup_timestamp'] = module.params.get('backup_timestamp')
    job_exists = check__protection_restore__exists(module, job_details)

    if module.check_mode:
        check_mode_results = dict(
            changed=False,
            msg="Check Mode: Cohesity Protection Restore Job is not currently registered",
            id="")
        if module.params.get('state') == "present":
            if job_exists:
                check_mode_results[
                    'msg'] = "Check Mode: Cohesity Protection Restore Job is currently registered. No changes"
            else:
                check_mode_results[
                    'msg'] = "Check Mode: Cohesity Protection Restore Job is not currently registered. This action would register the Cohesity Protection Job."
                check_mode_results['id'] = job_exists
        else:
            if job_exists:
                check_mode_results[
                    'msg'] = "Check Mode: Cohesity Protection Restore Job is currently registered. This action would unregister the Cohesity Protection Job."
                check_mode_results['id'] = job_exists
            else:
                check_mode_results[
                    'msg'] = "Check Mode: Cohesity Protection Restore Job is not currently registered. No changes."
        module.exit_json(**check_mode_results)
    elif module.params.get('state') == "present":
        if job_exists:
            # BUGFIX: message read "The Restore Job for is already registered".
            results = dict(changed=False,
                           msg="The Restore Job is already registered",
                           id=job_exists,
                           name=job_details['name'])
        else:
            # check__mandatory__params(module)
            environment = module.params.get('environment')
            response = []
            if environment == "VMware":
                job_details['vm_names'] = module.params.get('vm_names')
                source_object_info = get_snapshot_information_for_vmname(
                    module, job_details)
                restore_data = dict(
                    name=job_details['name'],
                    vm_names=module.params.get('vm_names'),
                    objects=source_object_info,
                    token=job_details['token'],
                    type="kRecoverVMs",
                    vmwareParameters=dict(
                        poweredOn=module.params.get('power_state'),
                        # NOTE(review): 'disableNetwork' is set directly
                        # from 'network_connected' — looks inverted
                        # (should this be `not network_connected`?);
                        # kept as-is, confirm against the API.
                        disableNetwork=module.params.get('network_connected'),
                        recoveryProcessType='k' +
                        module.params.get('recovery_process_type')))
                if module.params.get('prefix'):
                    restore_data['vmwareParameters'][
                        'prefix'] = module.params.get('prefix')
                if module.params.get('suffix'):
                    restore_data['vmwareParameters'][
                        'suffix'] = module.params.get('suffix')
                if module.params.get('interface_group_name'):
                    iface_group = module.params.get('interface_group_name')
                    # Check the group exist in the cluster.
                    cohesity_client = get_cohesity_client(module)
                    vlans = cohesity_client.vlan.get_vlans()
                    for vlan in vlans:
                        if vlan.iface_group_name == iface_group:
                            restore_data['vlanParameters'] = dict(
                                interfaceName=iface_group, disableVlan=True)
                            break
                    # BUGFIX: was `== None`; also fixed "Inferface" typo.
                    if restore_data.get('vlanParameters') is None:
                        module.fail_json(
                            msg="Failed to find Interface Group with name %s"
                            % iface_group, changed=False)
                if module.params.get('restore_to_source'):
                    datastore_id = module.params.get('datastore_id')
                    resource_pool_id = module.params.get('resource_pool_id')
                    restore_to_source_details = get_source_details(
                        module, True)
                    restore_to_source_objects = get_vmware_source_objects(
                        module, restore_to_source_details['id'])
                    # A new location needs both a resource pool and a
                    # datastore, each given either by id or by name.
                    if (module.params.get('resource_pool_id') or
                            module.params.get('resource_pool_name')) and\
                            (module.params.get('datastore_id') or
                             module.params.get('datastore_name')):
                        if module.params.get('resource_pool_name'):
                            resource_pool_id =\
                                get_vmware_object_id(
                                    restore_to_source_objects,
                                    module.params.get('resource_pool_name'),
                                    'kResourcePool')
                        if module.params.get('datastore_name'):
                            datastore_id = get_vmware_object_id(
                                restore_to_source_objects,
                                module.params.get('datastore_name'),
                                'kDatastore')
                        if not datastore_id or not resource_pool_id:
                            module.fail_json(
                                msg="Failed to find the resource pool"
                                " or datastore on the target source")
                        restore_data[
                            'newParentId'] = restore_to_source_details['id']
                        restore_data['vmwareParameters'][
                            'resourcePoolId'] = resource_pool_id
                        restore_data['vmwareParameters'][
                            'datastoreId'] = datastore_id
                        # => Optional VMware Parameters
                        if module.params.get('datastore_folder_id'):
                            restore_data['vmwareParameters'][
                                'datastoreFolderId'] = module.params.get(
                                    'datastore_folder_id')
                        if module.params.get('network_id'):
                            restore_data['vmwareParameters'][
                                'networkId'] = module.params.get('network_id')
                        if module.params.get('network_name'):
                            network_name = module.params.get('network_name')
                            network_id = get_vmware_object_id(
                                restore_to_source_objects, network_name,
                                'kNetwork')
                            if not network_id:
                                module.fail_json(
                                    msg="Failed to find network with name %s"
                                    % network_name, changed=False)
                            restore_data['vmwareParameters'][
                                'networkId'] = network_id
                        if module.params.get('vm_folder_id'):
                            restore_data['vmwareParameters'][
                                'vmFolderId'] = module.params.get(
                                    'vm_folder_id')
                        if module.params.get('vm_folder_name'):
                            vm_folder_name = module.params.get(
                                'vm_folder_name')
                            vm_folder_id = get_vmware_object_id(
                                restore_to_source_objects, vm_folder_name,
                                'kFolder')
                            if not vm_folder_id:
                                module.fail_json(
                                    msg="Failed to find folder with name %s"
                                    % vm_folder_name, changed=False)
                            restore_data['vmwareParameters'][
                                'vmFolderId'] = vm_folder_id
                    else:
                        module.fail_json(
                            msg="The resource pool and datastore details are"
                            " required for restoring to a new location")
                # => Start the Virtual Machine Restore operation
                job_start = start_restore__vms(module, restore_data)
                job_start['vm_names'] = job_details['vm_names']
                response.append(job_start)
            else:
                # => This error should never happen based on the set assigned to the parameter.
                # => However, in case, we should raise an appropriate error.
                module.fail_json(
                    msg="Invalid Environment Type selected: {0}".format(
                        module.params.get('environment')), changed=False)
            # Optionally wait for each launched restore job to finish.
            task = dict(changed=False)
            for jobCheck in response:
                restore_data['id'] = jobCheck['id']
                restore_data['environment'] = environment
                if module.params.get('wait_for_job'):
                    task = wait_restore_complete(module, restore_data)
                    jobCheck['status'] = task['status']
            results = dict(changed=True,
                           msg="Registration of Cohesity Restore Job Complete",
                           name=module.params.get('job_name') + ": " +
                           module.params.get('name'),
                           restore_jobs=response)
            if not task['changed'] and module.params.get('wait_for_job'):
                # => If the task failed to complete, then the key 'changed' will be False and
                # => we need to fail the module.
                results['changed'] = False
                results.pop('msg')
                errorCode = ""
                # => Set the errorCode to match the task['error'] if the key exists
                if 'error' in task:
                    errorCode = task['error']
                module.fail_json(msg="Cohesity Restore Job Failed to complete",
                                 error=errorCode, **results)
    elif module.params.get('state') == "absent":
        results = dict(
            changed=False,
            msg="Cohesity Restore: This feature (absent) has not be implemented yet.",
            name=module.params.get('job_name') + ": " +
            module.params.get('name'))
    else:
        # => This error should never happen based on the set assigned to the parameter.
        # => However, in case, we should raise an appropriate error.
        module.fail_json(msg="Invalid State selected: {}".format(
            module.params.get('state')), changed=False)
    module.exit_json(**results)
def main():
    """Ansible entry point: create or tear down a Cohesity VM clone task."""
    # => Load the default arguments including those specific to the
    # => Cohesity clone task.
    argument_spec = cohesity_common_argument_spec()
    argument_spec.update(
        dict(name=dict(type='str', required=True),
             state=dict(choices=['present', 'absent'], default='present'),
             job_name=dict(type='str', required=True),
             view_name=dict(type='str', required=True),
             backup_timestamp=dict(type='str', default=''),
             environment=dict(choices=['VMware'], default='VMware'),
             vm_names=dict(type='list', required=True),
             wait_for_job=dict(type='bool', default=True),
             prefix=dict(type='str', default=""),
             suffix=dict(type='str', default=""),
             power_on=dict(type='bool', default=True),
             network_connected=dict(type='bool', default=True),
             # NOTE: normalized bare `int` to 'int'; behavior unchanged.
             wait_minutes=dict(type='int', default=30),
             resource_pool=dict(type='str', required=True)))

    # => Create a new module object
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    results = dict(changed=False,
                   msg="Attempting to manage Cohesity Clone",
                   state=module.params.get('state'))
    global cohesity_client
    # Tag all SDK requests with this module's user-agent.
    base_controller = BaseController()
    base_controller.global_headers['user-agent'] = 'cohesity-ansible/v2.3.4'
    cohesity_client = get_cohesity_client(module)
    clone_exists, clone_details = get_clone_task(module, False)

    if module.check_mode:
        check_mode_results = dict(
            changed=False,
            msg="Check Mode: Cohesity clone task doesn't exist",
            id="")
        if module.params.get('state') == "present":
            if clone_exists:
                check_mode_results['msg'] =\
                    "Check Mode: Cohesity clone task is already present. No changes"
                check_mode_results['id'] = clone_details.id
            else:
                check_mode_results['msg'] =\
                    "Check Mode: Cohesity clone task doesn't exist. This action would clone VMs"
                check_mode_results['changed'] = True
        else:
            if clone_exists:
                # BUGFIX: the two implicitly-concatenated literals were
                # missing a separating space ("present.This action ...").
                check_mode_results['msg'] =\
                    "Check Mode: Cohesity clone task is present." \
                    " This action would tear down the Cohesity Clone."
                check_mode_results['id'] = clone_details.id
                check_mode_results['changed'] = True
            else:
                check_mode_results['msg'] =\
                    "Check Mode: Cohesity Clone task doesn't exist. No changes."
        module.exit_json(**check_mode_results)
    elif module.params.get('state') == "present":
        if clone_exists:
            results = dict(
                changed=False,
                msg="The clone task with specified name is already present",
                id=clone_details.id,
                name=module.params.get('name'))
        else:
            # NOTE(review): clone_vm is assumed to call exit_json itself;
            # `results` is not refreshed here — confirm.
            clone_vm(module)
    elif module.params.get('state') == "absent":
        if clone_exists:
            destroy_clone(module, clone_details.id)
            results = dict(changed=True,
                           msg="Cohesity clone is destroyed",
                           id=clone_details.id,
                           task_name=module.params.get('name'))
        else:
            results = dict(changed=False,
                           msg="Cohesity clone task doesn't exist",
                           task_name=module.params.get('name'))
    else:
        module.fail_json(msg="Invalid State selected: {}".format(
            module.params.get('state')), changed=False)
    module.exit_json(**results)
def main():
    """Ansible entry point: gather facts about a Cohesity cluster.

    With state=complete everything is collected; with state=minimal only
    the sections enabled via include_sources/include_jobs/include_runs.
    """
    argument_spec = cohesity_common_argument_spec()
    argument_spec.update(
        dict(state=dict(choices=['complete', 'minimal'], default='complete'),
             include_sources=dict(type='bool', default=False),
             include_jobs=dict(type='bool', default=False),
             include_runs=dict(type='bool', default=False),
             active_only=dict(type='bool', default=False),
             include_deleted=dict(type='bool', default=False)))
    module = AnsibleModule(argument_spec=argument_spec)
    results = dict(changed=False, cluster='')
    params = dict(server=module.params.get('cluster'),
                  username=module.params.get('username'),
                  password=module.params.get('password'),
                  validate_certs=module.params.get('validate_certs'),
                  active_only=module.params.get('active_only'),
                  # BUGFIX: was module.params.get('is_deleted'), a parameter
                  # that is never declared (always None); the declared
                  # option is 'include_deleted'.
                  is_deleted=module.params.get('include_deleted'))
    params['token'] = get__cohesity_auth__token(module)
    try:
        # BUGFIX: the flags were unconditionally initialized to True, so the
        # include_* options and state=minimal had no effect (everything was
        # always collected). Now only state=complete forces them all on.
        if module.params.get('state') == 'complete':
            include_sources = include_jobs = include_runs = True
        else:
            include_sources = module.params.get('include_sources')
            include_jobs = module.params.get('include_jobs')
            include_runs = module.params.get('include_runs')

        results['cluster'] = get__cluster(params)
        results['cluster']['nodes'] = get__nodes(params)
        # => Create a root node for all protection related items
        results['cluster']['protection'] = dict()
        # => We will group each Protection Source based on the
        # => environment type so to do this, we will declare
        # => sources as a dictionary.
        results['cluster']['protection']['sources'] = dict()
        # => Iterate each supported Environment type and collect each grouped
        # => by type.
        if include_sources:
            env_types = ['Physical', 'VMware', 'GenericNas']
            for env_type in env_types:
                params['environment'] = env_type
                results['cluster']['protection']['sources'][
                    env_type] = get__prot_source__all(params)
            # => Let's remove this key since it is not needed for further
            # => processing.  (Moved inside the guard: the key only exists
            # => when sources were collected.)
            params.pop('environment')
        # => Collect all Cohesity Protection Policies
        results['cluster']['protection']['policies'] = get__prot_policy__all(
            params)
        # => Collect all registered Protection Jobs
        # => This value can be filtered by choosing
        # => `active_only=True/False` and/or `is_deleted=True/False`
        if include_jobs:
            results['cluster']['protection']['jobs'] = get__prot_job__all(
                params)
        # => Collect all Storage Domains
        results['cluster']['storage_domains'] = get__storage_domain_id__all(
            params)
        # => Collect all Protection Jobs execution details
        # => This value can be filtered by choosing
        # => `active_only=True/False` and/or `is_deleted=True/False`
        if include_runs:
            results['cluster']['protection'][
                'runs'] = get__protection_run__all(params)
    except Exception:
        module.fail_json(msg="Failure while collecting Cohesity Facts",
                         exception=traceback.format_exc())
    module.exit_json(**results)
def main():
    """Ansible entry point: manage a Cohesity Protection Job.

    Supports state=present (register/update), absent (unregister the job or
    remove individual sources from it), started and stopped.  The flow is
    order-sensitive: job_details['environment'] is temporarily rewritten
    ('PhysicalFiles' -> 'Physical') for source lookups and then restored.
    """
    # => Load the default arguments including those specific to the Cohesity Protection Jobs.
    argument_spec = cohesity_common_argument_spec()
    argument_spec.update(
        dict(
            state=dict(choices=['present', 'absent', 'started', 'stopped'],
                       default='present'),
            name=dict(type='str', required=True, aliases=['job_name']),
            description=dict(type='str', default=''),
            # => Currently, the only supported environments types are list in the choices
            # => For future enhancements, the below list should be consulted.
            # => 'SQL', 'View', 'Puppeteer', 'Pure', 'Netapp', 'HyperV', 'Acropolis', 'Azure'
            environment=dict(choices=[
                'VMware', 'PhysicalFiles', 'Physical', 'GenericNas', 'View'
            ], default='PhysicalFiles'),
            view_name=dict(type='str', required=False),
            # NOTE(review): list-typed options below use default='' (not a
            # list) and `type=list` uses the bare builtin — kept verbatim.
            protection_sources=dict(type='list', aliases=['sources'],
                                    default=''),
            protection_policy=dict(type='str', aliases=['policy'],
                                   default='Bronze'),
            storage_domain=dict(type='str', default='DefaultStorageDomain'),
            time_zone=dict(type='str', default='America/Los_Angeles'),
            start_time=dict(type='str', default=''),
            delete_backups=dict(type='bool', default=False),
            ondemand_run_type=dict(
                choices=['Regular', 'Full', 'Log', 'System'],
                default='Regular'),
            cancel_active=dict(type='bool', default=False),
            validate_certs=dict(type='bool', default=False),
            exclude=dict(type=list, default=''),
            include=dict(type=list, default='')))

    # => Create a new module object
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    results = dict(changed=False,
                   msg="Attempting to manage Protection Source",
                   state=module.params.get('state'))
    # Seed the job payload; several keys are later replaced by resolved ids.
    job_details = dict(token=get__cohesity_auth__token(module),
                       name=module.params.get('name'),
                       description=module.params.get('description'),
                       environment=module.params.get('environment'),
                       sourceIds=module.params.get('protection_sources'),
                       policyId=module.params.get('protection_policy'),
                       viewBoxId=module.params.get('storage_domain'),
                       timezone=module.params.get('time_zone'))
    job_exists, job_meta_data = check__protection_job__exists(
        module, job_details)

    if module.check_mode:
        # Check mode: report what would happen, then exit without changes.
        check_mode_results = dict(
            changed=False,
            msg="Check Mode: Cohesity Protection Job is not currently registered",
            id="")
        if module.params.get('state') == "present":
            if job_exists:
                check_mode_results[
                    'msg'] = "Check Mode: Cohesity Protection Job is currently registered. No changes"
            else:
                check_mode_results[
                    'msg'] = "Check Mode: Cohesity Protection Job is not currently registered. This action would register the Cohesity Protection Job."
                check_mode_results['id'] = job_exists
        else:
            if job_exists:
                check_mode_results[
                    'msg'] = "Check Mode: Cohesity Protection Job is currently registered. This action would unregister the Cohesity Protection Job."
                check_mode_results['id'] = job_exists
            else:
                check_mode_results[
                    'msg'] = "Check Mode: Cohesity Protection Job is not currently registered. No changes."
        module.exit_json(**check_mode_results)
    elif module.params.get('state') == "present":
        results['source_vars'] = job_details
        if job_exists:
            # NOTE(review): after update_vmware_job, a VMware environment
            # also falls through to the second if's `else` (exit_json
            # "already exists") — presumably update_vmware_job exits on its
            # own; confirm.
            if module.params.get('environment') == "VMware":
                update_vmware_job(module, job_meta_data, job_details)
            if module.params.get('environment') in ("PhysicalFiles",
                                                    "Physical", "GenericNas"):
                update_job_util(module, job_details, job_exists)
            else:
                module.exit_json(msg="The protection job already exists",
                                 id=job_exists,
                                 name=module.params.get('name'),
                                 changed=False)
        else:
            check__mandatory__params(module)
            if job_details['environment'] == 'View':
                # View jobs target a view, not source ids.
                job_details['viewName'] = module.params.get('view_name')
                job_details['viewBoxId'] = get_view_storage_domain_id(
                    module, job_details)
                del job_details['sourceIds']
            else:
                job_details['sourceIds'] = list()
                # PhysicalFiles jobs look up sources under 'Physical'.
                if job_details['environment'] == "PhysicalFiles":
                    job_details['environment'] = "Physical"
                prot_source = dict(environment=job_details['environment'],
                                   token=job_details['token'])
                i = 0
                # Resolve each endpoint to a source id; the resolved id (or
                # None) is written back into module.params.
                for source in module.params.get('protection_sources'):
                    prot_source['endpoint'] = source['endpoint']
                    source_id = get__prot_source_id__by_endpoint(
                        module, prot_source)
                    if source_id:
                        job_details['sourceIds'].append(source_id)
                        module.params.get(
                            'protection_sources')[i]['endpoint'] = source_id
                    else:
                        module.params.get(
                            'protection_sources')[i]['endpoint'] = None
                    i += 1
                job_details[
                    'parentSourceId'] = get__prot_source_root_id__by_environment(
                        module, job_details)
                job_details['viewBoxId'] = get__storage_domain_id__by_name(
                    module, job_details)
            # Restore the user-facing environment value for registration.
            job_details['environment'] = module.params.get('environment')
            job_details['policyId'] = get__prot_policy_id__by_name(
                module, job_details)
            if module.params.get('start_time'):
                # Expect HHMM after stripping ':' (e.g. "23:30" -> "2330").
                start_time = list(
                    module.params.get('start_time').replace(":", ""))
                if not len(start_time) == 4:
                    # => There are only so many options here but if we get more characters
                    # => than four then we need to escape quickly.
                    module.fail_json(
                        msg="Invalid start_time selected (" +
                        module.params.get('start_time') +
                        "). Please review and submit the correct Protection Job Starting time."
                    )
                job_details['startTime'] = dict(
                    hour=int(start_time[0] + start_time[1]),
                    minute=int(start_time[2] + start_time[3]))
            response = register_job(module, job_details)
            results = dict(
                changed=True,
                msg="Registration of Cohesity Protection Job Complete",
                **response)
    elif module.params.get('state') == "absent":
        if job_exists:
            # A single empty protection_sources entry means "delete the
            # whole job"; otherwise remove just the listed sources.
            if len(module.params.get('protection_sources')
                   ) == 1 and not module.params.get('protection_sources')[0]:
                job_details['id'] = job_exists
                job_details['deleteSnapshots'] = module.params.get(
                    'delete_backups')
                response = unregister_job(module, job_details)
                results = dict(
                    changed=True,
                    msg="Unregistration of Cohesity Protection Job Complete",
                    id=job_exists,
                    name=module.params.get('name'))
            else:
                job_details['id'] = job_exists
                job_details['sourceIds'] = list()
                if job_details['environment'] == "PhysicalFiles":
                    job_details['environment'] = "Physical"
                prot_source = dict(environment=job_details['environment'],
                                   token=job_details['token'])
                for source in module.params.get('protection_sources'):
                    prot_source['endpoint'] = source['endpoint']
                    source_id = get__prot_source_id__by_endpoint(
                        module, prot_source)
                    if source_id:
                        job_details['sourceIds'].append(source_id)
                job_details[
                    'parentSourceId'] = get__prot_source_root_id__by_environment(
                        module, job_details)
                job_details['environment'] = module.params.get('environment')
                existing_job_details = get_prot_job_details(
                    job_details, module)
                # Sources to remove must already be part of the job.
                sources_exiting_in_job = set(
                    job_details['sourceIds']).intersection(
                        existing_job_details['sourceIds'])
                if len(sources_exiting_in_job) != 0 and len(
                        sources_exiting_in_job) != len(
                            existing_job_details['sourceIds']):
                    # Keep only sources NOT being removed, then update.
                    existing_job_details['sourceIds'] = list(
                        set(existing_job_details['sourceIds']).difference(
                            job_details['sourceIds']))
                    existing_job_details['token'] = job_details['token']
                    response = update_job(module, existing_job_details,
                                          sources_exiting_in_job)
                    results = dict(
                        changed=True,
                        msg=
                        "Successfully removed the sources from existing protection job",
                        **response)
                elif len(sources_exiting_in_job) != 0 and len(
                        sources_exiting_in_job) == len(
                            existing_job_details['sourceIds']):
                    # Removing every source would leave an empty job.
                    module.fail_json(
                        msg="Cannot remove all sources from protection job",
                        changed=False)
                else:
                    results = dict(
                        changed=False,
                        msg="The protection job doesn't have the sources",
                        id=job_exists,
                        name=module.params.get('name'))
        else:
            results = dict(
                changed=False,
                msg=
                "The Protection Job for this host is currently not registered",
                name=module.params.get('name'))
    elif module.params.get('state') == "started":
        if job_exists:
            job_details['id'] = job_exists
            job_details['runType'] = module.params.get('ondemand_run_type')
            response = start_job(module, job_details)
            results = dict(
                changed=True,
                msg="The Protection Job for this host has been started",
                id=job_exists,
                name=module.params.get('name'))
        else:
            results = dict(
                changed=False,
                msg=
                "The Protection Job for this host is currently not registered",
                name=module.params.get('name'))
    elif module.params.get('state') == "stopped":
        if job_exists:
            job_details['id'] = job_exists
            response = stop_job(module, job_details)
            results = dict(
                changed=True,
                msg="The Protection Job for this host has been stopped",
                id=job_exists,
                name=module.params.get('name'))
        else:
            results = dict(
                changed=False,
                msg=
                "The Protection Job for this host is currently not registered",
                name=module.params.get('name'))
    else:
        # => This error should never happen based on the set assigned to the parameter.
        # => However, in case, we should raise an appropriate error.
        module.fail_json(msg="Invalid State selected: {0}".format(
            module.params.get('state')), changed=False)
    module.exit_json(**results)
def main():
    """Ansible entry point: register/unregister a Cohesity Oracle source.

    An Oracle source requires the endpoint to be registered as a Physical
    source first; this module registers both layers for state=present and
    unregisters for state=absent.
    """
    # => Load the default arguments including those specific to the
    # => Cohesity Agent.
    argument_spec = cohesity_common_argument_spec()
    argument_spec.update(
        dict(state=dict(choices=['present', 'absent'], default='present'),
             endpoint=dict(type='str', required=True),
             force_register=dict(default=False, type='bool'),
             db_username=dict(default='', type='str'),
             db_password=dict(default='', type='str')))

    # => Create a new module object
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    global cohesity_client
    cohesity_client = get_cohesity_client(module)
    results = dict(changed=False,
                   msg='Attempting to manage Protection Source',
                   state=module.params.get('state'))
    # Check the endpoint is already registred as a Physical source.
    prot_sources = dict(token=get__cohesity_auth__token(module),
                        endpoint=module.params.get('endpoint'),
                        environment='kPhysical')
    current_status = get__protection_source_registration__status(
        module, prot_sources)

    if module.check_mode:
        # In check mode, report against the Oracle-level registration.
        prot_sources['environment'] = 'kOracle'
        current_status = get__protection_source_registration__status(
            module, prot_sources)
        check_mode_results = dict(
            changed=False,
            msg='Check Mode: Cohesity Protection Source is not currently registered',
            id='')
        if module.params.get('state') == 'present':
            if current_status:
                check_mode_results[
                    'msg'] = 'Check Mode: Cohesity Protection Source is currently registered. No changes'
            else:
                check_mode_results[
                    'msg'] = 'Check Mode: Cohesity Protection Source is not currently registered. This action would register the Protection Source.'
                check_mode_results['id'] = current_status
        else:
            if current_status:
                check_mode_results[
                    'msg'] = 'Check Mode: Cohesity Protection Source is currently registered. This action would unregister the Protection Source.'
                check_mode_results['id'] = current_status
            else:
                check_mode_results[
                    'msg'] = 'Check Mode: Cohesity Protection Source is not currently registered. No changes.'
        module.exit_json(**check_mode_results)
    elif module.params.get('state') == 'present':
        if current_status:
            # Physical layer already registered — only add the Oracle layer
            # if it is missing.
            prot_sources = dict(token=get__cohesity_auth__token(module),
                                endpoint=module.params.get('endpoint'),
                                environment='kOracle')
            oracle_status = get__protection_source_registration__status(
                module, prot_sources)
            if not oracle_status:
                resp = register_oracle_source(module, prot_sources,
                                              current_status)
                if resp is True:
                    results = dict(
                        changed=True,
                        msg='Registration of Cohesity Protection Source Complete')
                else:
                    results = dict(
                        changed=False,
                        msg='Error while registering Cohesity Protection Source')
            else:
                results = dict(
                    changed=False,
                    msg='The Protection Source for this host is already registered',
                    id=current_status,
                    endpoint=module.params.get('endpoint'))
        else:
            # Register the endpoint as Physical source first.
            response = register_source(module, prot_sources)
            # Wait until Physical source is successfully registered.
            # BUGFIX: the original loop failed the module on the very first
            # unsuccessful poll (so it never actually retried) and, on
            # success, still ran all 5 iterations sleeping 50 seconds in
            # total.  Now we poll up to 5 times, stop as soon as the source
            # shows up, and only fail after exhausting the retries.
            status = False
            for _ in range(5):
                status = get__protection_source_registration__status(
                    module,
                    dict(environment='kPhysical',
                         token=prot_sources['token'],
                         endpoint=prot_sources['endpoint']))
                if status:
                    break
                time.sleep(10)
            if not status:
                module.fail_json(
                    changed=False,
                    msg='Error while registering Cohesity Physical Protection Source')
            response = register_oracle_source(module, prot_sources,
                                              response.id)
            if response is True:
                results = dict(
                    changed=True,
                    msg='Registration of Cohesity Protection Source Complete')
            else:
                results = dict(
                    changed=False,
                    msg='Error while registering Cohesity Protection Source')
    elif module.params.get('state') == 'absent':
        if current_status:
            response = unregister_source(module, current_status)
            results = dict(
                changed=True,
                msg='Unregistration of Cohesity Protection Source Complete',
                id=current_status,
                endpoint=module.params.get('endpoint'))
        else:
            results = dict(
                changed=False,
                msg='The Protection Source for this host is currently not registered')
    else:
        # => This error should never happen based on the set assigned to the parameter.
        # => However, in case, we should raise an appropriate error.
        module.fail_json(msg='Invalid State selected: {0}'.format(
            module.params.get('state')), changed=False)
    module.exit_json(**results)
def main():
    """Ansible entry point: restore files from a Cohesity Protection Job.

    Supports check mode, idempotent 'present' restores for the
    PhysicalFiles / GenericNas / Physical environments, and reports
    'absent' as not implemented.
    """
    # => Load the default arguments including those specific to the Cohesity Protection Jobs.
    argument_spec = cohesity_common_argument_spec()
    argument_spec.update(
        dict(
            state=dict(choices=['present', 'absent'], default='present'),
            name=dict(type='str', required=True),
            # => Currently, the only supported environments types are list in the choices
            # => For future enhancements, the below list should be consulted.
            # => 'SQL', 'View', 'Puppeteer', 'Pure', 'Netapp', 'HyperV', 'Acropolis', 'Azure'
            environment=dict(
                choices=['PhysicalFiles', 'GenericNas', 'Physical'],
                default='PhysicalFiles'),
            job_name=dict(type='str', required=True),
            endpoint=dict(type='str', required=True),
            backup_id=dict(type='str', default=''),
            backup_timestamp=dict(type='str', default=''),
            file_names=dict(type='list', required=True),
            wait_for_job=dict(type='bool', default=True),
            overwrite=dict(type='bool', default=True),
            preserve_attributes=dict(type='bool', default=True),
            restore_location=dict(type='str', default=''),
            # NOTE(review): declared type='str' but the default is the int
            # 10; sibling modules use type='int' — confirm downstream use.
            wait_minutes=dict(type='str', default=10)))
    # => Create a new module object
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    results = dict(changed=False,
                   msg="Attempting to manage Protection Source",
                   state=module.params.get('state'))
    # Shared lookup payload handed to the helper functions below.
    job_details = dict(token=get__cohesity_auth__token(module),
                       endpoint=module.params.get('endpoint'),
                       job_name=module.params.get('job_name'),
                       environment=module.params.get('environment'),
                       name=module.params.get('job_name') + ": " +
                       module.params.get('name'))
    if module.params.get('backup_id'):
        job_details['jobRunId'] = module.params.get('backup_id')
    if module.params.get('backup_timestamp'):
        job_details['backup_timestamp'] = module.params.get('backup_timestamp')
    job_exists = check__protection_restore__exists(module, job_details)
    if module.check_mode:
        # Describe what would happen without performing the restore.
        check_mode_results = dict(
            changed=False,
            msg=
            "Check Mode: Cohesity Protection Restore Job is not currently registered",
            id="")
        if module.params.get('state') == "present":
            if job_exists:
                check_mode_results[
                    'msg'] = "Check Mode: Cohesity Protection Restore Job is currently registered. No changes"
            else:
                check_mode_results[
                    'msg'] = "Check Mode: Cohesity Protection Restore Job is not currently registered. This action would register the Cohesity Protection Job."
                check_mode_results['id'] = job_exists
        else:
            if job_exists:
                check_mode_results[
                    'msg'] = "Check Mode: Cohesity Protection Restore Job is currently registered. This action would unregister the Cohesity Protection Job."
                check_mode_results['id'] = job_exists
            else:
                check_mode_results[
                    'msg'] = "Check Mode: Cohesity Protection Restore Job is not currently registered. No changes."
        module.exit_json(**check_mode_results)
    elif module.params.get('state') == "present":
        if job_exists:
            results = dict(changed=False,
                           msg="The Restore Job for is already registered",
                           id=job_exists,
                           name=module.params.get('job_name') + ": " +
                           module.params.get('name'))
        else:
            # check__mandatory__params(module)
            environment = module.params.get('environment')
            response = []
            if environment in ("PhysicalFiles", "GenericNas", "Physical"):
                # => Gather the Source Details
                job_details['file_names'] = module.params.get('file_names')
                prot_source = dict(environment="Physical",
                                   token=job_details['token'],
                                   endpoint=module.params.get('endpoint'))
                if environment == "GenericNas":
                    prot_source['environment'] = "GenericNas"
                source_id = get__prot_source_id__by_endpoint(
                    module, prot_source)
                if not source_id:
                    module.fail_json(
                        msg="Failed to find the endpoint on the cluster",
                        changed=False)
                job_details['endpoint'] = source_id
                # Normalize each requested path for the source environment.
                restore_file_list = []
                for restore_file in job_details['file_names']:
                    if environment in ("PhysicalFiles", "Physical"):
                        # Windows paths need escaping before the API call.
                        restore_file_list.append(
                            convert__windows_file_name(restore_file))
                    elif environment == "GenericNas":
                        # NAS paths are made relative to the mount point.
                        restore_file_list.append(
                            strip__prefix(job_details['endpoint'],
                                          restore_file))
                    else:
                        # NOTE(review): unreachable given the environment
                        # choices above; also replaces the accumulated list
                        # with a single file name — verify intent.
                        restore_file_list = restore_file
                job_details['file_names'] = restore_file_list
                source_object_info = get__snapshot_information__for_file(
                    module, job_details)
                # Submit one restore task per snapshot object found.
                for objectInfo in source_object_info:
                    restore_data = dict(
                        name=module.params.get('job_name') + ": " +
                        module.params.get('name'),
                        filenames=restore_file_list,
                        targetSourceId=objectInfo['protectionSourceId'],
                        sourceObjectInfo=objectInfo,
                        token=job_details['token'],
                        overwrite=module.params.get('overwrite'),
                        preserveAttributes=module.params.get(
                            'preserve_attributes'))
                    if module.params.get('restore_location'):
                        restore_data['newBaseDirectory'] = module.params.get(
                            'restore_location')
                    response.append(start_restore__files(module, restore_data))
            else:
                # => This error should never happen based on the set assigned to the parameter.
                # => However, in case, we should raise an appropriate error.
                module.fail_json(
                    msg="Invalid Environment Type selected: {0}".format(
                        module.params.get('environment')),
                    changed=False)
            # Optionally block until each submitted restore task finishes.
            task = dict(changed=False)
            for jobCheck in response:
                # NOTE(review): reuses `restore_data` from the last loop
                # iteration above; relies on at least one task existing.
                restore_data['id'] = jobCheck['id']
                restore_data['environment'] = environment
                if module.params.get('wait_for_job'):
                    task = wait_restore_complete(module, restore_data)
                    jobCheck['status'] = task['status']
            results = dict(changed=True,
                           msg="Registration of Cohesity Restore Job Complete",
                           name=module.params.get('job_name') + ": " +
                           module.params.get('name'),
                           restore_jobs=response)
            if 'file_names' in job_details:
                results['filenames'] = job_details['file_names']
            if not task['changed'] and module.params.get('wait_for_job'):
                # => If the task failed to complete, then the key 'changed' will be False and
                # => we need to fail the module.
                results['changed'] = False
                results.pop('msg')
                errorCode = ""
                # => Set the errorCode to match the task['error'] if the key exists
                if 'error' in task:
                    errorCode = task['error']
                module.fail_json(msg="Cohesity Restore Job Failed to complete",
                                 error=errorCode,
                                 **results)
    elif module.params.get('state') == "absent":
        results = dict(
            changed=False,
            msg=
            "Cohesity Restore: This feature (absent) has not be implemented yet.",
            name=module.params.get('job_name') + ": " +
            module.params.get('name'))
    else:
        # => This error should never happen based on the set assigned to the parameter.
        # => However, in case, we should raise an appropriate error.
        module.fail_json(msg="Invalid State selected: {}".format(
            module.params.get('state')),
                         changed=False)
    module.exit_json(**results)
def main():
    """Manage the Cohesity agent on a Linux/AIX host.

    Supports check mode, fresh installs (script-based or native package),
    upgrades (``upgrade=True``), and uninstalls (``state=absent``).
    Downloads the installer into ``download_location`` or a temp dir and
    cleans up afterwards.
    """
    # => Load the default arguments including those specific to the Cohesity Agent.
    argument_spec = cohesity_common_argument_spec()
    argument_spec.update(
        dict(
            state=dict(choices=['present', 'absent'], default='present'),
            download_location=dict(default=''),
            service_user=dict(default='cohesityagent'),
            service_group=dict(default='cohesityagent'),
            create_user=dict(default=True, type='bool'),
            file_based=dict(default=False, type='bool'),
            native_package=dict(default=False, type='bool'),
            # FIX: these two keys were misspelled ('defaut' / 'defalut'),
            # so the intended empty-string defaults were never applied.
            download_uri=dict(default=''),
            operating_system=dict(default="", type='str'),
            host=dict(type='str', default=''),
            upgrade=dict(type='bool', default=False),
            wait_minutes=dict(type='int', default=30),
        ))
    # => Create a new module object
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    results = dict(changed=False,
                   version=False,
                   state=module.params.get('state'))
    # => Make a temporary directory to house the downloaded installer.
    if module.params.get('download_location'):
        tempdir = module.params.get('download_location')
        create_download_dir(module, tempdir)
    else:
        tempdir = mkdtemp(prefix="ansible.")
    # Agent installation for AIX operating system is done using native package.
    if module.params.get('operating_system') == "AIX":
        module.params['native_package'] = True
    success = True
    try:
        if module.check_mode:
            # Report what would happen without installing/removing anything.
            results = check_agent(module, results)
            check_mode_results = dict(
                changed=False,
                msg="Check Mode: Agent is Currently not Installed",
                version="")
            if module.params.get('state') == "present":
                if results['version']:
                    check_mode_results[
                        'msg'] = "Check Mode: Agent is currently installed. No changes"
                else:
                    check_mode_results[
                        'msg'] = "Check Mode: Agent is currently not installed. This action would install the Agent."
                    check_mode_results['version'] = results['version']
            else:
                if results['version']:
                    check_mode_results[
                        'msg'] = "Check Mode: Agent is currently installed. This action would uninstall the Agent."
                    check_mode_results['version'] = results['version']
                else:
                    check_mode_results[
                        'msg'] = "Check Mode: Agent is currently not installed. No changes."
            module.exit_json(**check_mode_results)
        elif module.params.get(
                'state') == "present" and not module.params.get('upgrade'):
            # => Check if the Cohesity Agent is currently installed and only trigger the install
            # => if the agent does not exist.
            results = check_agent(module, results)
            if not results['version']:
                if not module.params.get('native_package'):
                    # Script-based install: download, extract, then install.
                    results['filename'] = download_agent(module, tempdir)
                    results['changed'], results['message'], results[
                        'installer'] = extract_agent(module,
                                                     results['filename'])
                    results['changed'], results['message'] = install_agent(
                        module, results['installer'], False)
                    results = check_agent(module, results)
                else:
                    # Native package install: download and install directly.
                    results['filename'] = download_agent(module, tempdir)
                    results['changed'], results['message'] = install_agent(
                        module, results['filename'], True)
                    results = check_agent(module, results)
            elif results['version'] == "unknown":
                # => There is a problem that we should invesitgate.
                module.fail_json(msg="Cohesity Agent is partially installed",
                                 **results)
            else:
                # => If we received a valid version then the assumption will be
                # => that the Agent is installed. We should simply pass it foward
                # => and act like things are normal.
                pass
        elif module.params.get('state') == "present" and module.params.get(
                'upgrade'):
            # Upgrades are driven through the cluster and need the host name.
            if not module.params.get('host'):
                module.fail_json(
                    changed=False,
                    msg="The host parameter is required for agent upgrades")
            update_agent(module)
        elif module.params.get('state') == "absent":
            # => Check if the Cohesity Agent is currently installed and only trigger the uninstall
            # => if the agent exists.
            results = check_agent(module, results)
            # => If anything is returned, we should remove the agent. We also don't care if there
            # => is any output from the check so we will pop that out of the results to clean up
            # => our return data.
            if results['version']:
                if not module.params.get('native_package'):
                    results.pop('check_agent', None)
                    # => When removing the agent, we will need to download the installer once again,
                    # => and then run the --full-uninstall command.
                    results['filename'] = download_agent(module, tempdir)
                    results['changed'], results['message'], results[
                        'installer'] = extract_agent(module,
                                                     results['filename'])
                    results['changed'], results['message'] = remove_agent(
                        module, results['installer'], False)
                else:
                    results['changed'], results['message'] = remove_agent(
                        module, "", True)
        else:
            # => This error should never happen based on the set assigned to the parameter.
            # => However, in case, we should raise an appropriate error.
            module.fail_json(msg="Invalid State selected: {0}".format(
                module.params.get('state')),
                             changed=False)
    except Exception as error:
        # => The exception handler should still trigger but just in case, let's set this
        # => variable 'success' to be False.
        success = False
        # => Call the proper error handler.
        msg = "Unexpected error caused while managing the Cohesity Linux Agent."
        raise__cohesity_exception__handler(error, module, msg)
    finally:
        # => We should delete the downloaded installer regardless of our success. This could be debated
        # => either way but seems like a better choice.
        if module.params.get('download_location'):
            if 'installer' in results:
                shutil.rmtree(results['installer'])
        else:
            shutil.rmtree(tempdir)
    if success:
        # -> Return Ansible JSON
        module.exit_json(**results)
    else:
        module.fail_json(msg="Cohesity Agent Failed", **results)
def main():
    """Register or unregister a Cohesity Protection Source.

    Supports Physical, VMware and GenericNas environments plus check-mode
    reporting. Builds the registration payload from the module parameters
    and delegates to the register/unregister helpers.
    """
    # => Load the default arguments including those specific to the Cohesity Agent.
    argument_spec = cohesity_common_argument_spec()
    argument_spec.update(
        dict(
            state=dict(choices=['present', 'absent'], default='present'),
            endpoint=dict(type='str', required=True),
            # => Currently, the only supported environments types are list in the choices
            # => For future enhancements, the below list should be consulted.
            # => 'SQL', 'View', 'Puppeteer', 'Pure', 'Netapp', 'HyperV', 'Acropolis', 'Azure'
            environment=dict(choices=['VMware', 'Physical', 'GenericNas'],
                             default='Physical'),
            host_type=dict(choices=['Linux', 'Windows', 'Aix'],
                           default='Linux'),
            physical_type=dict(choices=['Host', 'WindowsCluster'],
                               default='Host'),
            force_register=dict(default=False, type='bool'),
            vmware_type=dict(choices=[
                'VCenter', 'Folder', 'Datacenter', 'ComputeResource',
                'ClusterComputeResource', 'ResourcePool', 'Datastore',
                'HostSystem', 'VirtualMachine', 'VirtualApp',
                'StandaloneHost', 'StoragePod', 'Network',
                'DistributedVirtualPortgroup', 'TagCategory', 'Tag'
            ],
                             default='VCenter'),
            source_username=dict(type='str', default=''),
            source_password=dict(type='str', no_log=True, default=''),
            nas_protocol=dict(choices=['NFS', 'SMB'], default='NFS'),
            nas_username=dict(type='str', default=''),
            nas_password=dict(type='str', no_log=True, default=''),
            nas_type=dict(type='str', default='Host'),
            skip_validation=dict(type='bool', default=False)))
    # => Create a new module object
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    results = dict(changed=False,
                   msg="Attempting to manage Protection Source",
                   state=module.params.get('state'))
    # try:
    prot_sources = dict(token=get__cohesity_auth__token(module),
                        endpoint=module.params.get('endpoint'),
                        environment=module.params.get('environment'))
    current_status = get__protection_source_registration__status(
        module, prot_sources)
    if module.check_mode:
        # Describe the action that would be taken without performing it.
        check_mode_results = dict(
            changed=False,
            msg=
            "Check Mode: Cohesity Protection Source is not currently registered",
            id="")
        if module.params.get('state') == "present":
            if current_status:
                check_mode_results[
                    'msg'] = "Check Mode: Cohesity Protection Source is currently registered. No changes"
            else:
                check_mode_results[
                    'msg'] = "Check Mode: Cohesity Protection Source is not currently registered. This action would register the Protection Source."
                check_mode_results['id'] = current_status
        else:
            if current_status:
                check_mode_results[
                    'msg'] = "Check Mode: Cohesity Protection Source is currently registered. This action would unregister the Protection Source."
                check_mode_results['id'] = current_status
            else:
                check_mode_results[
                    'msg'] = "Check Mode: Cohesity Protection Source is not currently registered. No changes."
        module.exit_json(**check_mode_results)
    elif module.params.get('state') == "present":
        check__mandatory__params(module)
        # Augment the payload with environment-specific registration fields.
        if prot_sources['environment'] == "Physical":
            prot_sources['hostType'] = module.params.get('host_type')
            prot_sources['physicalType'] = module.params.get('physical_type')
        if prot_sources['environment'] == "VMware":
            prot_sources['username'] = module.params.get('source_username')
            prot_sources['password'] = module.params.get('source_password')
            prot_sources['vmwareType'] = module.params.get('vmware_type')
        if prot_sources['environment'] == "GenericNas":
            prot_sources['nasMountCredentials'] = dict()
            if module.params.get('nas_protocol') == 'NFS':
                prot_sources['nasMountCredentials']['nasProtocol'] = 'kNfs3'
            elif module.params.get('nas_protocol') == 'SMB':
                prot_sources['nasMountCredentials']['nasProtocol'] = 'kCifs1'
                # FIX: this previously tested `"\\" in ['nas_username']`,
                # i.e. membership in a literal list, which is always False —
                # DOMAIN\user names were never split into domain + username.
                if "\\" in module.params.get('nas_username'):
                    user_details = module.params.get('nas_username').split(
                        "\\")
                    prot_sources['nasMountCredentials'][
                        'username'] = user_details[1]
                    prot_sources['nasMountCredentials'][
                        'domain'] = user_details[0]
                else:
                    prot_sources['nasMountCredentials'][
                        'username'] = module.params.get('nas_username')
                prot_sources['nasMountCredentials'][
                    'password'] = module.params.get('nas_password')
            prot_sources['nasMountCredentials'][
                'nasType'] = 'k' + module.params.get('nas_type')
            prot_sources['nasMountCredentials'][
                'skipValidation'] = module.params.get('skip_validation')
        prot_sources['forceRegister'] = module.params.get('force_register')
        results['changed'] = True
        results['source_vars'] = prot_sources
        if current_status:
            # Idempotency: already registered, nothing to do.
            results = dict(
                changed=False,
                msg="The Protection Source for this host is already registered",
                id=current_status,
                endpoint=module.params.get('endpoint'))
        else:
            response = register_source(module, prot_sources)
            results = dict(
                changed=True,
                msg="Registration of Cohesity Protection Source Complete",
                **response)
    elif module.params.get('state') == "absent":
        if current_status:
            prot_sources['id'] = current_status
            response = unregister_source(module, prot_sources)
            results = dict(
                changed=True,
                msg="Unregistration of Cohesity Protection Source Complete",
                id=current_status,
                endpoint=module.params.get('endpoint'))
        else:
            results = dict(
                changed=False,
                msg=
                "The Protection Source for this host is currently not registered"
            )
    else:
        # => This error should never happen based on the set assigned to the parameter.
        # => However, in case, we should raise an appropriate error.
        module.fail_json(msg="Invalid State selected: {0}".format(
            module.params.get('state')),
                         changed=False)
    module.exit_json(**results)
def main():
    """Restore files into a VMware virtual machine from a Cohesity backup.

    Locates the registered vCenter (``endpoint``) and target VM
    (``vm_name``), verifies each requested file exists in a backup, then
    submits a restore task and optionally waits for it to complete.
    """
    # => Load the default arguments including those specific to the Cohesity Protection Jobs.
    argument_spec = cohesity_common_argument_spec()
    argument_spec.update(
        dict(
            state=dict(choices=['present', 'absent'], default='present'),
            name=dict(type='str', required=True),
            # => Currently, the only supported environments types are list in the choices
            # => For future enhancements, the below list should be consulted.
            # => 'SQL', 'View', 'Puppeteer', 'Pure', 'Netapp', 'HyperV', 'Acropolis', 'Azure'
            environment=dict(type='str', default='VMware'),
            job_name=dict(type='str', required=True),
            endpoint=dict(type='str', required=True),
            backup_timestamp=dict(type='str', default=''),
            file_names=dict(type='list', required=True),
            wait_for_job=dict(type='bool', default=True),
            overwrite=dict(type='bool', default=True),
            preserve_attributes=dict(type='bool', default=True),
            restore_location=dict(type='str', default=''),
            vm_name=dict(type='str', default=''),
            vm_username=dict(type='str', default=''),
            vm_password=dict(type='str', default='', no_log=True),
            wait_minutes=dict(type='str', default=10)))
    # => Create a new module object
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    results = dict(changed=False,
                   msg="Attempting to manage Protection Source",
                   state=module.params.get('state'))
    job_details = dict(token=get__cohesity_auth__token(module),
                       endpoint=module.params.get('endpoint'),
                       job_name=module.params.get('job_name'),
                       environment=module.params.get('environment'),
                       name=module.params.get('job_name') + ": " +
                       module.params.get('name'))
    # Shared SDK client used by the lookups below.
    global cohesity_client
    base_controller = BaseController()
    base_controller.global_headers['user-agent'] = 'Ansible-v2.3.4'
    cohesity_client = get_cohesity_client(module)
    # NOTE(review): 'backup_id' is not declared in argument_spec, so this
    # branch can never fire — confirm whether the option was meant to exist.
    if module.params.get('backup_id'):
        job_details['jobRunId'] = module.params.get('backup_id')
    if module.params.get('backup_timestamp'):
        job_details['backup_timestamp'] = module.params.get('backup_timestamp')
    job_exists = check__protection_restore__exists(module, job_details)
    if module.check_mode:
        # Report what would happen without submitting a restore task.
        check_mode_results = dict(
            changed=False,
            msg=
            "Check Mode: Cohesity Protection Restore Job is not currently registered",
            id="")
        if module.params.get('state') == "present":
            if job_exists:
                check_mode_results[
                    'msg'] = "Check Mode: Cohesity Protection Restore Job is currently registered. No changes"
            else:
                check_mode_results[
                    'msg'] = "Check Mode: Cohesity Protection Restore Job is not currently registered. This action would register the Cohesity Protection Job."
                check_mode_results['id'] = job_exists
        else:
            if job_exists:
                check_mode_results[
                    'msg'] = "Check Mode: Cohesity Protection Restore Job is currently registered. This action would unregister the Cohesity Protection Job."
                check_mode_results['id'] = job_exists
            else:
                check_mode_results[
                    'msg'] = "Check Mode: Cohesity Protection Restore Job is not currently registered. No changes."
        module.exit_json(**check_mode_results)
    elif module.params.get('state') == "present":
        if job_exists:
            results = dict(changed=False,
                           msg="The Restore Job for is already registered",
                           id=job_exists,
                           name=module.params.get('job_name') + ": " +
                           module.params.get('name'))
        else:
            response = []
            environment = module.params.get('environment')
            endpoint = module.params.get('endpoint')
            # => Gather the Source Details
            source_id = None
            vcenter_list = cohesity_client.protection_sources.list_protection_sources_root_nodes(
                environment='k' + environment)
            for vcenter in vcenter_list:
                if vcenter.protection_source.vmware_protection_source.name == endpoint:
                    source_id = vcenter.protection_source.id
            if not source_id:
                module.fail_json(
                    msg="Vcenter '%s' is not registered to the cluster" %
                    endpoint)
            vm_id = None
            vm_name = module.params.get("vm_name")
            restore_file_list = module.params.get('file_names')
            job_details['endpoint'] = source_id
            job_details['file_names'] = restore_file_list
            # Fetch the virtual machine source id, using which files can be searched.
            objects = cohesity_client.protection_sources.list_virtual_machines(
                v_center_id=source_id, names=vm_name)
            for each_object in objects:
                if each_object.name == vm_name:
                    vm_id = each_object.id
                    break
            # Validate each requested file against the backed-up snapshots.
            for file_name in job_details['file_names']:
                resp = cohesity_client.restore_tasks.search_restored_files(
                    environments='kVMware',
                    search=file_name,
                    source_ids=vm_id)
                # Fail if the file is not available.
                if not (resp and resp.files):
                    module.fail_json(
                        msg="File '%s' is not available to restore" %
                        file_name)
                for file_obj in resp.files:
                    if file_obj.filename != file_name and file_obj.protection_source.name != vm_name:
                        module.fail_json(
                            msg=
                            "File '%s' is not available in virtual machine '%s' to restore"
                            % (file_name, vm_name))
                    source_object_info = dict(
                        jobId=file_obj.job_id,
                        protectionSourceId=file_obj.source_id,
                        environment="kVMware")
                    get__job_information__for_file(module, source_object_info)
            # For VMware file restore, VM credentials are mandatory.
            # FIX: this check previously read the undeclared params
            # 'username'/'password' (always None), so the module failed here
            # on every run even when vm_username/vm_password were supplied.
            if not (module.params.get('vm_username')
                    or module.params.get('vm_password')):
                module.fail_json(
                    msg="Please provide VM credentials to proceed with restore."
                )
            restore_data = dict(name=module.params.get('job_name') + ": " +
                                module.params.get('name'),
                                filenames=restore_file_list,
                                targetSourceId=vm_id,
                                targetParentSourceId=source_id,
                                sourceObjectInfo=source_object_info,
                                token=job_details['token'],
                                overwrite=module.params.get('overwrite'),
                                username=module.params.get('vm_username'),
                                password=module.params.get('vm_password'),
                                preserveAttributes=module.params.get(
                                    'preserve_attributes'))
            if module.params.get('restore_location'):
                restore_data['newBaseDirectory'] = module.params.get(
                    'restore_location')
            response.append(start_restore__files(module, restore_data))
            # Optionally block until the submitted restore task finishes.
            task = dict(changed=False)
            for jobCheck in response:
                restore_data['id'] = jobCheck['id']
                restore_data['environment'] = environment
                if module.params.get('wait_for_job'):
                    task = wait_restore_complete(module, restore_data)
                    jobCheck['status'] = task['status']
            results = dict(changed=True,
                           msg="Registration of Cohesity Restore Job Complete",
                           name=module.params.get('job_name') + ": " +
                           module.params.get('name'),
                           restore_jobs=response)
            if 'file_names' in job_details:
                results['filenames'] = job_details['file_names']
            if not task['changed'] and module.params.get('wait_for_job'):
                # => If the task failed to complete, then the key 'changed' will be False and
                # => we need to fail the module.
                results['changed'] = False
                results.pop('msg')
                errorCode = ""
                # => Set the errorCode to match the task['error'] if the key exists
                if 'error' in task:
                    errorCode = task['error']
                module.fail_json(msg="Cohesity Restore Job Failed to complete",
                                 error=errorCode,
                                 **results)
    elif module.params.get('state') == "absent":
        results = dict(
            changed=False,
            msg=
            "Cohesity Restore: This feature (absent) has not be implemented yet.",
            name=module.params.get('job_name') + ": " +
            module.params.get('name'))
    else:
        # => This error should never happen based on the set assigned to the parameter.
        # => However, in case, we should raise an appropriate error.
        module.fail_json(msg="Invalid State selected: {}".format(
            module.params.get('state')),
                         changed=False)
    module.exit_json(**results)
def main():
    """Create or delete a Cohesity protection policy (supports check mode)."""
    # => Load the default arguments including those specific to the Cohesity protection policy.
    argument_spec = cohesity_common_argument_spec()
    argument_spec.update(
        # NOTE(review): several entries pass builtin types (type=int,
        # type=dict, type=list) instead of the conventional string names
        # ('int', 'dict', 'list') used by the sibling modules — verify.
        dict(name=dict(type='str', required=True),
             description=dict(type='str', default=''),
             state=dict(choices=['present', 'absent'], default='present'),
             days_to_retain=dict(type=int, default=90),
             incremental_backup_schedule=dict(type=dict, required=True),
             full_backup_schedule=dict(type=dict),
             blackout_window=dict(type=list),
             retries=dict(type=int, default=3),
             retry_interval=dict(type=int, default=30),
             bmr_backup_schedule=dict(type=dict),
             log_backup_schedule=dict(type=dict),
             extended_retention=dict(type=list),
             archival_copy=dict(type=list)))
    # => Create a new module object
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    results = dict(changed=False,
                   msg="Attempting to manage Cohesity protection policy",
                   state=module.params.get('state'))
    # Shared SDK client used by the helper functions below.
    global cohesity_client
    base_controller = BaseController()
    base_controller.global_headers['user-agent'] = 'cohesity-ansible/v2.3.4'
    cohesity_client = get_cohesity_client(module)
    policy_exists, policy_details = get_policy_details(module)
    if module.check_mode:
        # Report the action that would be taken without performing it.
        check_mode_results = dict(
            changed=False,
            msg="Check Mode: Cohesity protection policy doesn't exist",
            id="")
        if module.params.get('state') == "present":
            if policy_exists:
                check_mode_results['msg'] =\
                    "Check Mode: Cohesity protection policy is already present. No changes"
                check_mode_results['id'] = policy_details.id
            else:
                check_mode_results['msg'] =\
                    "Check Mode: Cohesity protection policy doesn't exist." \
                    " This action would create a protection policy"
                check_mode_results['changed'] = True
        else:
            if policy_exists:
                check_mode_results['msg'] =\
                    "Check Mode: Cohesity protection policy is present." \
                    "This action would delete the policy."
                check_mode_results['id'] = policy_details.id
                check_mode_results['changed'] = True
            else:
                check_mode_results['msg'] =\
                    "Check Mode: Cohesity protection policy doesn't exist. No changes."
        module.exit_json(**check_mode_results)
    elif module.params.get('state') == "present":
        if policy_exists:
            # Idempotency: a policy with this name already exists.
            results = dict(
                changed=False,
                msg=
                "The Cohesity protection policy with specified name is already present",
                id=policy_details.id,
                policy_name=module.params.get('name'))
        else:
            # NOTE(review): `results` is not updated on this path — presumably
            # create_policy() emits the module result itself; verify.
            create_policy(module)
    elif module.params.get('state') == "absent":
        if policy_exists:
            delete_policy(module, policy_details.id)
            results = dict(changed=True,
                           msg="Cohesity protection policy is deleted",
                           id=policy_details.id,
                           policy_name=module.params.get('name'))
        else:
            results = dict(changed=False,
                           msg="Cohesity protection policy doesn't exist",
                           policy_name=module.params.get('name'))
    else:
        # Unreachable given the 'state' choices, kept as a safety net.
        module.fail_json(msg="Invalid State selected: {}".format(
            module.params.get('state')),
                         changed=False)
    module.exit_json(**results)
def main():
    """Manage a Cohesity Oracle Protection Job.

    Handles check mode plus four states: present (create/update the job,
    optionally scoped to specific databases), absent (cancel active runs
    and unregister), started, and stopped.
    """
    # => Load the default arguments including those specific to the Cohesity Protection Jobs.
    argument_spec = cohesity_common_argument_spec()
    argument_spec.update(
        dict(
            state=dict(choices=['present', 'absent', 'started', 'stopped'],
                       default='present'),
            name=dict(type='str', required=True, aliases=['job_name']),
            description=dict(type='str', default=''),
            environment=dict(default='kOracle'),
            protection_policy=dict(type='str',
                                   aliases=['policy'],
                                   default='Bronze'),
            storage_domain=dict(type='str', default='DefaultStorageDomain'),
            time_zone=dict(type='str', default=''),
            start_time=dict(type='str', default=''),
            delete_backups=dict(type='bool', default=False),
            ondemand_run_type=dict(
                choices=['Regular', 'Full', 'Log', 'System'],
                default='Regular'),
            cancel_active=dict(type='bool', default=False),
            validate_certs=dict(type='bool', default=False),
            # NOTE(review): type=str / type=list pass builtin types rather
            # than the conventional 'str' / 'list' strings — verify.
            endpoint=dict(type=str, default=''),
            databases=dict(type=list, default=[]),
        ))
    # => Create a new module object
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    # Default the job time zone to the controller's local time zone.
    if not module.params.get("time_zone"):
        module.params["time_zone"] = get_timezone()
    global cohesity_client
    cohesity_client = get_cohesity_client(module)
    results = dict(changed=False,
                   msg='Attempting to manage Protection Source',
                   state=module.params.get('state'))
    job_exists, job_meta_data = check__protection_job__exists(module)
    if module.check_mode:
        # Report the action that would be taken without performing it.
        check_mode_results = dict(
            changed=False,
            msg=
            'Check Mode: Cohesity Protection Job is not currently registered',
            id='')
        if module.params.get('state') == 'present':
            if job_exists:
                check_mode_results[
                    'msg'] = 'Check Mode: Cohesity Protection Job is currently registered. No changes'
            else:
                check_mode_results[
                    'msg'] = 'Check Mode: Cohesity Protection Job is not currently registered. This action would register the Cohesity Protection Job.'
                check_mode_results['id'] = job_exists
        else:
            if job_exists:
                check_mode_results[
                    'msg'] = 'Check Mode: Cohesity Protection Job is currently registered. This action would unregister the Cohesity Protection Job.'
                check_mode_results['id'] = job_exists
            else:
                check_mode_results[
                    'msg'] = 'Check Mode: Cohesity Protection Job is not currently registered. No changes.'
        module.exit_json(**check_mode_results)
    elif module.params.get('state') == 'present':
        parent_id, source_id = get_source_id_by_endpoint(module)
        if not (parent_id and source_id):
            module.fail_json(
                msg=
                "Source '%s' is not registered to cluster, Please register the source and try again."
                % module.params.get('endpoint'))
        check__mandatory__params(module)
        # Build the create/update request payload.
        body = ProtectionJobRequestBody()
        body.name = module.params.get('name')
        body.parent_source_id = source_id
        body.source_ids = [source_id]
        body.view_box_id = get__storage_domain_id__by_name(module)
        body.environment = module.params.get('environment')
        body.policy_id = get__prot_policy_id__by_name(module)
        body.timezone = module.params.get('time_zone').strip()
        body.description = module.params.get('description')
        databases = module.params.get('databases')
        if databases:
            # Resolve each requested database name to its Oracle entity id.
            entity_ids = list()
            application_nodes = []
            body.source_special_parameters = list()
            resp = cohesity_client.protection_sources.list_protection_sources(
                environment='kOracle', id=parent_id)
            if not resp:
                module.fail_json(
                    msg="Oracle source is not available to protect")
            for node in resp[0].nodes:
                application_nodes.extend(node.get("applicationNodes", []))
            # Make copy of database list and remove once entity id fetched. This check
            # is to ensure availability of databases in server.
            copy_database = copy.deepcopy(databases)
            for database in databases:
                for node in application_nodes:
                    if node["protectionSource"]["name"] == database.strip():
                        entity_ids.append(node["protectionSource"]["id"])
                        copy_database.remove(database)
                if len(databases) == len(entity_ids):
                    break
            if copy_database:
                # NOTE(review): fail_json is called with a positional
                # argument here, unlike msg=... elsewhere — confirm the
                # installed AnsibleModule accepts this.
                module.fail_json(
                    "Following list of databases are not available in the "
                    "Oracle Server: %s" % ", ".join(copy_database))
            spl_params = SourceSpecialParameter()
            spl_params.source_id = source_id
            spl_params.oracle_special_parameters = OracleSpecialParameters()
            spl_params.oracle_special_parameters.application_entity_ids = entity_ids
            body.source_special_parameters.append(spl_params)
        if module.params.get('start_time'):
            # Expects HH:MM (or HHMM); anything else is rejected.
            start_time = list(module.params.get('start_time').replace(':', ''))
            if not len(start_time) == 4:
                # => There are only so many options here but if we get more characters
                # => than four then we need to escape quickly.
                module.fail_json(
                    msg='Invalid start_time selected (' +
                    module.params.get('start_time') +
                    '). Please review and submit the correct Protection Job Starting time.'
                )
            body.start_time = dict(hour=int(start_time[0] + start_time[1]),
                                   minute=int(start_time[2] + start_time[3]))
        try:
            if job_exists:
                response = cohesity_client.protection_jobs.update_protection_job(
                    body, job_exists)
                msg = 'Updation of Cohesity Protection Job Complete'
            else:
                response = cohesity_client.protection_jobs.create_protection_job(
                    body)
                msg = 'Creation of Cohesity Protection Job Complete'
            response = dict(id=response.id,
                            name=response.name,
                            environment=response.environment)
            results = dict(changed=True, msg=msg, **response)
        except APIException as err:
            module.fail_json(msg=err.message)
    elif module.params.get('state') == 'absent':
        if job_exists:
            job_id = job_meta_data.uid.id
            status, _, _ = get_protection_run__status__by_id(module, job_id)
            if status:
                # Cancel any active run, then poll until it is inactive.
                stop_job(module, job_id)
                while True:
                    status, _, _ = get_protection_run__status__by_id(
                        module, job_id)
                    if not status:
                        # NOTE(review): sleeping only on the exit path means
                        # this loop spins without delay while the run is
                        # still active — condition looks inverted; verify.
                        time.sleep(10)
                        break
            response = unregister_job(module, job_exists)
            results = dict(
                changed=True,
                msg='Unregistration of Cohesity Protection Job Complete',
                id=job_exists,
                name=module.params.get('name'))
        else:
            results = dict(
                changed=False,
                msg=
                'The Protection Job for this host is currently not registered',
                name=module.params.get('name'))
    elif module.params.get('state') == 'started':
        if job_exists:
            response = start_job(module)
            results = dict(
                changed=True,
                msg='The Protection Job for this host has been started',
                id=job_exists,
                name=module.params.get('name'))
        else:
            results = dict(
                changed=False,
                msg=
                'The Protection Job for this host is currently not registered',
                name=module.params.get('name'))
    elif module.params.get('state') == 'stopped':
        if job_exists:
            job_id = job_meta_data.uid.id
            response = stop_job(module, job_id)
            results = dict(
                changed=True,
                msg='The Protection Job for this host has been stopped',
                id=job_id,
                name=module.params.get('name'))
        else:
            results = dict(
                changed=False,
                msg=
                'The Protection Job for this host is currently not registered',
                name=module.params.get('name'))
    else:
        # => This error should never happen based on the set assigned to the parameter.
        # => However, in case, we should raise an appropriate error.
        module.fail_json(msg='Invalid State selected: {0}'.format(
            module.params.get('state')),
                         changed=False)
    module.exit_json(**results)
def main():
    """Entry point: manage a Cohesity Oracle (kOracle) Protection Job.

    Supported states:
      present - create a new protection job, or update the existing one
      absent  - cancel any active run, then unregister the job
      started - launch an on-demand run of an existing job
      stopped - cancel the active run of an existing job
    """
    # => Load the default arguments including those specific to the Cohesity
    # => Protection Jobs.
    argument_spec = cohesity_common_argument_spec()
    argument_spec.update(
        dict(
            state=dict(choices=['present', 'absent', 'started', 'stopped'],
                       default='present'),
            name=dict(type='str', required=True, aliases=['job_name']),
            description=dict(type='str', default=''),
            environment=dict(default='kOracle'),
            protection_policy=dict(type='str', aliases=['policy'],
                                   default='Bronze'),
            storage_domain=dict(type='str', default='DefaultStorageDomain'),
            time_zone=dict(type='str', default='America/Los_Angeles'),
            start_time=dict(type='str', default=''),
            delete_backups=dict(type='bool', default=False),
            ondemand_run_type=dict(
                choices=['Regular', 'Full', 'Log', 'System'],
                default='Regular'),
            cancel_active=dict(type='bool', default=False),
            validate_certs=dict(type='bool', default=False),
            # Use the string form 'str' for consistency with the other
            # options above (the bare builtin worked, but was inconsistent).
            endpoint=dict(type='str', default=''),
        ))

    # => Create a new module object
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    global cohesity_client
    cohesity_client = get_cohesity_client(module)
    results = dict(changed=False,
                   msg='Attempting to manage Protection Source',
                   state=module.params.get('state'))

    job_exists, job_meta_data = check__protection_job__exists(module)

    if module.check_mode:
        # => Report what WOULD happen without touching the cluster.
        check_mode_results = dict(
            changed=False,
            msg=
            'Check Mode: Cohesity Protection Job is not currently registered',
            id='')
        if module.params.get('state') == 'present':
            if job_exists:
                check_mode_results[
                    'msg'] = 'Check Mode: Cohesity Protection Job is currently registered. No changes'
            else:
                check_mode_results[
                    'msg'] = 'Check Mode: Cohesity Protection Job is not currently registered. This action would register the Cohesity Protection Job.'
                check_mode_results['id'] = job_exists
        else:
            if job_exists:
                check_mode_results[
                    'msg'] = 'Check Mode: Cohesity Protection Job is currently registered. This action would unregister the Cohesity Protection Job.'
                check_mode_results['id'] = job_exists
            else:
                check_mode_results[
                    'msg'] = 'Check Mode: Cohesity Protection Job is not currently registered. No changes.'
        module.exit_json(**check_mode_results)

    elif module.params.get('state') == 'present':
        parent_id, source_id = get_source_id_by_endpoint(module)
        check__mandatory__params(module)

        # => Build the request body from the validated module parameters.
        body = ProtectionJobRequestBody()
        body.name = module.params.get('name')
        body.parent_source_id = parent_id
        body.source_ids = [source_id]
        body.view_box_id = get__storage_domain_id__by_name(module)
        body.environment = module.params.get('environment')
        body.policy_id = get__prot_policy_id__by_name(module)
        body.timezone = module.params.get('time_zone').strip()
        body.description = module.params.get('description')
        if module.params.get('start_time'):
            start_time = list(
                module.params.get('start_time').replace(':', ''))
            if not len(start_time) == 4:
                # => There are only so many options here but if we get more
                # => characters than four then we need to escape quickly.
                module.fail_json(
                    msg='Invalid start_time selected (' +
                    module.params.get('start_time') +
                    '). Please review and submit the correct Protection Job Starting time.'
                )
            body.start_time = dict(hour=int(start_time[0] + start_time[1]),
                                   minute=int(start_time[2] + start_time[3]))

        # => Wrap the API calls so a cluster-side failure surfaces through
        # => fail_json instead of a raw traceback (matches the sibling
        # => protection-job main() in this file).
        try:
            if job_exists:
                response = cohesity_client.protection_jobs.update_protection_job(
                    body, job_exists)
                msg = 'Updation of Cohesity Protection Job Complete'
            else:
                response = cohesity_client.protection_jobs.create_protection_job(
                    body)
                msg = 'Creation of Cohesity Protection Job Complete'
            response = dict(id=response.id,
                            name=response.name,
                            environment=response.environment)
            results = dict(changed=True, msg=msg, **response)
        except APIException as err:
            module.fail_json(msg=err.message)

    elif module.params.get('state') == 'absent':
        if job_exists:
            job_id = job_meta_data.uid.id
            status, _, _ = get_protection_run__status__by_id(module, job_id)
            if status:
                # => A run is active: request a stop, then poll until the
                # => run is no longer active before unregistering.
                stop_job(module, job_id)
                while True:
                    status, _, _ = get_protection_run__status__by_id(
                        module, job_id)
                    if not status:
                        # NOTE(review): sleeping AFTER the run is already
                        # inactive (and only then breaking) looks inverted —
                        # preserved as-is from the original; confirm intent.
                        time.sleep(10)
                        break
            response = unregister_job(module, job_exists)
            results = dict(
                changed=True,
                msg='Unregistration of Cohesity Protection Job Complete',
                id=job_exists,
                name=module.params.get('name'))
        else:
            results = dict(
                changed=False,
                msg=
                'The Protection Job for this host is currently not registered',
                name=module.params.get('name'))

    elif module.params.get('state') == 'started':
        if job_exists:
            response = start_job(module)
            results = dict(
                changed=True,
                msg='The Protection Job for this host has been started',
                id=job_exists,
                name=module.params.get('name'))
        else:
            results = dict(
                changed=False,
                msg=
                'The Protection Job for this host is currently not registered',
                name=module.params.get('name'))

    elif module.params.get('state') == 'stopped':
        if job_exists:
            job_id = job_meta_data.uid.id
            response = stop_job(module, job_id)
            results = dict(
                changed=True,
                msg='The Protection Job for this host has been stopped',
                id=job_id,
                name=module.params.get('name'))
        else:
            results = dict(
                changed=False,
                msg=
                'The Protection Job for this host is currently not registered',
                name=module.params.get('name'))
    else:
        # => This error should never happen based on the set assigned to the
        # => parameter.  However, in case, we should raise an appropriate
        # => error.
        module.fail_json(msg='Invalid State selected: {0}'.format(
            module.params.get('state')),
                         changed=False)
    module.exit_json(**results)