def _process_drbackup_response(self, flag, response):
    """Processes the response received for the DR backup request.

        Args:
            flag        (bool)    --  boolean specifying whether the request was successful

            response    (object)  --  response received for the DR backup JSON request

        Returns:
            object - instance of the Job class for this DR backup job

        Raises:
            SDKException:
                if job initialization failed

                if response is empty

                if response is not success
    """
    if flag:
        if response.json():
            if "jobIds" in response.json():
                return Job(self.commcell, response.json()['jobIds'][0])
            elif "errorCode" in response.json():
                o_str = 'Initializing backup failed\nError: "{0}"'.format(
                    response.json()['errorMessage'])
                raise SDKException('Response', '102', o_str)
        else:
            raise SDKException('Response', '102')
    else:
        response_string = self.commcell._update_response_(response.text)
        raise SDKException('Response', '101', response_string)
def get(self, recovery_target_name):
    """Returns a recovery target object.

        Args:
            recovery_target_name    (str)   --  name of the recovery target

        Returns:
            object - instance of the RecoveryTarget class for the given target name

        Raises:
            SDKException:
                if type of the recovery target name argument is not string

                if no recovery target exists with the given name
    """
    if not isinstance(recovery_target_name, basestring):
        raise SDKException('Target', '101')
    else:
        recovery_target_name = recovery_target_name.lower()

        if self.has_recovery_target(recovery_target_name):
            return RecoveryTarget(
                self._commcell_object,
                recovery_target_name,
                self.all_targets[recovery_target_name])

        raise SDKException(
            'RecoveryTarget', '102',
            'No target exists with name: {0}'.format(recovery_target_name))
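# Usage sketch (hypothetical caller code, not part of this module; assumes a logged-in
# Commcell object that exposes a RecoveryTargets collection, e.g. `commcell.recovery_targets`):
#
#     targets = commcell.recovery_targets
#     if targets.has_recovery_target('vmware_dr_target'):
#         target = targets.get('vmware_dr_target')
#         print(target.destination_hypervisor)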
def _get_blr_pair_details(commcell_object):
    """Fetches the details of all BLR pairs.

    This function is used both by VMWareBackupset and by _BLRReplicationPair.

        Args:
            commcell_object     (Commcell)  --  Commcell object

        Returns:
            dict - dict containing the VM name as key and its pair details as value,
                   or None if no pairs exist
    """
    flag, response = commcell_object._cvpysdk_object.make_request(
        "GET", commcell_object._services["CONTINUOUS_REPLICATION_MONITOR"])

    try:
        assert response.json()["summary"]["totalPairs"] != 0
        return {pair["sourceName"]: pair for pair in response.json()["siteInfo"]}
    except AssertionError:
        return None
    except (JSONDecodeError, KeyError) as error:
        err_msg = "Failed to fetch BLR pair details. %s" % (
            response.json().get("errorMessage", "") if flag else "")
        raise SDKException("Backupset", 102, err_msg) from error
def _restore_in_place(self, **kwargs):
    """Restores the Azure AD objects in place.

        Args:
            kwargs  (dict)  --  additional restore options, passed from the subclient instance

        Returns:
            object - instance of the Job class for this restore job

        Raises:
            SDKException:
                '102' if the restore option is not valid
    """
    request_json = self._restore_json(**kwargs)

    if "overwrite" in kwargs['fs_options']:
        request_json["taskInfo"]["subTasks"][0]\
            ["options"]["restoreOptions"]['commonOptions']['unconditionalOverwrite'] = \
            kwargs['fs_options']['overwrite']

    if "azureADOption" in kwargs['restore_option']:
        request_json["taskInfo"]["subTasks"][0]\
            ["options"]["restoreOptions"]['azureADOption'] = \
            kwargs['restore_option']['azureADOption']
    else:
        raise SDKException('Instance', "102", "AzureAD option is not valid")

    return self._process_restore_response(request_json)
def _make_request(self, payload):
    """Makes a request to modify the BLR pair state and validates the response."""
    flag, response = self._commcell._cvpysdk_object.make_request(
        "PUT", self._commcell._services["CONTINUOUS_REPLICATION_MONITOR"], payload)

    try:
        assert response.json() == {"errorCode": 0}
    except (JSONDecodeError, AssertionError) as error:
        err_msg = "Failed to modify BLR pair state. %s" % (
            response.json().get("errorMessage", "") if flag else "")
        raise SDKException("Backupset", 102, err_msg) from error
def delete(self):
    """Deletes the BLR pair."""
    flag, response = self._commcell._cvpysdk_object.make_request(
        "DELETE", "%s/%s" % (
            self._commcell._services["CONTINUOUS_REPLICATION_MONITOR"],
            self._details["id"]))

    try:
        assert response.json() == {}
    except AssertionError as error:
        err_msg = "Failed to delete BLR pair. %s" % (
            response.json().get("errorMessage", "") if flag else "")
        raise SDKException("Backupset", 102, err_msg) from error
def _send_xml(self, request_dict):
    """Sends the boot request XML and returns the job ID."""
    xml_payload = xmltodict.unparse(request_dict)
    response = self._commcell.execute_qcommand("qoperation execute", xml_payload)

    try:
        return response.json()["jobIds"][0]
    except (KeyError, JSONDecodeError) as error:
        raise SDKException(
            "Backupset", 102,
            "Boot was not successful. %s" % response.json().get("errorMessage", "")) from error
def _restore_destination_json(self, value):
    """setter for the destination restore option in restore JSON"""
    if not isinstance(value, dict):
        raise SDKException('Subclient', '101')

    self._destination_restore_json = {
        "inPlace": value.get("in_place", True),
        "destClient": {
            "clientName": value.get("client_name", ""),
            "clientId": value.get("client_id", -1)
        }
    }
def run_report(self):
    """Executes the report.

        Returns:
            str - job ID of the report job
    """
    flag, response = self._cvpysdk_commcell_object.make_request(
        'POST', self._services['CREATE_TASK'], self._request_json)

    try:
        return response.json()['jobIds'][0]
    except Exception:
        raise SDKException("RunReportError", '101', response.json()["errorMessage"])
def _case_definition_request(self, defination_json):
    """Runs the case definition add API to add a definition for the case.

        Args:
            defination_json     (dict)  --  request JSON sent as the payload

        Raises:
            SDKException:
                if failed to add the definition

                if response is empty

                if response is not success
    """
    _CASE_DEFINITION = self._commcell_object._services['CASEDEFINITION']
    flag, response = self._commcell_object._cvpysdk_object.make_request(
        'POST', _CASE_DEFINITION, defination_json
    )

    if flag:
        try:
            if response.json():
                if 'cmDef' not in response.json():
                    error_message = response.json()['errorMessage']
                    output_string = 'Failed to add definition\nError: "{0}"'
                    raise SDKException(
                        'Subclient', '102', output_string.format(error_message)
                    )
                else:
                    self.refresh()
        except ValueError:
            raise SDKException('Response', '102')
    else:
        response_string = self._commcell_object._update_response_(response.text)
        raise SDKException('Response', '101', response_string)
def _process_createtask_response(self, request_json):
    """Runs the CreateTask API with the request JSON provided for DR backup,
        and returns the contents after parsing the response.

        Args:
            request_json    (dict)  --  JSON request to run for the API

        Returns:
            object - instance of the Job class for this DR backup job

        Raises:
            SDKException:
                if the DR backup job failed

                if response is empty

                if response is not success
    """
    flag, response = self.commcell._cvpysdk_object.make_request(
        'POST', self._CREATE_TASK, request_json)

    if flag:
        if response.json():
            if "jobIds" in response.json():
                return Job(self.commcell, response.json()['jobIds'][0])
            elif "errorCode" in response.json():
                error_message = response.json()['errorMessage']
                o_str = 'DR backup job failed\nError: "{0}"'.format(error_message)
                raise SDKException('Response', '102', o_str)
            else:
                raise SDKException('Response', '102', 'Failed to run the DR backup job')
        else:
            raise SDKException('Response', '102')
    else:
        response_string = self.commcell._update_response_(response.text)
        raise SDKException('Response', '101', response_string)
def export_report(self):
    """Executes the backup job summary report.

        Returns:
            str - job ID of the report job
    """
    response = self.commcell.execute_qcommand("qoperation execute", self.__xml)

    try:
        return response.json()["jobIds"][0]
    except Exception as error:
        raise SDKException(
            "Backupset", 102,
            "Failed to run the report. %s" % response.json().get("errorMessage", "")) from error
def _get_recovery_targets(self):
    """Gets all the recovery targets.

        Returns:
            dict - consists of all targets in the client
                {
                    "target1_name": target1_id,
                    "target2_name": target2_id
                }

        Raises:
            SDKException:
                if response is empty

                if response is not success
    """
    flag, response = self._cvpysdk_object.make_request('GET', self._RECOVERY_TARGETS)

    if flag:
        if response.json() and 'policy' in response.json():
            recovery_target_dict = {}

            for dictionary in response.json()['policy']:
                temp_name = dictionary['entity']['vmAllocPolicyName'].lower()
                recovery_target_dict[temp_name] = str(dictionary['entity']['vmAllocPolicyId'])

            return recovery_target_dict
        else:
            raise SDKException('Response', '102')
    else:
        raise SDKException('Response', '101', self._update_response_(response.text))
def restore_in_place(
        self,
        paths,
        overwrite=True,
        copy_precedence=None,
        no_of_streams=2):
    """Restores the files/folders specified in the input paths list to the same location.

        Args:
            paths           (list)  --  list of full paths of files/folders to restore

            overwrite       (bool)  --  unconditional overwrite files during restore
                default: True

            copy_precedence (int)   --  copy precedence value of storage policy copy
                default: None

            no_of_streams   (int)   --  number of streams for restore
                default: 2

        Returns:
            object - instance of the Job class for this restore job

        Raises:
            SDKException:
                if paths is not a list

                if failed to initialize job

                if response is empty

                if response is not success
    """
    if not (isinstance(paths, list) and isinstance(overwrite, bool)):
        raise SDKException('Instance', '101')

    request_json = self._generate_json(
        paths=paths,
        destination_client=self.client_name,
        destination_instance_name=self.instance_name,
        overwrite=overwrite,
        in_place=True,
        copy_precedence=copy_precedence,
        restore_To_FileSystem=False,
        no_of_streams=no_of_streams)

    return self._process_restore_response(request_json)
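# Usage sketch (hypothetical caller code, not part of this module; assumes `instance` is an
# object of this class obtained through the usual client/agent/instance accessors):
#
#     job = instance.restore_in_place(
#         paths=['/bucket/folder/file.txt'],
#         overwrite=True,
#         no_of_streams=4)
#     job.wait_for_completion()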
def _restore_common_options_json(self, value):
    """setter for the common options in restore JSON"""
    if not isinstance(value, dict):
        raise SDKException('Instance', '101')

    self._commonoption_restore_json = {
        "allVersion": True,
        "offlineMiningRestore": False,
        "skip": True,
        "restoreACLs": False,
        "erExSpdbPathRestore": True,
        "unconditionalOverwrite": False,
        "siteReplicationrestore": False,
        "append": False
    }
def get_blr_replication_pair(self, vm_name):
    """Fetches the BLR pair.

        Args:
            vm_name     (str)   --  name of the VM

        Returns:
            object - an instance of _BLRReplicationPair
    """
    try:
        return _BLRReplicationPair(
            self._commcell_object, vm_name, self._blr_pair_details[vm_name])
    except KeyError as error:
        raise SDKException(
            "Backupset", 102,
            "Cannot find the VM with the given name[Names are case sensitive]") from error
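# Usage sketch (hypothetical caller code, not part of this module; assumes `backupset` is the
# object exposing get_blr_replication_pair and that a BLR pair exists for the named VM):
#
#     pair = backupset.get_blr_replication_pair('demo_vm_01')
#     pair.delete()      # removes the continuous replication pair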
def _set_common_options_json(self, value):
    """Setter for the common options in restore JSON.

        Args:
            value   (dict)  --  dict of common options for the restore JSON
    """
    if not isinstance(value, dict):
        raise SDKException('Instance', '101')

    self._common_options_json = {
        "overwriteFiles": True,
        "unconditionalOverwrite": value.get("overwrite", False),
        "stripLevelType": 1
    }
def has_recovery_target(self, target_name):
    """Checks if a recovery target is present in the commcell.

        Args:
            target_name     (str)   --  name of the target

        Returns:
            bool - boolean output specifying whether the target is present in the commcell

        Raises:
            SDKException:
                if type of the target name argument is not string
    """
    if not isinstance(target_name, basestring):
        raise SDKException('Target', '101')

    return self._recovery_targets and target_name.lower() in self._recovery_targets
def _restore_browse_option_json(self, value):
    """setter for the browse options for restore in JSON"""
    if not isinstance(value, dict):
        raise SDKException('Instance', '101')

    time_range_dict = {}
    if value.get('to_time'):
        time_range_dict['toTime'] = value.get('to_time')

    self._browse_restore_json = {
        "commCellId": int(self._commcell_object.commcell_id),
        "showDeletedItems": value.get("showDeletedItems", False),
        "backupset": {
            "clientName": self._agent_object._client_object.client_name,
            "appName": self._agent_object.agent_name,
            "clientId": int(self._instance['clientId']),
            "backupsetId": int(self._restore_association['backupsetId'])
        },
        "timeRange": time_range_dict
    }
def disaster_recovery_backup(self):
    """Runs a DR backup job for the CommServe.

        The backup level is taken from the `backuptype` attribute and must be
        either Full or Differential (default: Full).

        Returns:
            object - instance of the Job class for this backup job

        Raises:
            SDKException:
                if the backup level specified is not correct

                if response is empty

                if response is not success
    """
    if self.backuptype.lower() not in ['full', 'differential']:
        raise SDKException('Response', '103')

    backuptypes = {"full": 1, "differential": 3}

    if self.advbackup:
        self.backuptype = backuptypes[self.backuptype.lower()]
        return self._advanced_dr_backup()
    else:
        dr_service = self.commcell._services['DRBACKUP']
        request_json = {
            "isCompressionEnabled": self.iscompression_enabled,
            "jobType": 1,
            "backupType": backuptypes[self.backuptype.lower()]
        }

        flag, response = self.commcell._cvpysdk_object.make_request(
            'POST', dr_service, request_json)

        return self._process_drbackup_response(flag, response)
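# Usage sketch (hypothetical caller code, not part of this module; assumes `dr` is the
# disaster recovery helper object exposing this method, with its `backuptype` and
# `advbackup` attributes already initialized):
#
#     dr.backuptype = 'differential'
#     job = dr.disaster_recovery_backup()
#     job.wait_for_completion()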
def _get_recovery_target_properties(self):
    """Gets the properties of this recovery target.

        Raises:
            SDKException:
                if response is empty

                if response is not success
    """
    flag, response = self._cvpysdk_object.make_request('GET', self._RECOVERY_TARGET)

    if flag:
        if response.json() and 'policy' in response.json():
            self._recovery_target_properties = response.json()['policy'][0]
            self._application_type = self._recovery_target_properties['vmPolicyAppType']
            self._destination_hypervisor = self._recovery_target_properties[
                'destinationHyperV']['clientName']

            vm_name_edit_string = self._recovery_target_properties.get('vmNameEditString')
            vm_name_edit_type = self._recovery_target_properties.get('vmNameEditType', 1)
            if vm_name_edit_string and vm_name_edit_type == 2:
                self._vm_suffix = self._recovery_target_properties['vmNameEditString']
            elif vm_name_edit_string and vm_name_edit_type == 1:
                self._vm_prefix = self._recovery_target_properties['vmNameEditString']

            self._access_node = self._recovery_target_properties[
                'proxyClientEntity']['clientName']
            self._users = self._recovery_target_properties['securityAssociations']['users']
            self._policy_type = self._recovery_target_properties["entity"]["policyType"]

            if self._policy_type == 1:
                self._availability_zone = (
                    self._recovery_target_properties.get('amazonPolicy', {})
                    .get('availabilityZones', [{}])[0].get('availabilityZoneName', None))
                self._volume_type = self._recovery_target_properties.get(
                    'amazonPolicy', {}).get('volumeType', None)
                # TODO: Encryption key support for SDK
                self._encryption_key = None
                self._destination_network = self._recovery_target_properties.get(
                    'networkList', [{}])[0].get('name', None)
                self._security_group = self._recovery_target_properties.get(
                    'securityGroups', [{}])[0].get('name', '')
                self._instance_type = (
                    self._recovery_target_properties.get('amazonPolicy', {})
                    .get('instanceType', [{}])[0].get('instanceType', {})
                    .get('vmInstanceTypeName', ''))

                expiry_hours = self._recovery_target_properties.get("minutesRetainUntil", None)
                expiry_days = self._recovery_target_properties.get("daysRetainUntil", None)
                if expiry_hours:
                    self._expiration_time = f'{expiry_hours} hours'
                elif expiry_days:
                    self._expiration_time = f'{expiry_days} days'

                self._test_virtual_network = self._recovery_target_properties.get(
                    'networkInfo', [{}])[0].get('label', None)
                self._test_vm_size = (
                    self._recovery_target_properties.get('amazonPolicy', {})
                    .get('vmInstanceTypes', [{}])[0].get('vmInstanceTypeName', ''))

            elif self._policy_type == 2:
                self._vm_folder = self._recovery_target_properties[
                    'dataStores'][0]['dataStoreName']
                self._destination_network = self._recovery_target_properties[
                    'networkList'][0]['networkName']

            elif self._policy_type == 7:
                self._resource_group = self._recovery_target_properties[
                    'esxServers'][0]['esxServerName']
                self._region = self._recovery_target_properties['region']
                self._availability_zone = (
                    self._recovery_target_properties['amazonPolicy']
                    ['availabilityZones'][0]['availabilityZoneName'])
                self._storage_account = self._recovery_target_properties[
                    'dataStores'][0]['dataStoreName']
                self._vm_size = (
                    self._recovery_target_properties['amazonPolicy']
                    ['vmInstanceTypes'][0]['vmInstanceTypeName'])
                self._disk_type = self._recovery_target_properties[
                    'amazonPolicy']['volumeType']
                self._virtual_network = self._recovery_target_properties[
                    'networkList'][0]['networkDisplayName']
                self._security_group = self._recovery_target_properties[
                    'securityGroups'][0]['name']
                self._create_public_ip = self._recovery_target_properties[
                    'isPublicIPSettingsAllowed']
                self._restore_as_managed_vm = self._recovery_target_properties[
                    'restoreAsManagedVM']

                expiry_hours = self._recovery_target_properties.get("minutesRetainUntil")
                expiry_days = self._recovery_target_properties.get("daysRetainUntil")
                if expiry_hours:
                    self._expiration_time = f'{expiry_hours} hours'
                elif expiry_days:
                    self._expiration_time = f'{expiry_days} days'

                self._test_virtual_network = self._recovery_target_properties[
                    'networkInfo'][0]['label']
                self._test_vm_size = (
                    self._recovery_target_properties['amazonPolicy']
                    ['instanceType'][0]['instanceType']['vmInstanceTypeName'])

            elif self._policy_type == 13:
                self._destination_host = self._recovery_target_properties[
                    'esxServers'][0]['esxServerName']
                self._datastore = self._recovery_target_properties[
                    'dataStores'][0]['dataStoreName']
                self._resource_pool = self._recovery_target_properties['resourcePoolPath']
                self._vm_folder = self._recovery_target_properties['folderPath']
                self._destination_network = self._recovery_target_properties[
                    'networkList'][0]['destinationNetwork']

                expiry_hours = self._recovery_target_properties.get("minutesRetainUntil")
                expiry_days = self._recovery_target_properties.get("daysRetainUntil")
                if expiry_hours:
                    self._expiration_time = f'{expiry_hours} hours'
                elif expiry_days:
                    self._expiration_time = f'{expiry_days} days'

                if self._recovery_target_properties.get('mediaAgent', {}):
                    self._failover_ma = self._recovery_target_properties[
                        'mediaAgent']['clientName']

                self._isolated_network = self._recovery_target_properties.get(
                    "createIsolatedNetwork")
                self._no_of_cpu = self._recovery_target_properties.get('maxCores')
                self._no_of_vm = self._recovery_target_properties.get('maxVMQuota')
                self._iso_paths = [
                    iso['isoPath']
                    for iso in self._recovery_target_properties.get('isoInfo', [])
                ]
                if self._recovery_target_properties.get('associatedClientGroup'):
                    self._server_group = self._recovery_target_properties[
                        "associatedClientGroup"]["clientGroupName"]
        else:
            raise SDKException('Response', '102')
    else:
        raise SDKException('Response', '101', self._update_response_(response.text))
def restore_out_of_place(
        self,
        client,
        destination_path,
        overwrite=True,
        restore_data_and_acl=True,
        copy_precedence=None,
        from_time=None,
        to_time=None,
        fs_options=None):
    """Restores the DR backup files/folders to the input client, at the specified
        destination location.

        Args:
            client                (str/object)  --  either the name of the client or
                                                     the instance of the Client

            destination_path      (str)         --  full path of the restore location on client

            overwrite             (bool)        --  unconditional overwrite files during restore
                default: True

            restore_data_and_acl  (bool)        --  restore data and ACL files
                default: True

            copy_precedence       (int)         --  copy precedence value of storage policy copy
                default: None

            from_time             (str)         --  time to restore the contents after
                    format: YYYY-MM-DD HH:MM:SS
                default: None

            to_time               (str)         --  time to restore the contents before
                    format: YYYY-MM-DD HH:MM:SS
                default: None

            fs_options            (dict)        --  dictionary that includes all advanced options
                options:
                    preserve_level      : preserve level option to set in restore
                    proxy_client        : proxy that needs to be used for restore
                    impersonate_user    : impersonate user option for restore
                    impersonate_password: impersonate password option for restore
                                          in base64 encoded form
                    all_versions        : if set to True restores all the versions of the
                                          specified file
                    versions            : list of version numbers to be backed up

        Returns:
            object - instance of the Job class for this restore job

        Raises:
            SDKException:
                if client is not a string or Client instance

                if destination_path is not a string

                if failed to initialize job

                if response is empty

                if response is not success
    """
    if not ((isinstance(client, str) or isinstance(client, Client)) and
            isinstance(destination_path, str) and
            isinstance(overwrite, bool) and
            isinstance(restore_data_and_acl, bool)):
        raise SDKException('Response', '101')

    if fs_options is None:
        fs_options = {}

    if isinstance(client, str):
        client = Client(self.commcell, client)
    elif not isinstance(client, Client):
        raise SDKException('Response', '105')

    drpath = self.path + "\\CommserveDR"
    destination_path = self._filter_paths([destination_path], True)
    drpath = [self._filter_paths([drpath], True)]

    if drpath == []:
        raise SDKException('Response', '104')

    # client_obj = self.commcell.clients.get(self.commcell.commserv_name)
    agent_obj = client.agents.get("File System")
    instance_obj = agent_obj.instances.get("DefaultInstanceName")
    instance_obj._restore_association = {
        "type": "0",
        "backupsetName": "DR-BackupSet",
        "instanceName": "DefaultInstanceName",
        "appName": "CommServe Management",
        "clientName": self.commcell.commserv_name,
        "consumeLicense": True,
        "clientSidePackage": True,
        "subclientName": ""
    }

    return instance_obj._restore_out_of_place(
        client,
        destination_path,
        paths=drpath,
        overwrite=overwrite,
        restore_data_and_acl=restore_data_and_acl,
        copy_precedence=copy_precedence,
        from_time=from_time,
        to_time=to_time,
        fs_options=fs_options)
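# Usage sketch (hypothetical caller code, not part of this module; assumes `dr_backupset`
# exposes this restore_out_of_place method and that 'client2' is a client in the CommCell):
#
#     job = dr_backupset.restore_out_of_place(
#         client='client2',
#         destination_path='C:\\DR_Restores',
#         overwrite=True)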
def __init__(self, backupset_object, subclient_name, subclient_id):
    """Raises an exception, as subclients for this instance are not yet supported."""
    raise SDKException(
        'Subclient', '102',
        'Subclient for Instance: "{0}" is not yet supported'.format(
            backupset_object._instance_object.instance_name))
def __init__(self, agent_object, instance_name, instance_id=None):
    """Raises an exception, as this instance type is not yet supported."""
    raise SDKException(
        'Instance', '102',
        'Instance: "{0}" is not yet supported'.format(instance_name))
def restore_out_of_place(
        self,
        paths,
        destination_client,
        destination_instance_name,
        destination_path,
        overwrite=True,
        copy_precedence=None):
    """Restores the files/folders specified in the input paths list to the input client,
        at the specified destination location.

        Args:
            paths                     (list)  --  list of full paths of files/folders to restore

            destination_client        (str)   --  name of the client to which the files
                                                   are to be restored

            destination_instance_name (str)   --  name of the instance to which the files
                                                   are to be restored

            destination_path          (str)   --  location where the files are to be restored
                                                   in the destination instance

            overwrite                 (bool)  --  unconditional overwrite files during restore
                default: True

            copy_precedence           (int)   --  copy precedence value of storage policy copy
                default: None

        Returns:
            object - instance of the Job class for this restore job

        Raises:
            SDKException:
                if client is not a string or Client object

                if destination_path is not a string

                if paths is not a list

                if failed to initialize job

                if response is empty

                if response is not success
    """
    if not ((isinstance(destination_client, basestring) or
             isinstance(destination_client, Client)) and
            isinstance(destination_instance_name, basestring) and
            isinstance(destination_path, basestring) and
            isinstance(paths, list) and
            isinstance(overwrite, bool)):
        raise SDKException('Instance', '101')

    request_json = self._generate_json(
        paths=paths,
        destination_client=destination_client,
        destination_instance_name=destination_instance_name,
        destination_path=destination_path,
        overwrite=overwrite,
        in_place=False,
        copy_precedence=copy_precedence,
        restore_To_FileSystem=False)

    return self._process_restore_response(request_json)
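# Usage sketch (hypothetical caller code, not part of this module; assumes `instance` is an
# object of this class and that the destination client/instance names exist in the CommCell):
#
#     job = instance.restore_out_of_place(
#         paths=['/source-bucket/data'],
#         destination_client='dest_cloud_client',
#         destination_instance_name='dest_instance',
#         destination_path='/dest-bucket/data')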
def add_definition(self, definition_name, custodian_info, email_filters=None):
    """Adds a definition for the UserMailboxSubclient.

        Args:
            definition_name (str)   --  name of the case definition to add

            custodian_info  (list)  --  list of custodian user info dicts for the case subclient

                custodian_info = [
                    {
                        "smtp": "*****@*****.**",
                        "name": "ee2",
                        "guid": "1b690719-72af-4d13-9ce0-577962cd165d"
                    },
                    {
                        "smtp": "*****@*****.**",
                        "name": "ee15",
                        "guid": "86139703-b8e7-41b9-824f-47f3f4b0dde1"
                    }
                ]

            email_filters   (dict)  --  email filters to apply to the definition
                default: None

        Raises:
            SDKException:
                if custodian_info is not a list

                if a required key is missing from the custodian info
    """
    if not isinstance(custodian_info, list):
        raise SDKException('Subclient', '101')

    try:
        self._filter_list = []
        if email_filters:
            self._prepare_email_filter_list(email_filters)

        self.custodian_info = []
        for mailbox_item in custodian_info:
            custodian_dict = {
                'smtp': mailbox_item['smtp'],
                'name': mailbox_item['name'],
                'guid': mailbox_item['guid'],
                'isGroup': 0
            }
            self.custodian_info.append(custodian_dict)
    except KeyError as err:
        raise SDKException('Subclient', '102', '{} not given in content'.format(err))

    definition_json = {
        "mode": 1,
        "cmDef": {
            "caseId": int(self._client_object.client_id),
            "name": definition_name,
            "ownerId": 274,
            "defXml": {
                "holdInfo": self.json_hold_info,
                "custodianInfo": self.custodian_info,
                "searchReq": self.json_search_request
            }
        }
    }

    self._case_definition_request(definition_json)
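# Usage sketch (hypothetical caller code, not part of this module; assumes `case_subclient`
# is a UserMailboxSubclient of a case client; the custodian values below are placeholders):
#
#     custodians = [{
#         "smtp": "user@example.com",
#         "name": "user",
#         "guid": "00000000-0000-0000-0000-000000000000"
#     }]
#     case_subclient.add_definition('definition_1', custodians)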
def restore_using_proxy(
        self,
        paths,
        destination_client_proxy,
        destination_path,
        overwrite=True,
        copy_precedence=None,
        destination_cloud=None):
    """Performs a restore to a different cloud using a proxy, passing explicit
        credentials of the destination cloud.

        Args:
            destination_client_proxy (str)         --  name of the proxy machine having the
                                                        cloud connector package

            paths                    (list)        --  list of full paths of files/folders
                                                        to restore

            destination_path         (str)         --  location where the files are to be
                                                        restored in the destination instance

            overwrite                (bool)        --  unconditional overwrite files during
                                                        restore
                default: True

            copy_precedence          (int)         --  copy precedence value of storage
                                                        policy copy
                default: None

            destination_cloud        (dict(dict))  --  dict of dict representing cross-cloud
                                                        credentials

                Sample dict(dict):

                    destination_cloud = {
                        'google_cloud': {
                            'google_host_url': 'storage.googleapis.com',
                            'google_access_key': 'xxxxxx',
                            'google_secret_key': 'yyyyyy'
                        }
                    }

                    destination_cloud = {
                        'amazon_s3': {
                            's3_host_url': 's3.amazonaws.com',
                            's3_access_key': 'xxxxxx',
                            's3_secret_key': 'yyyyyy'
                        }
                    }

                    destination_cloud = {
                        'azure_blob': {
                            'azure_host_url': 'blob.core.windows.net',
                            'azure_account_name': 'xxxxxx',
                            'azure_access_key': 'yyyyyy'
                        }
                    }

        Returns:
            object - instance of the Job class for this restore job

        Raises:
            SDKException:
                if destination cloud credentials are empty

                if destination cloud has more than one vendor's details

                if an unsupported destination cloud for restore is chosen

                if client is not a string or Client object

                if destination_path is not a string

                if paths is not a list

                if failed to initialize job

                if response is empty

                if response is not success
    """
    # Check if destination cloud credentials are empty
    if destination_cloud is None:
        raise SDKException('Instance', '102', 'Destination Cloud Credentials empty')

    if len(destination_cloud) > 1:
        raise SDKException(
            'Instance', '102',
            'Only one cloud vendor\'s details can be passed. Multiple entries not allowed')

    # Check if destination cloud falls within supported cloud vendors
    cloud_vendors = ["google_cloud", "amazon_s3", "azure_blob"]
    dict_keys = list(destination_cloud.keys())
    if dict_keys[0] not in cloud_vendors:
        raise SDKException('Instance', '102', 'Unsupported destination cloud for restore')

    if not ((isinstance(destination_client_proxy, basestring) or
             isinstance(destination_client_proxy, Client)) and
            isinstance(destination_path, basestring) and
            isinstance(paths, list) and
            isinstance(overwrite, bool)):
        raise SDKException('Instance', '101')

    request_json = self._generate_json(
        paths=paths,
        destination_proxy=True,
        destination_client=destination_client_proxy,
        destination_instance_name=None,
        destination_path=destination_path,
        overwrite=overwrite,
        in_place=False,
        copy_precedence=copy_precedence,
        restore_To_FileSystem=False)

    self._set_proxy_credential_json(destination_cloud)

    request_json["taskInfo"]["subTasks"][0]["options"]["restoreOptions"][
        "cloudAppsRestoreOptions"]["cloudStorageRestoreOptions"][
            "restoreDestination"] = self._proxy_credential_json
    request_json["taskInfo"]["subTasks"][0]["options"]["restoreOptions"][
        "cloudAppsRestoreOptions"]["cloudStorageRestoreOptions"][
            "overrideCloudLogin"] = True
    request_json["taskInfo"]["subTasks"][0]["options"]["restoreOptions"][
        "browseOption"]["backupset"].update({"backupsetName": "defaultBackupSet"})
    request_json["taskInfo"]["associations"][0]["backupsetName"] = "defaultBackupSet"

    return self._process_restore_response(request_json)
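# Usage sketch (hypothetical caller code, not part of this module; assumes `instance` is a
# cloud storage instance, 'proxy_client' has the cloud connector package installed, and the
# credential values below are placeholders):
#
#     destination_cloud = {
#         'amazon_s3': {
#             's3_host_url': 's3.amazonaws.com',
#             's3_access_key': 'xxxxxx',
#             's3_secret_key': 'yyyyyy'
#         }
#     }
#     job = instance.restore_using_proxy(
#         paths=['/source-container/data'],
#         destination_client_proxy='proxy_client',
#         destination_path='/dest-bucket/data',
#         destination_cloud=destination_cloud)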
def full_vm_restore_out_of_place(
        self,
        vm_to_restore=None,
        destination_client=None,
        proxy_client=None,
        new_name=None,
        host=None,
        datastore=None,
        overwrite=True,
        power_on=True,
        copy_precedence=0,
        disk_provisioning='original'):
    """Restores the FULL virtual machine specified in the input to the provided
        vCenter client along with the ESX and the datastores.
        If the provided client name is None, the full virtual machine is restored
        to the source client and the corresponding ESX and datastore.

        Args:
            vm_to_restore       (str)         --  name of the VM to restore
                default: None

            destination_client  (basestring)  --  name of the pseudo client where the VM
                                                   should be restored

            proxy_client        (basestring)  --  proxy client to be used for restore
                default: proxy added in subclient

            new_name            (basestring)  --  new name to be given to the restored VM

            host                (basestring)  --  destination cluster or host
                                                   restores to the source VM ESX if this value
                                                   is not specified

            datastore           (basestring)  --  datastore where the restored VM should be
                                                   located
                                                   restores to the source VM datastore if this
                                                   value is not specified

            overwrite           (bool)        --  overwrite the existing VM
                default: True

            power_on            (bool)        --  power on the restored VM
                default: True

            copy_precedence     (int)         --  copy precedence value
                default: 0

            disk_provisioning   (basestring)  --  disk provisioning for the restored VM
                default: 'original'

        Returns:
            object - instance of the Job class for this restore job

        Raises:
            SDKException:
                if inputs are not of correct type as per definition

                if failed to initialize job

                if response is empty

                if response is not success
    """
    restore_option = {}

    if vm_to_restore:
        vm_to_restore = [vm_to_restore]

    if new_name:
        if not (isinstance(vm_to_restore, basestring) or isinstance(new_name, basestring)):
            raise SDKException('Subclient', '101')
        restore_option['restore_new_name'] = new_name

    # set attr for all the options in restore xml from user inputs
    self._set_restore_inputs(
        restore_option,
        vm_to_restore=self._set_vm_to_restore(vm_to_restore),
        unconditional_overwrite=overwrite,
        power_on=power_on,
        disk_option=self._disk_option[disk_provisioning],
        copy_precedence=copy_precedence,
        volume_level_restore=1,
        client_name=proxy_client,
        vcenter_client=destination_client,
        esx_host=host,
        datastore=datastore,
        in_place=False,
        restore_new_name=new_name)

    request_json = self._prepare_fullvm_restore_json(restore_option)
    return self._process_restore_response(request_json)
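# Usage sketch (hypothetical caller code, not part of this module; assumes `vsa_subclient` is
# a virtual server subclient exposing this method and the named vCenter/host/datastore exist):
#
#     job = vsa_subclient.full_vm_restore_out_of_place(
#         vm_to_restore='demo_vm',
#         destination_client='vcenter_dest',
#         new_name='demo_vm_restored',
#         host='esx01.example.com',
#         datastore='datastore1')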
def _generatedrbackupjson(self):
    """Generates the JSON corresponding to the DR backup job."""
    try:
        self._task = {
            "taskFlags": {
                "disabled": False
            },
            "policyType": "DATA_PROTECTION",
            "taskType": "IMMEDIATE",
            "initiatedFrom": "COMMANDLINE"
        }
        self._subtask = {
            "subTaskType": "ADMIN",
            "operationType": "DRBACKUP"
        }

        clientdict = []
        if self._client_list is not None:
            for client in self._client_list:
                client = {
                    "type": 0,
                    "clientName": client,
                    "clientSidePackage": True,
                    "consumeLicense": True
                }
                clientdict.append(client)

        self._droptions = {
            "drbackupType": self.backuptype,
            "dbName": "commserv",
            "backupHistoryDataBase": self.ishistorydb,
            "backupWFEngineDataBase": self.isworkflowdb,
            "enableDatabasesBackupCompression": self.iscompression_enabled,
            "client": clientdict
        }

        request_json = {
            "taskInfo": {
                "task": self._task,
                "subTasks": [{
                    "subTaskOperation": 1,
                    "subTask": self._subtask,
                    "options": {
                        "adminOpts": {
                            "drBackupOption": self._droptions,
                            "contentIndexingOption": {
                                "subClientBasedAnalytics": False
                            }
                        },
                        "restoreOptions": {
                            "virtualServerRstOption": {
                                "isBlockLevelReplication": False
                            }
                        }
                    }
                }]
            }
        }
        return request_json
    except Exception as err:
        raise SDKException('Response', '101', err)
def restore_to_fs(
        self,
        paths,
        destination_path,
        destination_client=None,
        overwrite=True,
        copy_precedence=None,
        no_of_streams=2):
    """Restores the files/folders specified in the input paths list to the input client,
        at the specified destination location.

        Args:
            paths              (list)  --  list of full paths of files/folders to restore

            destination_path   (str)   --  location where the files are to be restored
                                            in the destination instance

            destination_client (str)   --  name of the FS client to which the files are
                                            to be restored
                default: None, for restores to the backup or proxy client

            overwrite          (bool)  --  unconditional overwrite files during restore
                default: True

            copy_precedence    (int)   --  copy precedence value of storage policy copy
                default: None

            no_of_streams      (int)   --  number of streams for restore
                default: 2

        Returns:
            object - instance of the Job class for this restore job

        Raises:
            SDKException:
                if client is not a string or Client object

                if destination_path is not a string

                if paths is not a list

                if failed to initialize job

                if response is empty

                if response is not success
    """
    if not ((isinstance(destination_client, basestring) or
             isinstance(destination_client, Client)) and
            isinstance(destination_path, basestring) and
            isinstance(paths, list) and
            isinstance(overwrite, bool)):
        raise SDKException('Instance', '101')

    destination_appTypeId = int(
        self._commcell_object.clients.get(destination_client).agents.get(
            'file system').agent_id)

    request_json = self._generate_json(
        paths=paths,
        destination_path=destination_path,
        destination_client=destination_client,
        overwrite=overwrite,
        in_place=False,
        copy_precedence=copy_precedence,
        restore_To_FileSystem=True,
        no_of_streams=no_of_streams,
        destination_appTypeId=destination_appTypeId)

    return self._process_restore_response(request_json)
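# Usage sketch (hypothetical caller code, not part of this module; assumes `instance` is a
# cloud apps instance and 'fs_client' is a file system client in the CommCell):
#
#     job = instance.restore_to_fs(
#         paths=['/bucket/archive'],
#         destination_path='/tmp/restores',
#         destination_client='fs_client',
#         no_of_streams=4)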