def login():
    """Interactively sign in to the Commcell and return the session.

    Credentials are read from the JSON file at ``cvauthfile`` when it
    exists (keys: ``username``, ``password`` (encrypted), ``consoleurl``);
    otherwise the user is prompted on stdin.

    Returns:
        Commcell: an authenticated Commcell session object.

    Exits the process with status 1 when the login attempt fails.
    """
    auth_read = False
    if os.path.isfile(cvauthfile):
        # use a context manager so the auth file handle is not leaked
        with open(cvauthfile) as fh:
            authdata = json.load(fh)
        if 'username' in authdata and 'password' in authdata and 'consoleurl' in authdata:
            commcell_username = authdata['username']
            # stored password is encrypted with the module-level key
            commcell_password = decode(enckey, authdata['password'])
            webconsole_hostname = authdata['consoleurl']
            auth_read = True
    if not auth_read:
        # NOTE(review): the original prompts were destroyed by a credential
        # scrubber ("Username: "******"Username: "******...). The prompts
        # below are a reconstruction — confirm wording against upstream.
        import getpass
        webconsole_hostname = input("Webconsole hostname: ")
        commcell_username = input("Username: ")
        # getpass avoids echoing the password to the terminal
        commcell_password = getpass.getpass(
            "Password for " + commcell_username + ": ")
    try:
        commcell = Commcell(webconsole_hostname, commcell_username,
                            commcell_password)
    except BaseException as e:
        # intentionally broad: any login failure is reported and fatal
        print(str(e))
        sys.exit(1)
    # print('User ' + commcell_username + ' logged in successfully on '
    #       + commcell.commserv_name + '.')
    return commcell
def login(module):
    """Sign the user in to the commcell with the credentials provided.

    Args:
        module (dict) -- webconsole and authentication details

    Stores the authenticated session in the module-level
    ``commcell_object``.
    """
    global commcell_object
    hostname = module['webconsole_hostname']
    token = module.get('authtoken')
    # Prefer token-based auth when a token is supplied; otherwise fall
    # back to username/password credentials.
    if token:
        commcell_object = Commcell(hostname, authtoken=token)
    else:
        commcell_object = Commcell(
            webconsole_hostname=hostname,
            commcell_username=module['commcell_username'],
            commcell_password=module['commcell_password'])
def main():
    """Resume or suspend active DATA_VERIFICATION jobs.

    The action is taken from ``sys.argv[1]``: ``resume`` resumes every
    suspended job, ``suspend`` pauses every running one.
    """
    commvault = Commcell(**config.COMMVAULT)
    controller = JobController(commvault)
    # DATA_VERIFICATION (job type 31)
    active = controller.active_jobs(job_type_list=[31])
    for job_id in active:
        job = controller.get(job_id)
        status = job.status
        # sys.argv[1] is only consulted when a job's status matches,
        # mirroring the short-circuit order of the original checks.
        if status == 'Suspended' and sys.argv[1] == 'resume':
            job.resume(wait_for_job_to_resume=True)
            logger.info(f'job ({job_id}) has been resumed')
        elif status == 'Running' and sys.argv[1] == 'suspend':
            job.pause(wait_for_job_to_pause=True)
            logger.info(f'job ({job_id}) has been suspended')
    commvault.logout()
def main():
    """Build a per-service backup-job summary and post it to Jira.

    For every service in ``config.SETTINGS['sox_services']``, inspects the
    recent Commvault jobs of the service's clients, collects a record per
    problematic job, then comments on (and, when every problem is
    accounted for, closes) the daily "JobSummary_[<service>]" SOX issue.
    """
    jira = JIRA(**config.JIRA)
    commvault = Commcell(**config.COMMVAULT)
    job_controller = JobController(commvault)
    for service_name in config.SETTINGS['sox_services']:
        client_group = ClientGroup(commvault, service_name)
        clients = client_group.associated_clients
        issues = []  # one problem record per troublesome job
        for client_name in clients:
            jobs = job_controller.all_jobs(
                client_name=client_name,
                job_summary='full',
                limit=config.SETTINGS['commvault']['jobs_limit'],
                lookup_time=config.SETTINGS['commvault']['lookup_time'],
            )
            for job_id in jobs:
                job = jobs[job_id]
                job_status = job['status'].lower()
                job_failed_files = job['totalFailedFiles']
                job_failed_folders = job['totalFailedFolders']
                # Skip healthy jobs: completed with no failed items, or
                # completed Virtual Server jobs (their failed-item counters
                # are handled via the VM status branch below instead).
                if (job_status == 'completed'
                        and (not (job_failed_files or job_failed_folders)
                             or job['appTypeName'] == 'Virtual Server')):
                    continue
                issue = {
                    'job_id': job_id,
                    'client': client_name,
                    'status': job_status,
                    'percent': job['percentComplete'],
                    'reason': '',
                    'comment': '',
                }
                logger.info(f'client={issue["client"]} '
                            f'job_id={issue["job_id"]} '
                            f'status={issue["status"]} '
                            f'failed_files={job_failed_files} '
                            f'failed_folders={job_failed_folders}')
                if job_status in ['running', 'waiting']:
                    # Still in progress — report progress only.
                    message = f'Progress: {job["percentComplete"]}%'
                    issue['comment'] = make_comment(issue, message)
                elif job_status in [
                        'pending', 'failed', 'killed', 'suspended',
                        'failed to start'
                ]:
                    issue['reason'] = job['pendingReason']
                    # Normalize the "activity disabled" reason so it matches
                    # known errors regardless of the subclient name.
                    pattern = 'backup activity for subclient .+ is disabled'
                    if re.match(pattern, issue['reason'], flags=re.IGNORECASE):
                        issue['reason'] = ('Backup activity for subclient '
                                           'is disabled')
                elif (job_status == 'completed'
                      and (job_failed_files or job_failed_folders)):
                    issue['reason'] = (f'Failed to back up: '
                                       f'{job_failed_folders} Folders, '
                                       f'{job_failed_files} Files')
                elif (job['appTypeName'] == 'Virtual Server'
                      and job_status == 'completed w/ one or more errors'):
                    issue['reason'] = job_status
                    job_detail = job_controller.get(
                        job_id).details['jobDetail']
                    vms = job_detail['clientStatusInfo']['vmStatus']
                    # After restoring VM with new name, Commvault renames old client name
                    # For example, src: srv-tibload-001, dest: srv-tibload-001_20102020
                    client_vm_name = client_name.split('_')[0]
                    vm_found = False
                    for vm in vms:
                        # prefix match tolerates the rename suffix above
                        if vm['vmName'].startswith(client_vm_name):
                            vm_found = True
                            issue['reason'] = vm['FailureReason']
                            break
                    if not vm_found:
                        logger.error(f'{client_vm_name} is not found '
                                     f'in the job ({job_id})')
                elif job_status == 'completed w/ one or more errors':
                    issue['reason'] = job['pendingReason']
                elif job_status == 'committed':
                    issue['reason'] = ('Job was cancelled, but '
                                       'some items successfully backed up')
                else:
                    logger.error(f'undefined job: {job}')
                # Link known errors to their wiki page so the Jira comment
                # explains the failure without a manual lookup.
                if issue['reason'] and not issue['comment']:
                    for error in config.SETTINGS['known_errors']:
                        if error.lower() in issue['reason'].lower():
                            link = config.SETTINGS['wiki'] + '/display/IDG/'
                            link += '+'.join(error.split())
                            message = f'[{error}|{link}]'
                            issue['comment'] = make_comment(issue, message)
                            break
                issues.append(issue)
        # Assemble the Jira comment. The issue may only be auto-closed when
        # every problem record produced a comment (i.e. is accounted for).
        comment = ''
        issue_can_be_closed = True
        for issue in issues:
            if not issue['comment']:
                issue_can_be_closed = False
                reason = make_comment(issue, issue['reason'])
                comment += f'{reason}\n'
            else:
                comment += f'{issue["comment"]}\n'
        if not comment:
            comment = 'No problem was found'
        # Doubly-escaped brackets: once for JQL, once for the f-string source.
        jql = (f'project = SOX AND '
               f'summary ~ "JobSummary_\\\\[{service_name}\\\\]" AND '
               f'created >= startOfDay()')
        issue = jira.search_issues(jql, validate_query=True)[0]
        issue_status = issue.fields.status.name.lower()
        if issue_status == 'open':
            jira.add_comment(issue.key, comment)
            # flatten newlines (presumably for single-line logging) —
            # NOTE(review): this reassigned value is not used below; confirm.
            comment = comment.replace('\n', '|')
            if issue_can_be_closed:
                # list of transitions /rest/api/2/issue/${issueIdOrKey}/transitions
                jira.transition_issue(issue=issue.key, transition='Close')
                logger.info(f'{service_name} ({issue.key}) has been closed')
        else:
            logger.info(
                f'{service_name} ({issue.key}) has already been closed')
    jira.close()
    commvault.logout()
#!/usr/bin/python3.6
# -*- coding: utf-8 -*-
# Collects backup records into the TeamSite MongoDB "backups" collection.
from cvpysdk.commcell import Commcell
import sys
import re
import os
import pymongo
import ast

# NOTE(review): an unresolved git merge conflict was left here; both sides
# were byte-identical, so the markers are dropped and one copy is kept.
# FIXME: hard-coded credentials — move to a config file or environment vars.
# The original literal 'domian\user' is a Python 3 syntax error ('\u' starts
# a unicode escape); a raw string preserves the intended domain\user value
# (the "domian" spelling is kept as-is since it may match a real account).
commcell = Commcell('CommServer', r'domian\user', 'password')

client = pymongo.MongoClient("localhost", 27017)
client.TeamSite.authenticate('user', 'password')
db = client["TeamSite"]
col = db["backups"]
# wipe the previous snapshot before inserting fresh records
col.remove({})

# NOTE(review): a second, unterminated merge-conflict marker followed here;
# the visible (HEAD) side is kept. The disabled block below documents the
# intended insert loop — the live continuation may lie past this chunk.
"""with open(r"./server","r") as failed:
    SRVLIST = failed.read().split('test')
    del SRVLIST[-1]
    for fail in SRVLIST:
        col.insert_one(ast.literal_eval(fail.rstrip()))
"""
with open(r"./server", "r") as failed:
    SRVLIST = failed.read().split('test')
    # the file ends with the 'test' delimiter, so drop the trailing empty entry
    del SRVLIST[-1]
from cvpysdk.commcell import Commcell

# Ad-hoc smoke-test script: connects to a lab CommServe and drives restores
# for one Virtual Server subclient.
# FIXME: hard-coded lab credentials (empty admin password).
_cs_obj = Commcell("zeus.idcprodcert.loc", "admin", "")
_client_obj = _cs_obj.clients.get("HvAutoSource")
_agent_obj = _client_obj.agents.get('Virtual Server')
# Take the first instance key — assumes the agent has (at least) one
# instance; reaches into the private _instances dict. TODO confirm.
_instancekeys = next(iter(_agent_obj.instances._instances))
_instance_obj = _agent_obj.instances.get(_instancekeys)
# print(_instance_obj.server_name)
# _instance_obj.associated_clients = ['hvidc1','hvidc2']
_backupset_obj = _instance_obj.backupsets.get('defaultbackupset')
_sub_obj = _backupset_obj.subclients.get('AutomationCrash')
print(_sub_obj.content)
print(_sub_obj.storage_policy)
# """
# Guest-file restore from the Linux VM to a local Windows path.
_Restore_Job = _sub_obj.guest_file_restore(
    "V11AutoLin", "/centos-root/FULL/TestData",
    destination_path="D:\\CVAutomationRestore")
print(_Restore_Job.job_id)
if not _Restore_Job.wait_for_completion():
    raise Exception("Failed to run restore out of place job with error: " +
                    str(_Restore_Job.delay_reason))
# NOTE(review): the bare triple-quote pair below forms a throwaway string
# literal — apparently a manual comment-toggle for the block that follows.
# As written, both restores execute; confirm the intended toggle state.
"""
#"""
# Disk-level restore of the VM to the hvidc1 proxy.
_Restore_Job = _sub_obj.disk_restore("RegularAuto12", "hvidc1",
                                     "D:\\CVAutomation")
print(_Restore_Job.job_id)
if not _Restore_Job.wait_for_completion():
    raise Exception("Failed to run restore out of place job with error: " +
                    str(_Restore_Job.delay_reason))