Example #1
def enable():
    global backup_logger,hutil,error_msg,para_parser
    hutil.do_parse_context('Enable')
    try:
        backup_logger.log('starting to enable', True)

        # handle the restoring scenario.
        mi = MachineIdentity()
        stored_identity = mi.stored_identity()
        if(stored_identity is None):
            mi.save_identity()
        else:
            current_identity = mi.current_identity()
            if(current_identity != stored_identity):
                current_seq_no = -1
                backup_logger.log("machine identity not same, set current_seq_no to " + str(current_seq_no) + " " + str(stored_identity) + " " + str(current_identity), True)
                hutil.set_last_seq(current_seq_no)
                mi.save_identity()

        hutil.exit_if_same_seq()
        hutil.save_seq()

        """
        protectedSettings is the privateConfig passed from PowerShell.
        Note that _context._config uses the most recent timestamp,
        so this is correct as long as time synchronization is working.
        """
        protected_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('protectedSettings')
        public_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('publicSettings')
        para_parser = ParameterParser(protected_settings, public_settings)

        if(para_parser.commandStartTimeUTCTicks is not None and para_parser.commandStartTimeUTCTicks != ""):
            utcTicksLong = long(para_parser.commandStartTimeUTCTicks)
            backup_logger.log('utcTicks in long format' + str(utcTicksLong), True)
            commandStartTime = convert_time(utcTicksLong)
            utcNow = datetime.datetime.utcnow()
            backup_logger.log('command start time is ' + str(commandStartTime) + " and utcNow is " + str(utcNow))
            timespan = utcNow - commandStartTime
            THIRTY_MINUTES = 30 * 60 # in seconds
            # handle the machine identity for the restoration scenario.
            total_span_in_seconds = timedelta_total_seconds(timespan)
            backup_logger.log('timespan is ' + str(timespan) + ' ' + str(total_span_in_seconds))
            if(abs(total_span_in_seconds) > THIRTY_MINUTES):
                error_msg = 'the call time stamp is out of date. so skip it.'
                exit_with_commit_log(error_msg, para_parser)

        if(para_parser.taskId is not None and para_parser.taskId != ""):
            taskIdentity = TaskIdentity()
            taskIdentity.save_identity(para_parser.taskId)
        temp_status = 'transitioning'
        temp_result = CommonVariables.success
        temp_msg = 'Transitioning state in enable'
        status_report(temp_status, temp_result, temp_msg)
        hutil.do_status_report('Enable', temp_status, str(temp_result), temp_msg)
        start_daemon()
    except Exception as e:
        errMsg = 'Failed to call the daemon with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
        backup_logger.log(errMsg, False, 'Error')
        global_error_result = e
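
The staleness check above converts commandStartTimeUTCTicks into a datetime before comparing it with the current UTC time. A minimal sketch of what convert_time() could look like, assuming the ticks are a .NET-style count of 100-nanosecond intervals since 0001-01-01 UTC (the extension's real helper may differ):

import datetime

def convert_time(ticks):
    # .NET ticks are 100 ns units since 0001-01-01 00:00:00; 10 ticks = 1 microsecond
    return datetime.datetime(1, 1, 1) + datetime.timedelta(microseconds=ticks // 10)
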
Example #2
    def get_machine_id(self):
        machine_id_file = "/etc/azure/machine_identity_FD76C85E-406F-4CFA-8EB0-CF18B123358B"
        machine_id = ""
        try:
            if not os.path.exists(os.path.dirname(machine_id_file)):
                os.makedirs(os.path.dirname(machine_id_file))

            if os.path.exists(machine_id_file):
                file_pointer = open(machine_id_file, "r")
                machine_id = file_pointer.readline()
                file_pointer.close()
            else:
                mi = MachineIdentity()
                machine_id = mi.stored_identity()[1:-1]
                file_pointer = open(machine_id_file, "w")
                file_pointer.write(machine_id)
                file_pointer.close()
        except Exception as e:
            errMsg = 'Failed to retrieve the unique machine id with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
            self.log(errMsg, False, 'Error')
 
        self.log("Unique Machine Id  : {0}".format(machine_id))
        return machine_id
Example #3
    def get_machine_id(self):
        machine_id_file = "/etc/azure/machine_identity_FD76C85E-406F-4CFA-8EB0-CF18B123358B"
        machine_id = ""
        try:
            if not os.path.exists(os.path.dirname(machine_id_file)):
                os.makedirs(os.path.dirname(machine_id_file))

            if os.path.exists(machine_id_file):
                file_pointer = open(machine_id_file, "r")
                machine_id = file_pointer.readline()
                file_pointer.close()
            else:
                mi = MachineIdentity()
                machine_id = mi.stored_identity()[1:-1]
                file_pointer = open(machine_id_file, "w")
                file_pointer.write(machine_id)
                file_pointer.close()
        except Exception as e:
            errMsg = 'Failed to retrieve the unique machine id with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
            self.log(errMsg, 'Error')
 
        self.log("Unique Machine Id  : {0}".format(machine_id))
        return machine_id
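
Both get_machine_id() variants above lean on a MachineIdentity object to detect that a VM was restored onto different hardware. A minimal, illustrative sketch of such a class, assuming the identity is the DMI product UUID cached under a hypothetical path (this is not the extension's actual implementation):

import os

class MachineIdentitySketch:
    identity_file = '/var/lib/azure/machine_identity'  # hypothetical cache location

    def current_identity(self):
        # the hypervisor-assigned VM UUID is one common per-machine identifier
        with open('/sys/class/dmi/id/product_uuid', 'r') as f:
            return f.read().strip()

    def stored_identity(self):
        # returns the previously cached identity, or None on first run
        if not os.path.exists(self.identity_file):
            return None
        with open(self.identity_file, 'r') as f:
            return f.read().strip()

    def save_identity(self):
        with open(self.identity_file, 'w') as f:
            f.write(self.current_identity())
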
Example #4
def enable():
    #this is using the most recent file timestamp.
    hutil.do_parse_context('Enable')

    freezer = FsFreezer(patching= MyPatching, logger = backup_logger)
    unfreeze_result = None
    snapshot_result = None
    freeze_result = None
    global_error_result = None
    para_parser = None
    run_result = 1
    error_msg = ''
    run_status = None
    # precheck
    freeze_called = False
    try:
        # we need to freeze the file system first
        backup_logger.log('starting to enable', True)

        # handle the restoring scenario.
        mi = MachineIdentity()
        stored_identity = mi.stored_identity()
        if(stored_identity is None):
            mi.save_identity()
        else:
            current_identity = mi.current_identity()
            if(current_identity != stored_identity):
                current_seq_no = -1
                backup_logger.log("machine identity not same, set current_seq_no to " + str(current_seq_no) + " " + str(stored_identity) + " " + str(current_identity), True)
                hutil.set_last_seq(current_seq_no)
                mi.save_identity()

        hutil.exit_if_same_seq()
        hutil.save_seq()

        """
        protectedSettings is the privateConfig passed from PowerShell.
        Note that _context._config uses the most recent timestamp,
        so this is correct as long as time synchronization is working.
        """
        protected_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('protectedSettings')
        public_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('publicSettings')
        para_parser = ParameterParser(protected_settings, public_settings)

        if(para_parser.commandStartTimeUTCTicks is not None and para_parser.commandStartTimeUTCTicks != ""):
            utcTicksLong = long(para_parser.commandStartTimeUTCTicks)
            commandStartTime = convert_time(utcTicksLong)
            utcNow = datetime.datetime.utcnow()
            backup_logger.log('command start time is ' + str(commandStartTime) + " and utcNow is " + str(utcNow))
            timespan = utcNow - commandStartTime
            THIRTY_MINUTES = 30 * 60 # in seconds
            # handle the machine identity for the restoration scenario.
            total_span_in_seconds = timedelta_total_seconds(timespan)
            backup_logger.log('timespan is ' + str(timespan) + ' ' + str(total_span_in_seconds))
            if(abs(total_span_in_seconds) > THIRTY_MINUTES):
                error_msg = 'the call time stamp is out of date. so skip it.'
                exit_with_commit_log(error_msg, para_parser)

        if(para_parser.taskId is not None and para_parser.taskId != ""):
            taskIdentity = TaskIdentity()
            taskIdentity.save_identity(para_parser.taskId)
        commandToExecute = para_parser.commandToExecute
        #validate all the required parameter here
        if(commandToExecute.lower() == CommonVariables.iaas_install_command):
            backup_logger.log('install succeed.',True)
            run_status = 'success'
            error_msg = 'Install Succeeded'
            run_result = CommonVariables.success
            backup_logger.log(error_msg)
        elif(commandToExecute.lower() == CommonVariables.iaas_vmbackup_command):
            if(para_parser.backup_metadata is None or para_parser.public_config_obj is None or para_parser.private_config_obj is None):
                run_result = CommonVariables.error_parameter
                run_status = 'error'
                error_msg = 'required field empty or not correct'
                backup_logger.log(error_msg, False, 'Error')
            else:
                backup_logger.log('commandToExecute is ' + commandToExecute, True)
                """
                make sure no logging is in progress while the file system is frozen.
                """
                backup_logger.log('doing freeze now...', True)
                freeze_called = True
                freeze_result = freezer.freezeall()
                backup_logger.log('freeze result ' + str(freeze_result))

                # check whether we freeze succeed first?
                if(freeze_result is not None and len(freeze_result.errors) > 0):
                    run_result = CommonVariables.error
                    run_status = 'error'
                    error_msg = 'Enable failed with error: ' + str(freeze_result)
                    backup_logger.log(error_msg, False, 'Warning')
                else:
                    backup_logger.log('doing snapshot now...')
                    snap_shotter = Snapshotter(backup_logger)
                    snapshot_result = snap_shotter.snapshotall(para_parser)
                    backup_logger.log('snapshotall ends...')
                    if(snapshot_result is not None and len(snapshot_result.errors) > 0):
                        error_msg = 'snapshot result: ' + str(snapshot_result)
                        run_result = CommonVariables.error
                        run_status = 'error'
                        backup_logger.log(error_msg, False, 'Error')
                    else:
                        run_result = CommonVariables.success
                        run_status = 'success'
                        error_msg = 'Enable Succeeded'
                        backup_logger.log(error_msg)
        else:
            run_status = 'error'
            run_result = CommonVariables.error_parameter
            error_msg = 'command is not correct'
            backup_logger.log(error_msg, False, 'Error')
    except Exception as e:
        errMsg = 'Failed to enable the extension with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
        backup_logger.log(errMsg, False, 'Error')
        global_error_result = e
    finally:
        if(freeze_called):
            unfreeze_result = freezer.unfreezeall()
            backup_logger.log('unfreeze result ' + str(unfreeze_result))
            if(unfreeze_result is not None and len(unfreeze_result.errors) > 0):
                error_msg += ('Enable Succeeded with error: ' + str(unfreeze_result.errors))
                backup_logger.log(error_msg, False, 'Warning')
            backup_logger.log('unfreeze ends...')

    """
    do the final status report here, to avoid the complexity of logging while the file system is frozen.
    """
    if(global_error_result is not None):
        if(hasattr(global_error_result,'errno') and global_error_result.errno == 2):
            run_result = CommonVariables.error_12
        elif(para_parser is None):
            run_result = CommonVariables.error_parameter
        else:
            run_result = CommonVariables.error
        run_status = 'error'
        error_msg  += ('Enable failed.' + str(global_error_result))

    status_report_msg = None
    if(para_parser is not None and para_parser.statusBlobUri is not None and para_parser.statusBlobUri != ""):
        status_report_msg = do_backup_status_report(operation='Enable',status = run_status,\
                                status_code=str(run_result), \
                                message=error_msg,\
                                taskId=para_parser.taskId,\
                                commandStartTimeUTCTicks=para_parser.commandStartTimeUTCTicks,\
                                blobUri=para_parser.statusBlobUri)
    if(status_report_msg is not None):
        backup_logger.log("status report message:")
        backup_logger.log(status_report_msg)
    else:
        backup_logger.log("status_report_msg is none")
    if(para_parser is not None and para_parser.logsBlobUri is not None and para_parser.logsBlobUri != ""):
        backup_logger.commit(para_parser.logsBlobUri)
    else:
        backup_logger.log("the logs blob uri is not there, so do not upload log.")
        backup_logger.commit_to_local()
    

    hutil.do_exit(0, 'Enable', run_status, str(run_result), error_msg)
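
The 30-minute staleness check relies on a timedelta_total_seconds() helper rather than timedelta.total_seconds(). A plausible sketch, assuming the helper exists for compatibility with Python versions that predate total_seconds() (the real implementation may differ):

def timedelta_total_seconds(delta):
    # equivalent to delta.total_seconds() on newer Python versions
    return (delta.microseconds +
            (delta.seconds + delta.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
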
Example #5
def daemon():
    global MyPatching, backup_logger, hutil, run_result, run_status, error_msg, freezer, para_parser, snapshot_done
    #this is using the most recent file timestamp.
    hutil.do_parse_context('Executing')
    freezer = FsFreezer(patching=MyPatching, logger=backup_logger)
    global_error_result = None
    # precheck
    freeze_called = False

    configfile = '/etc/azure/vmbackup.conf'
    thread_timeout = str(60)
    try:
        config = ConfigParser.ConfigParser()
        config.read(configfile)
        thread_timeout = config.get('SnapshotThread', 'timeout')
    except Exception as e:
        errMsg = 'cannot read config file or file not present'
        backup_logger.log(errMsg, False, 'Warning')
    backup_logger.log("final thread timeout" + thread_timeout, True)

    try:
        # we need to freeze the file system first
        backup_logger.log('starting to enable', True)

        # handle the restoring scenario.
        mi = MachineIdentity()
        stored_identity = mi.stored_identity()
        if (stored_identity is None):
            mi.save_identity()
        else:
            current_identity = mi.current_identity()
            if (current_identity != stored_identity):
                current_seq_no = -1
                backup_logger.log(
                    "machine identity not same, set current_seq_no to " +
                    str(current_seq_no) + " " + str(stored_identity) + " " +
                    str(current_identity), True)
                hutil.set_last_seq(current_seq_no)
                mi.save_identity()

        hutil.exit_if_same_seq()
        hutil.save_seq()
        """
        protectedSettings is the privateConfig passed from PowerShell.
        Note that _context._config uses the most recent timestamp,
        so this is correct as long as time synchronization is working.
        """
        protected_settings = hutil._context._config['runtimeSettings'][0][
            'handlerSettings'].get('protectedSettings')
        public_settings = hutil._context._config['runtimeSettings'][0][
            'handlerSettings'].get('publicSettings')
        para_parser = ParameterParser(protected_settings, public_settings)

        if (para_parser.commandStartTimeUTCTicks is not None
                and para_parser.commandStartTimeUTCTicks != ""):
            utcTicksLong = long(para_parser.commandStartTimeUTCTicks)
            backup_logger.log('utcTicks in long format' + str(utcTicksLong),
                              True)
            commandStartTime = convert_time(utcTicksLong)
            utcNow = datetime.datetime.utcnow()
            backup_logger.log('command start time is ' +
                              str(commandStartTime) + " and utcNow is " +
                              str(utcNow))
            timespan = utcNow - commandStartTime
            THIRTY_MINUTES = 30 * 60  # in seconds
            # handle the machine identity for the restoration scenario.
            total_span_in_seconds = timedelta_total_seconds(timespan)
            backup_logger.log('timespan is ' + str(timespan) + ' ' +
                              str(total_span_in_seconds))
            if (abs(total_span_in_seconds) > THIRTY_MINUTES):
                error_msg = 'the call time stamp is out of date. so skip it.'
                exit_with_commit_log(error_msg, para_parser)

        if (para_parser.taskId is not None and para_parser.taskId != ""):
            taskIdentity = TaskIdentity()
            taskIdentity.save_identity(para_parser.taskId)
        commandToExecute = para_parser.commandToExecute
        #validate all the required parameter here
        if (commandToExecute.lower() == CommonVariables.iaas_install_command):
            backup_logger.log('install succeed.', True)
            run_status = 'success'
            error_msg = 'Install Succeeded'
            run_result = CommonVariables.success
            backup_logger.log(error_msg)
        elif (commandToExecute.lower() == CommonVariables.iaas_vmbackup_command
              ):
            if (para_parser.backup_metadata is None
                    or para_parser.public_config_obj is None
                    or para_parser.private_config_obj is None):
                run_result = CommonVariables.error_parameter
                run_status = 'error'
                error_msg = 'required field empty or not correct'
                backup_logger.log(error_msg, False, 'Error')
            else:
                backup_logger.log('commandToExecute is ' + commandToExecute,
                                  True)
                """
                make sure no logging is in progress while the file system is frozen.
                """
                temp_status = 'transitioning'
                temp_result = CommonVariables.success
                temp_msg = 'Transitioning state in extension'
                trans_report_msg = None
                if (para_parser is not None
                        and para_parser.statusBlobUri is not None
                        and para_parser.statusBlobUri != ""):
                    trans_report_msg = do_backup_status_report(operation='Enable',status=temp_status,\
                                    status_code=str(temp_result),\
                                    message=temp_msg,\
                                    taskId=para_parser.taskId,\
                                    commandStartTimeUTCTicks=para_parser.commandStartTimeUTCTicks,\
                                    blobUri=para_parser.statusBlobUri)
                    if (trans_report_msg is not None):
                        backup_logger.log("trans status report message:")
                        backup_logger.log(trans_report_msg)
                    else:
                        backup_logger.log("trans_report_msg is none")
                hutil.do_status_report('Enable', temp_status, str(temp_result),
                                       temp_msg)
                backup_logger.log('doing freeze now...', True)
                snapshot_thread = Thread(target=snapshot)
                start_time = datetime.datetime.utcnow()
                snapshot_thread.start()
                snapshot_thread.join(float(thread_timeout))
                if not snapshot_done:
                    run_result = CommonVariables.error
                    run_status = 'error'
                    error_msg = 'T:W Snapshot timeout'
                    backup_logger.log(error_msg, False, 'Warning')

                end_time = datetime.datetime.utcnow()
                time_taken = end_time - start_time
                backup_logger.log('total time taken..' + str(time_taken))

                for i in range(0, 3):
                    unfreeze_result = freezer.unfreezeall()
                    backup_logger.log('unfreeze result ' +
                                      str(unfreeze_result))
                    if (unfreeze_result is not None):
                        if len(unfreeze_result.errors) > 0:
                            error_msg += ('unfreeze with error: ' +
                                          str(unfreeze_result.errors))
                            backup_logger.log(error_msg, False, 'Warning')
                        else:
                            backup_logger.log('unfreeze succeeded with no errors')
                            break
                backup_logger.log('unfreeze ends...')

        else:
            run_status = 'error'
            run_result = CommonVariables.error_parameter
            error_msg = 'command is not correct'
            backup_logger.log(error_msg, False, 'Error')
    except Exception as e:
        errMsg = 'Failed to enable the extension with error: %s, stack trace: %s' % (
            str(e), traceback.format_exc())
        backup_logger.log(errMsg, False, 'Error')
        global_error_result = e
    """
    do the final status report here, to avoid the complexity of logging while the file system is frozen.
    """
    if (global_error_result is not None):
        if (hasattr(global_error_result, 'errno')
                and global_error_result.errno == 2):
            run_result = CommonVariables.error_12
        elif (para_parser is None):
            run_result = CommonVariables.error_parameter
        else:
            run_result = CommonVariables.error
        run_status = 'error'
        error_msg += ('Enable failed.' + str(global_error_result))
    status_report_msg = None
    if (para_parser is not None and para_parser.statusBlobUri is not None
            and para_parser.statusBlobUri != ""):
        status_report_msg = do_backup_status_report(operation='Enable',status=run_status,\
                                status_code=str(run_result),\
                                message=error_msg,\
                                taskId=para_parser.taskId,\
                                commandStartTimeUTCTicks=para_parser.commandStartTimeUTCTicks,\
                                blobUri=para_parser.statusBlobUri)
    if (status_report_msg is not None):
        backup_logger.log("status report message:")
        backup_logger.log(status_report_msg)
    else:
        backup_logger.log("status_report_msg is none")
    if (para_parser is not None and para_parser.logsBlobUri is not None
            and para_parser.logsBlobUri != ""):
        backup_logger.commit(para_parser.logsBlobUri)
    else:
        backup_logger.log(
            "the logs blob uri is not there, so do not upload log.")
        backup_logger.commit_to_local()

    hutil.do_exit(0, 'Enable', run_status, str(run_result), error_msg)
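
The daemon above bounds the snapshot by running it on a worker thread and joining with a timeout; if the snapshot_done flag is still unset after the join, the run is treated as a timeout. A self-contained sketch of that pattern with illustrative stand-ins (not the extension's actual snapshot code):

import time
from threading import Thread

snapshot_done = False

def snapshot_worker():
    global snapshot_done
    time.sleep(2)          # stand-in for the real snapshot work
    snapshot_done = True

worker = Thread(target=snapshot_worker)
worker.start()
worker.join(float('60'))   # thread_timeout read from /etc/azure/vmbackup.conf, default 60
if not snapshot_done:
    print('snapshot timed out, marking the run as failed')
else:
    print('snapshot finished within the timeout')
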
Example #6
def daemon():
    global MyPatching, backup_logger, hutil, run_result, run_status, error_msg, freezer, para_parser, snapshot_done, snapshot_info_array, g_fsfreeze_on, total_used_size, patch_class_name, orig_distro, workload_patch
    #this is using the most recent file timestamp.
    hutil.do_parse_context('Executing')

    try:
        backup_logger.log('starting daemon', True)
        backup_logger.log(
            "patch_class_name: " + str(patch_class_name) +
            " and orig_distro: " + str(orig_distro), True)
        # handle the restoring scenario.
        mi = MachineIdentity()
        stored_identity = mi.stored_identity()
        if (stored_identity is None):
            mi.save_identity()
        else:
            current_identity = mi.current_identity()
            if (current_identity != stored_identity):
                current_seq_no = -1
                backup_logger.log(
                    "machine identity not same, set current_seq_no to " +
                    str(current_seq_no) + " " + str(stored_identity) + " " +
                    str(current_identity), True)
                hutil.set_last_seq(current_seq_no)
                mi.save_identity()
    except Exception as e:
        errMsg = 'Failed to validate sequence number with error: %s, stack trace: %s' % (
            str(e), traceback.format_exc())
        backup_logger.log(errMsg, True, 'Error')

    freezer = FsFreezer(patching=MyPatching, logger=backup_logger, hutil=hutil)
    global_error_result = None
    # precheck
    freeze_called = False
    configfile = '/etc/azure/vmbackup.conf'
    thread_timeout = str(60)
    snapshot_type = "appAndFileSystem"

    #Adding python version to the telemetry
    try:
        python_version_info = sys.version_info
        python_version = str(sys.version_info[0]) + '.' + str(
            sys.version_info[1]) + '.' + str(sys.version_info[2])
        HandlerUtil.HandlerUtility.add_to_telemetery_data(
            "pythonVersion", python_version)
    except Exception as e:
        errMsg = 'Failed to retrieve python version with error: %s, stack trace: %s' % (
            str(e), traceback.format_exc())
        backup_logger.log(errMsg, True, 'Error')

    #fetching platform architecture
    try:
        architecture = platform.architecture()[0]
        HandlerUtil.HandlerUtility.add_to_telemetery_data(
            "platformArchitecture", architecture)
    except Exception as e:
        errMsg = 'Failed to retrieve "platform architecture" with error: %s, stack trace: %s' % (
            str(e), traceback.format_exc())
        backup_logger.log(errMsg, True, 'Error')

    try:
        if (freezer.mounts is not None):
            hutil.partitioncount = len(freezer.mounts.mounts)
        backup_logger.log(" configfile " + str(configfile), True)
        config = ConfigParsers.ConfigParser()
        config.read(configfile)
        if config.has_option('SnapshotThread', 'timeout'):
            thread_timeout = config.get('SnapshotThread', 'timeout')
        if config.has_option('SnapshotThread', 'snapshot_type'):
            snapshot_type = config.get(
                'SnapshotThread',
                'snapshot_type')  #values - appOnly, appAndFileSystem
    except Exception as e:
        errMsg = 'cannot read config file or file not present'
        backup_logger.log(errMsg, True, 'Warning')
    backup_logger.log("final thread timeout" + thread_timeout, True)

    snapshot_info_array = None

    try:
        # we need to freeze the file system first
        backup_logger.log('starting daemon', True)
        """
        protectedSettings is the privateConfig passed from PowerShell.
        Note that _context._config uses the most recent timestamp,
        so this is correct as long as time synchronization is working.
        """
        protected_settings = hutil._context._config['runtimeSettings'][0][
            'handlerSettings'].get('protectedSettings', {})
        public_settings = hutil._context._config['runtimeSettings'][0][
            'handlerSettings'].get('publicSettings')
        para_parser = ParameterParser(protected_settings, public_settings,
                                      backup_logger)
        hutil.update_settings_file()

        if (bool(public_settings) == False and not protected_settings):
            error_msg = "unable to load certificate"
            hutil.SetExtErrorCode(
                ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.
                FailedHandlerGuestAgentCertificateNotFound)
            temp_result = CommonVariables.FailedHandlerGuestAgentCertificateNotFound
            temp_status = 'error'
            exit_with_commit_log(temp_status, temp_result, error_msg,
                                 para_parser)

        if (para_parser.commandStartTimeUTCTicks is not None
                and para_parser.commandStartTimeUTCTicks != ""):
            utcTicksLong = int(para_parser.commandStartTimeUTCTicks)
            backup_logger.log('utcTicks in long format' + str(utcTicksLong),
                              True)
            commandStartTime = convert_time(utcTicksLong)
            utcNow = datetime.datetime.utcnow()
            backup_logger.log(
                'command start time is ' + str(commandStartTime) +
                " and utcNow is " + str(utcNow), True)
            timespan = utcNow - commandStartTime
            MAX_TIMESPAN = 140 * 60  # in seconds
            # handle the machine identity for the restoration scenario.
            total_span_in_seconds = timedelta_total_seconds(timespan)
            backup_logger.log('timespan is ' + str(timespan) + ' ' +
                              str(total_span_in_seconds))

            if total_span_in_seconds > MAX_TIMESPAN:
                error_msg = "CRP timeout limit has reached, will not take snapshot."
                errMsg = error_msg
                hutil.SetExtErrorCode(
                    ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.
                    FailedGuestAgentInvokedCommandTooLate)
                temp_result = CommonVariables.FailedGuestAgentInvokedCommandTooLate
                temp_status = 'error'
                exit_with_commit_log(temp_status, temp_result, error_msg,
                                     para_parser)

        if (para_parser.taskId is not None and para_parser.taskId != ""):
            backup_logger.log('taskId: ' + str(para_parser.taskId), True)
            exit_if_same_taskId(para_parser.taskId)
            taskIdentity = TaskIdentity()
            taskIdentity.save_identity(para_parser.taskId)

        hutil.save_seq()

        commandToExecute = para_parser.commandToExecute
        #validate all the required parameter here
        backup_logger.log(commandToExecute, True)
        if (CommonVariables.iaas_install_command in commandToExecute.lower()):
            backup_logger.log('install succeed.', True)
            run_status = 'success'
            error_msg = 'Install Succeeded'
            run_result = CommonVariables.success
            backup_logger.log(error_msg)
        elif (CommonVariables.iaas_vmbackup_command
              in commandToExecute.lower()):
            if (para_parser.backup_metadata is None
                    or para_parser.public_config_obj is None):
                run_result = CommonVariables.error_parameter
                hutil.SetExtErrorCode(ExtensionErrorCodeHelper.
                                      ExtensionErrorCodeEnum.error_parameter)
                run_status = 'error'
                error_msg = 'required field empty or not correct'
                backup_logger.log(error_msg, True, 'Error')
            else:
                backup_logger.log('commandToExecute is ' + commandToExecute,
                                  True)
                """
                make sure no logging is in progress while the file system is frozen.
                """
                temp_status = 'success'
                temp_result = CommonVariables.ExtensionTempTerminalState
                temp_msg = 'Transitioning state in extension'
                blob_report_msg, file_report_msg = get_status_to_report(
                    temp_status, temp_result, temp_msg, None)
                status_report_to_file(file_report_msg)
                status_report_to_blob(blob_report_msg)
                #partial logging before freeze
                if (para_parser is not None
                        and para_parser.logsBlobUri is not None
                        and para_parser.logsBlobUri != ""):
                    backup_logger.commit_to_blob(para_parser.logsBlobUri)
                else:
                    backup_logger.log(
                        "the logs blob uri is not there, so do not upload log."
                    )
                backup_logger.log('commandToExecute is ' + commandToExecute,
                                  True)

                workload_patch = WorkloadPatch.WorkloadPatch(backup_logger)
                #new flow only if workload name is present in workload.conf
                if workload_patch.name != None:
                    backup_logger.log(
                        "workload backup enabled for workload: " +
                        workload_patch.name, True)
                    pre_skipped = False
                    if len(workload_patch.error_details) > 0:
                        backup_logger.log("skip pre and post")
                        pre_skipped = True
                    else:
                        workload_patch.pre()
                    if len(workload_patch.error_details) > 0:
                        backup_logger.log("file system consistent backup only")
                    #todo error handling
                    if snapshot_type == "appOnly":
                        g_fsfreeze_on = False
                    else:
                        g_fsfreeze_on = True
                    freeze_snapshot(thread_timeout)
                    if pre_skipped == False:
                        workload_patch.post()
                    workload_error = workload_patch.populateErrors()
                    if workload_error != None and g_fsfreeze_on == False:
                        run_status = 'error'
                        run_result = workload_error.errorCode
                        hutil.SetExtErrorCode(workload_error.errorCode)
                        error_msg = 'Workload Patch failed with error message: ' + workload_error.errorMsg
                        error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(
                            hutil.ExtErrorCode)
                        backup_logger.log(error_msg, True)
                    elif workload_error != None and g_fsfreeze_on == True:
                        hutil.SetExtErrorCode(workload_error.errorCode)
                        error_msg = 'Workload Patch failed with warning message: ' + workload_error.errorMsg
                        error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(
                            hutil.ExtErrorCode)
                        backup_logger.log(error_msg, True)
                    else:
                        if (run_status == CommonVariables.status_success):
                            run_status = 'success'
                            run_result = CommonVariables.success_appconsistent
                            hutil.SetExtErrorCode(
                                ExtensionErrorCodeHelper.
                                ExtensionErrorCodeEnum.success_appconsistent)
                            error_msg = 'Enable Succeeded with App Consistent Snapshot'
                            backup_logger.log(error_msg, True)
                        else:
                            error_msg = 'Enable failed in fsfreeze snapshot flow'
                            backup_logger.log(error_msg, True)
                else:
                    PluginHostObj = PluginHost(logger=backup_logger)
                    PluginHostErrorCode, dobackup, g_fsfreeze_on = PluginHostObj.pre_check(
                    )
                    doFsConsistentbackup = False
                    appconsistentBackup = False

                    if not (PluginHostErrorCode == CommonVariables.
                            FailedPrepostPluginhostConfigParsing
                            or PluginHostErrorCode
                            == CommonVariables.FailedPrepostPluginConfigParsing
                            or PluginHostErrorCode == CommonVariables.
                            FailedPrepostPluginhostConfigNotFound
                            or PluginHostErrorCode == CommonVariables.
                            FailedPrepostPluginhostConfigPermissionError
                            or PluginHostErrorCode ==
                            CommonVariables.FailedPrepostPluginConfigNotFound):
                        backup_logger.log(
                            'App Consistent Backup Enabled', True)
                        HandlerUtil.HandlerUtility.add_to_telemetery_data(
                            "isPrePostEnabled", "true")
                        appconsistentBackup = True

                    if (PluginHostErrorCode !=
                            CommonVariables.PrePost_PluginStatus_Success):
                        backup_logger.log(
                            'Triggering File System Consistent Backup because of error code'
                            +
                            ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.
                            StatusCodeStringBuilder(PluginHostErrorCode), True)
                        doFsConsistentbackup = True

                    preResult = PluginHostResult()
                    postResult = PluginHostResult()

                    if not doFsConsistentbackup:
                        preResult = PluginHostObj.pre_script()
                        dobackup = preResult.continueBackup

                        if (g_fsfreeze_on == False
                                and preResult.anyScriptFailed):
                            dobackup = False

                    if dobackup:
                        freeze_snapshot(thread_timeout)

                    if not doFsConsistentbackup:
                        postResult = PluginHostObj.post_script()
                        if not postResult.continueBackup:
                            dobackup = False

                        if (g_fsfreeze_on == False
                                and postResult.anyScriptFailed):
                            dobackup = False

                    if not dobackup:
                        if run_result == CommonVariables.success and PluginHostErrorCode != CommonVariables.PrePost_PluginStatus_Success:
                            run_status = 'error'
                            run_result = PluginHostErrorCode
                            hutil.SetExtErrorCode(PluginHostErrorCode)
                            error_msg = 'Plugin Host Precheck Failed'
                            error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(
                                hutil.ExtErrorCode)
                            backup_logger.log(error_msg, True)

                        if run_result == CommonVariables.success:
                            pre_plugin_errors = preResult.errors
                            for error in pre_plugin_errors:
                                if error.errorCode != CommonVariables.PrePost_PluginStatus_Success:
                                    run_status = 'error'
                                    run_result = error.errorCode
                                    hutil.SetExtErrorCode(error.errorCode)
                                    error_msg = 'PreScript failed for the plugin ' + error.pluginName
                                    error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(
                                        hutil.ExtErrorCode)
                                    backup_logger.log(error_msg, True)
                                    break

                        if run_result == CommonVariables.success:
                            post_plugin_errors = postResult.errors
                            for error in post_plugin_errors:
                                if error.errorCode != CommonVariables.PrePost_PluginStatus_Success:
                                    run_status = 'error'
                                    run_result = error.errorCode
                                    hutil.SetExtErrorCode(error.errorCode)
                                    error_msg = 'PostScript failed for the plugin ' + error.pluginName
                                    error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(
                                        hutil.ExtErrorCode)
                                    backup_logger.log(error_msg, True)
                                    break

                    if appconsistentBackup:
                        if (PluginHostErrorCode !=
                                CommonVariables.PrePost_PluginStatus_Success):
                            hutil.SetExtErrorCode(PluginHostErrorCode)
                        pre_plugin_errors = preResult.errors
                        for error in pre_plugin_errors:
                            if error.errorCode != CommonVariables.PrePost_PluginStatus_Success:
                                hutil.SetExtErrorCode(error.errorCode)
                        post_plugin_errors = postResult.errors
                        for error in post_plugin_errors:
                            if error.errorCode != CommonVariables.PrePost_PluginStatus_Success:
                                hutil.SetExtErrorCode(error.errorCode)

                    if run_result == CommonVariables.success and not doFsConsistentbackup and not (
                            preResult.anyScriptFailed
                            or postResult.anyScriptFailed):
                        run_status = 'success'
                        run_result = CommonVariables.success_appconsistent
                        hutil.SetExtErrorCode(
                            ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.
                            success_appconsistent)
                        error_msg = 'Enable Succeeded with App Consistent Snapshot'
                        backup_logger.log(error_msg, True)

        else:
            run_status = 'error'
            run_result = CommonVariables.error_parameter
            hutil.SetExtErrorCode(ExtensionErrorCodeHelper.
                                  ExtensionErrorCodeEnum.error_parameter)
            error_msg = 'command is not correct'
            backup_logger.log(error_msg, True, 'Error')
    except Exception as e:
        hutil.update_settings_file()
        errMsg = 'Failed to enable the extension with error: %s, stack trace: %s' % (
            str(e), traceback.format_exc())
        backup_logger.log(errMsg, True, 'Error')
        global_error_result = e
    """
    do the final status report here, to avoid the complexity of logging while the file system is frozen.
    """
    try:
        if (global_error_result is not None):
            if (hasattr(global_error_result, 'errno')
                    and global_error_result.errno == 2):
                run_result = CommonVariables.error_12
                hutil.SetExtErrorCode(
                    ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error_12)
            elif (para_parser is None):
                run_result = CommonVariables.error_parameter
                hutil.SetExtErrorCode(ExtensionErrorCodeHelper.
                                      ExtensionErrorCodeEnum.error_parameter)
            else:
                run_result = CommonVariables.error
                hutil.SetExtErrorCode(
                    ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error)
            run_status = 'error'
            error_msg += ('Enable failed.' + str(global_error_result))
        status_report_msg = None
        hutil.SetExtErrorCode(
            run_result
        )  #setting extension errorcode at the end if missed somewhere
        HandlerUtil.HandlerUtility.add_to_telemetery_data(
            "extErrorCode",
            str(ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.
                ExtensionErrorCodeNameDict[hutil.ExtErrorCode]))
        total_used_size = -1
        blob_report_msg, file_report_msg = get_status_to_report(
            run_status, run_result, error_msg, snapshot_info_array)
        if (hutil.is_status_file_exists()):
            status_report_to_file(file_report_msg)
        status_report_to_blob(blob_report_msg)
    except Exception as e:
        errMsg = 'Failed to log status in extension'
        backup_logger.log(errMsg, True, 'Error')
    if (para_parser is not None and para_parser.logsBlobUri is not None
            and para_parser.logsBlobUri != ""):
        backup_logger.commit(para_parser.logsBlobUri)
    else:
        backup_logger.log(
            "the logs blob uri is not there, so do not upload log.")
        backup_logger.commit_to_local()

    sys.exit(0)
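
The /etc/azure/vmbackup.conf handling above falls back to defaults when the file or an option is missing instead of failing the backup. A small standalone sketch of the same pattern (the module aliasing shown here is an assumption about how ConfigParsers is imported):

try:
    import configparser as ConfigParsers   # Python 3
except ImportError:
    import ConfigParser as ConfigParsers   # Python 2

thread_timeout = '60'
snapshot_type = 'appAndFileSystem'
config = ConfigParsers.ConfigParser()
config.read('/etc/azure/vmbackup.conf')    # silently yields an empty config if the file is absent
if config.has_option('SnapshotThread', 'timeout'):
    thread_timeout = config.get('SnapshotThread', 'timeout')
if config.has_option('SnapshotThread', 'snapshot_type'):
    snapshot_type = config.get('SnapshotThread', 'snapshot_type')  # appOnly or appAndFileSystem
print(thread_timeout, snapshot_type)
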
Example #7
def enable():
    global backup_logger,hutil,error_msg,para_parser
    hutil.do_parse_context('Enable')
    try:
        backup_logger.log('starting to enable', True)
        # handle the restoring scenario.
        mi = MachineIdentity()
        stored_identity = mi.stored_identity()
        if(stored_identity is None):
            mi.save_identity()
        else:
            current_identity = mi.current_identity()
            if(current_identity != stored_identity):
                current_seq_no = -1
                backup_logger.log("machine identity not same, set current_seq_no to " + str(current_seq_no) + " " + str(stored_identity) + " " + str(current_identity), True)
                hutil.set_last_seq(current_seq_no)
                mi.save_identity()
        hutil.exit_if_same_seq()

        """
        protectedSettings is the privateConfig passed from PowerShell.
        Note that _context._config uses the most recent timestamp,
        so this is correct as long as time synchronization is working.
        """
        protected_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('protectedSettings')
        public_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('publicSettings')
        para_parser = ParameterParser(protected_settings, public_settings)

        if(bool(public_settings) and not protected_settings): #Protected settings decryption failed case
            error_msg = "unable to load certificate"
            hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedHandlerGuestAgentCertificateNotFound)
            temp_result=CommonVariables.FailedHandlerGuestAgentCertificateNotFound
            temp_status= 'error'
            exit_with_commit_log(temp_status, temp_result,error_msg, para_parser)

        if(para_parser.commandStartTimeUTCTicks is not None and para_parser.commandStartTimeUTCTicks != ""):
            utcTicksLong = int(para_parser.commandStartTimeUTCTicks)
            backup_logger.log('utcTicks in long format' + str(utcTicksLong), True)
            commandStartTime = convert_time(utcTicksLong)
            utcNow = datetime.datetime.utcnow()
            backup_logger.log('command start time is ' + str(commandStartTime) + " and utcNow is " + str(utcNow), True)
            timespan = utcNow - commandStartTime
            MAX_TIMESPAN = 150 * 60 # in seconds
            # handle the machine identity for the restoration scenario.
            total_span_in_seconds = timedelta_total_seconds(timespan)
            backup_logger.log('timespan is ' + str(timespan) + ' ' + str(total_span_in_seconds))

        if(para_parser.taskId is not None and para_parser.taskId != ""):
            backup_logger.log('taskId: ' + str(para_parser.taskId), True)
            exit_if_same_taskId(para_parser.taskId) 
            taskIdentity = TaskIdentity()
            taskIdentity.save_identity(para_parser.taskId)
        hutil.save_seq()
        temp_status = 'transitioning'
        temp_result = CommonVariables.success
        temp_msg = 'Transitioning state in enable'
        blob_report_msg, file_report_msg = get_status_to_report(temp_status, temp_result, temp_msg, None)
        file_status_upload_thread = Thread(target=status_report_to_file, args=(file_report_msg,))
        file_status_upload_thread.start()
        blob_status_upload_thread = Thread(target=status_report_to_blob, args=(blob_report_msg,))
        blob_status_upload_thread.start()
        if(hutil.is_prev_in_transition()):
            backup_logger.log('retrieving the previous logs for this', True)
            backup_logger.set_prev_log()
        if(para_parser is not None and para_parser.logsBlobUri is not None and para_parser.logsBlobUri != ""):
            log_upload_thread=Thread(target=thread_for_log_upload)
            log_upload_thread.start()
            log_upload_thread.join(60)
        file_status_upload_thread.join(30)
        blob_status_upload_thread.join(60)
        start_daemon()
        sys.exit(0)
    except Exception as e:
        errMsg = 'Failed to call the daemon with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
        backup_logger.log(errMsg, True, 'Error')
        global_error_result = e
        temp_status= 'error'
        temp_result=CommonVariables.error
        hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error)
        error_msg = 'Failed to call the daemon'
        exit_with_commit_log(temp_status, temp_result,error_msg, para_parser)
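
This enable() variant pushes the transitioning status to a local file and to the status blob on separate threads, with bounded joins so a slow blob endpoint cannot stall the handler. A self-contained sketch of the pattern with illustrative stand-in upload functions (not the extension's helpers):

from threading import Thread

def upload_to_file(msg):
    print('writing status file:', msg)

def upload_to_blob(msg):
    print('uploading status blob:', msg)

report_msg = 'Transitioning state in enable'
file_thread = Thread(target=upload_to_file, args=(report_msg,))
blob_thread = Thread(target=upload_to_blob, args=(report_msg,))
file_thread.start()
blob_thread.start()
file_thread.join(30)   # wait at most 30 seconds for the local status write
blob_thread.join(60)   # wait at most 60 seconds for the blob upload
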
Example #8
def enable():
    #this is using the most recent file timestamp.
    hutil.do_parse_context('Enable')

    freezer = FsFreezer(backup_logger)
    unfreeze_result = None
    snapshot_result = None
    freeze_result = None
    global_error_result = None
    para_parser = None
    run_result = 1
    error_msg = ''
    run_status = None
    # precheck
    freeze_called = False
    try:
        # we need to freeze the file system first
        backup_logger.log('starting to enable', True)

        # handle the restoring scenario.
        mi = MachineIdentity()
        stored_identity = mi.stored_identity()
        if (stored_identity is None):
            mi.save_identity()
        else:
            current_identity = mi.current_identity()
            if (current_identity != stored_identity):
                current_seq_no = -1
                backup_logger.log(
                    "machine identity not same, set current_seq_no to " +
                    str(current_seq_no) + " " + str(stored_identity) + " " +
                    str(current_identity), True)
                hutil.set_inused_config_seq(current_seq_no)
                mi.save_identity()

        hutil.save_seq()
        """
        protectedSettings is the privateConfig passed from PowerShell.
        Note that _context._config uses the most recent timestamp,
        so this is correct as long as time synchronization is working.
        """
        protected_settings = hutil._context._config['runtimeSettings'][0][
            'handlerSettings'].get('protectedSettings')
        public_settings = hutil._context._config['runtimeSettings'][0][
            'handlerSettings'].get('publicSettings')
        para_parser = ParameterParser(protected_settings, public_settings)

        if (para_parser.commandStartTimeUTCTicks is not None
                and para_parser.commandStartTimeUTCTicks != ""):
            utcTicksLong = long(para_parser.commandStartTimeUTCTicks)
            commandStartTime = convert_time(utcTicksLong)
            utcNow = datetime.datetime.utcnow()
            backup_logger.log('command start time is ' +
                              str(commandStartTime) + " and utcNow is " +
                              str(utcNow))
            timespan = utcNow - commandStartTime
            THIRTY_MINUTES = 30 * 60  # in seconds
            # handle the machine identity for the restoration scenario.
            total_span_in_seconds = timespan.days * 24 * 60 * 60 + timespan.seconds
            backup_logger.log('timespan is ' + str(timespan) + ' ' +
                              str(total_span_in_seconds))
            if (abs(total_span_in_seconds) > THIRTY_MINUTES):
                error_msg = 'the call time stamp is out of date. so skip it.'
                exit_with_commit_log(error_msg, para_parser)

        if (para_parser.taskId is not None and para_parser.taskId != ""):
            taskIdentity = TaskIdentity()
            taskIdentity.save_identity(para_parser.taskId)
        commandToExecute = para_parser.commandToExecute
        #validate all the required parameter here
        if (commandToExecute.lower() == CommonVariables.iaas_install_command):
            backup_logger.log('install succeed.', True)
            run_status = 'success'
            error_msg = 'Install Succeeded'
            run_result = CommonVariables.success
            backup_logger.log(error_msg)
        elif (commandToExecute.lower() == CommonVariables.iaas_vmbackup_command
              ):
            if (para_parser.backup_metadata is None
                    or para_parser.public_config_obj is None
                    or para_parser.private_config_obj is None):
                run_result = CommonVariables.error_parameter
                run_status = 'error'
                error_msg = 'required field empty or not correct'
                backup_logger.log(error_msg, False, 'Error')
            else:
                backup_logger.log('commandToExecute is ' + commandToExecute,
                                  True)
                """
                make sure no logging is in progress while the file system is frozen.
                """
                backup_logger.log('doing freeze now...', True)
                freeze_called = True
                freeze_result = freezer.freezeall()
                backup_logger.log('freeze result ' + str(freeze_result))

                # check whether we freeze succeed first?
                if (freeze_result is not None
                        and len(freeze_result.errors) > 0):
                    run_result = CommonVariables.error
                    run_status = 'error'
                    error_msg = 'Enable failed with error: ' + str(
                        freeze_result)
                    backup_logger.log(error_msg, False, 'Warning')
                else:
                    backup_logger.log('doing snapshot now...')
                    snap_shotter = Snapshotter(backup_logger)
                    snapshot_result = snap_shotter.snapshotall(para_parser)
                    backup_logger.log('snapshotall ends...')
                    if (snapshot_result is not None
                            and len(snapshot_result.errors) > 0):
                        error_msg = 'snapshot result: ' + str(snapshot_result)
                        run_result = CommonVariables.error
                        run_status = 'error'
                        backup_logger.log(error_msg, False, 'Error')
                    else:
                        run_result = CommonVariables.success
                        run_status = 'success'
                        error_msg = 'Enable Succeeded'
                        backup_logger.log(error_msg)
        else:
            run_status = 'error'
            run_result = CommonVariables.error_parameter
            error_msg = 'command is not correct'
            backup_logger.log(error_msg, False, 'Error')
    except Exception as e:
        errMsg = 'Failed to enable the extension with error: %s, stack trace: %s' % (
            str(e), traceback.format_exc())
        backup_logger.log(errMsg, False, 'Error')
        global_error_result = e
    finally:
        backup_logger.log('doing unfreeze now...')
        if (freeze_called):
            unfreeze_result = freezer.unfreezeall()
            backup_logger.log('unfreeze result ' + str(unfreeze_result))
            if (unfreeze_result is not None
                    and len(unfreeze_result.errors) > 0):
                error_msg += ('Enable Succeeded with error: ' +
                              str(unfreeze_result.errors))
                backup_logger.log(error_msg, False, 'Warning')
            backup_logger.log('unfreeze ends...')

    if (para_parser is not None and para_parser.logsBlobUri is not None):
        backup_logger.commit(para_parser.logsBlobUri)
    else:
        backup_logger.commit_to_local()
    """
    do the final status report here, to avoid the complexity of logging while the file system is frozen.
    """
    if (global_error_result is not None):
        if (hasattr(global_error_result, 'errno')
                and global_error_result.errno == 2):
            run_result = CommonVariables.error_12
        elif (para_parser is None):
            run_result = CommonVariables.error_parameter
        else:
            run_result = CommonVariables.error
        run_status = 'error'
        error_msg += ('Enable failed.' + str(global_error_result))

    if (para_parser is not None and para_parser.statusBlobUri is not None):
        do_backup_status_report(operation='Enable',status = run_status,\
                                status_code=str(run_result), \
                                message=error_msg,\
                                taskId=para_parser.taskId,\
                                commandStartTimeUTCTicks=para_parser.commandStartTimeUTCTicks,\
                                blobUri=para_parser.statusBlobUri)

    hutil.do_exit(0, 'Enable', run_status, str(run_result), error_msg)
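
A recurring shape in these examples is freeze, snapshot, then unconditional unfreeze: once freezeall() has been attempted, unfreezeall() runs in the finally block so an exception during the snapshot cannot leave file systems frozen. A self-contained sketch with stand-in classes (not the real FsFreezer/Snapshotter):

class FakeResult:
    def __init__(self):
        self.errors = []

class FakeFreezer:
    def freezeall(self):
        print('freezing all mounted file systems')
        return FakeResult()

    def unfreezeall(self):
        print('thawing all mounted file systems')
        return FakeResult()

freezer = FakeFreezer()
freeze_called = False
try:
    freeze_called = True
    freeze_result = freezer.freezeall()
    if len(freeze_result.errors) == 0:
        print('taking snapshots while the file systems are frozen')
finally:
    if freeze_called:
        unfreeze_result = freezer.unfreezeall()
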
Example #9
def daemon():
    global MyPatching,backup_logger,hutil,run_result,run_status,error_msg,freezer,para_parser,snapshot_done,snapshot_info_array,g_fsfreeze_on,total_used_size
    #this is using the most recent file timestamp.
    hutil.do_parse_context('Executing')

    try:
        backup_logger.log('starting daemon', True)
        # handle the restoring scenario.
        mi = MachineIdentity()
        stored_identity = mi.stored_identity()
        if(stored_identity is None):
            mi.save_identity()
        else:
            current_identity = mi.current_identity()
            if(current_identity != stored_identity):
                current_seq_no = -1
                backup_logger.log("machine identity not same, set current_seq_no to " + str(current_seq_no) + " " + str(stored_identity) + " " + str(current_identity), True)
                hutil.set_last_seq(current_seq_no)
                mi.save_identity()
    except Exception as e:
        errMsg = 'Failed to validate sequence number with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
        backup_logger.log(errMsg, True, 'Error')

    freezer = FsFreezer(patching= MyPatching, logger = backup_logger, hutil = hutil)
    global_error_result = None
    # precheck
    freeze_called = False
    configfile='/etc/azure/vmbackup.conf'
    thread_timeout=str(60)

    #Adding python version to the telemetry
    try:
        python_version_info = sys.version_info
        python_version = str(sys.version_info[0])+ '.'  + str(sys.version_info[1]) + '.'  + str(sys.version_info[2])
        HandlerUtil.HandlerUtility.add_to_telemetery_data("pythonVersion", python_version)
    except Exception as e:
        errMsg = 'Failed to retrieve python version with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
        backup_logger.log(errMsg, True, 'Error')

    #fetching platform architecture
    try:
        architecture = platform.architecture()[0]
        HandlerUtil.HandlerUtility.add_to_telemetery_data("platformArchitecture", architecture)
    except Exception as e:
        errMsg = 'Failed to do retrieve "platform architecture" with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
        backup_logger.log(errMsg, True, 'Error')

    try:
        if(freezer.mounts is not None):
            hutil.partitioncount = len(freezer.mounts.mounts)
        backup_logger.log(" configfile " + str(configfile), True)
        config = ConfigParsers.ConfigParser()
        config.read(configfile)
        if config.has_option('SnapshotThread','timeout'):
            thread_timeout= config.get('SnapshotThread','timeout')
    except Exception as e:
        errMsg='cannot read config file or file not present'
        backup_logger.log(errMsg, True, 'Warning')
    backup_logger.log("final thread timeout" + thread_timeout, True)
    
    snapshot_info_array = None

    try:
        # we need to freeze the file system first
        backup_logger.log('starting daemon', True)
        """
        protectedSettings is the privateConfig passed from Powershell.
        WATCHOUT that, the _context_config are using the most freshest timestamp.
        if the time sync is alive, this should be right.
        """
        protected_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('protectedSettings')
        public_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('publicSettings')
        para_parser = ParameterParser(protected_settings, public_settings, backup_logger)
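        # The settings read above are assumed to follow the guest-agent handler
        # settings layout (illustrative only; which keys live in public vs.
        # protected settings is up to ParameterParser):
        # {
        #   "runtimeSettings": [{
        #     "handlerSettings": {
        #       "publicSettings":    { ... },
        #       "protectedSettings": { ... }
        #     }
        #   }]
        # }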


        if(bool(public_settings) and not protected_settings): #Protected settings decryption failed case
            error_msg = "unable to load certificate"
            hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedHandlerGuestAgentCertificateNotFound)
            temp_result=CommonVariables.FailedHandlerGuestAgentCertificateNotFound
            temp_status= 'error'
            exit_with_commit_log(temp_status, temp_result,error_msg, para_parser)

        if(para_parser.commandStartTimeUTCTicks is not None and para_parser.commandStartTimeUTCTicks != ""):
            utcTicksLong = int(para_parser.commandStartTimeUTCTicks)
            backup_logger.log('utcTicks in long format ' + str(utcTicksLong), True)
            commandStartTime = convert_time(utcTicksLong)
            utcNow = datetime.datetime.utcnow()
            backup_logger.log('command start time is ' + str(commandStartTime) + " and utcNow is " + str(utcNow), True)
            timespan = utcNow - commandStartTime
            MAX_TIMESPAN = 150 * 60 # in seconds
            # handle the machine identity for the restoration scenario.
            total_span_in_seconds = timedelta_total_seconds(timespan)
            backup_logger.log('timespan is ' + str(timespan) + ' ' + str(total_span_in_seconds))

        if(para_parser.taskId is not None and para_parser.taskId != ""):
            backup_logger.log('taskId: ' + str(para_parser.taskId), True)
            exit_if_same_taskId(para_parser.taskId) 
            taskIdentity = TaskIdentity()
            taskIdentity.save_identity(para_parser.taskId)
        
        hutil.save_seq()

        commandToExecute = para_parser.commandToExecute
        #validate all the required parameter here
        backup_logger.log(commandToExecute,True)
        if(CommonVariables.iaas_install_command in commandToExecute.lower()):
            backup_logger.log('install succeed.',True)
            run_status = 'success'
            error_msg = 'Install Succeeded'
            run_result = CommonVariables.success
            backup_logger.log(error_msg)
        elif(CommonVariables.iaas_vmbackup_command in commandToExecute.lower()):
            if(para_parser.backup_metadata is None or para_parser.public_config_obj is None or para_parser.private_config_obj is None):
                run_result = CommonVariables.error_parameter
                hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error_parameter)
                run_status = 'error'
                error_msg = 'required field empty or not correct'
                backup_logger.log(error_msg, True, 'Error')
            else:
                backup_logger.log('commandToExecute is ' + commandToExecute, True)
                """
                make sure the log is not doing when the file system is freezed.
                """
                temp_status= 'success'
                temp_result=CommonVariables.ExtensionTempTerminalState
                temp_msg='Transitioning state in extension'
                blob_report_msg, file_report_msg = get_status_to_report(temp_status, temp_result, temp_msg, None)
                status_report_to_file(file_report_msg)
                status_report_to_blob(blob_report_msg)
                #partial logging before freeze
                if(para_parser is not None and para_parser.logsBlobUri is not None and para_parser.logsBlobUri != ""):
                    backup_logger.commit_to_blob(para_parser.logsBlobUri)
                else:
                    backup_logger.log("the logs blob uri is not there, so do not upload log.")
                backup_logger.log('commandToExecute is ' + commandToExecute, True)

                PluginHostObj = PluginHost(logger=backup_logger)
                PluginHostErrorCode,dobackup,g_fsfreeze_on = PluginHostObj.pre_check()
                doFsConsistentbackup = False
                appconsistentBackup = False

                if not (PluginHostErrorCode == CommonVariables.FailedPrepostPluginhostConfigParsing or
                        PluginHostErrorCode == CommonVariables.FailedPrepostPluginConfigParsing or
                        PluginHostErrorCode == CommonVariables.FailedPrepostPluginhostConfigNotFound or
                        PluginHostErrorCode == CommonVariables.FailedPrepostPluginhostConfigPermissionError or
                        PluginHostErrorCode == CommonVariables.FailedPrepostPluginConfigNotFound):
                    backup_logger.log('App Consistent Backup Enabled', True)
                    HandlerUtil.HandlerUtility.add_to_telemetery_data("isPrePostEnabled", "true")
                    appconsistentBackup = True

                if(PluginHostErrorCode != CommonVariables.PrePost_PluginStatus_Success):
                    backup_logger.log('Triggering File System Consistent Backup because of error code ' + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(PluginHostErrorCode), True)
                    doFsConsistentbackup = True

                preResult = PluginHostResult()
                postResult = PluginHostResult()

                if not doFsConsistentbackup:
                    preResult = PluginHostObj.pre_script()
                    dobackup = preResult.continueBackup

                    if(g_fsfreeze_on == False and preResult.anyScriptFailed):
                        dobackup = False

                if dobackup:
                    freeze_snapshot(thread_timeout)

                if not doFsConsistentbackup:
                    postResult = PluginHostObj.post_script()
                    if not postResult.continueBackup:
                        dobackup = False
                
                    if(g_fsfreeze_on == False and postResult.anyScriptFailed):
                        dobackup = False

                if not dobackup:
                    if run_result == CommonVariables.success and PluginHostErrorCode != CommonVariables.PrePost_PluginStatus_Success:
                        run_status = 'error'
                        run_result = PluginHostErrorCode
                        hutil.SetExtErrorCode(PluginHostErrorCode)
                        error_msg = 'Plugin Host Precheck Failed'
                        error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(hutil.ExtErrorCode)
                        backup_logger.log(error_msg, True)

                    if run_result == CommonVariables.success:
                        pre_plugin_errors = preResult.errors
                        for error in pre_plugin_errors:
                            if error.errorCode != CommonVariables.PrePost_PluginStatus_Success:
                                run_status = 'error'
                                run_result = error.errorCode
                                hutil.SetExtErrorCode(error.errorCode)
                                error_msg = 'PreScript failed for the plugin ' +  error.pluginName
                                error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(hutil.ExtErrorCode)
                                backup_logger.log(error_msg, True)
                                break

                    if run_result == CommonVariables.success:
                        post_plugin_errors = postResult.errors
                        for error in post_plugin_errors:
                            if error.errorCode != CommonVariables.PrePost_PluginStatus_Success:
                                run_status = 'error'
                                run_result = error.errorCode
                                hutil.SetExtErrorCode(error.errorCode)
                                error_msg = 'PostScript failed for the plugin ' +  error.pluginName
                                error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(hutil.ExtErrorCode)
                                backup_logger.log(error_msg, True)
                                break

                if appconsistentBackup:
                    if(PluginHostErrorCode != CommonVariables.PrePost_PluginStatus_Success):
                        hutil.SetExtErrorCode(PluginHostErrorCode)
                    pre_plugin_errors = preResult.errors
                    for error in pre_plugin_errors:
                        if error.errorCode != CommonVariables.PrePost_PluginStatus_Success:
                            hutil.SetExtErrorCode(error.errorCode)
                    post_plugin_errors = postResult.errors
                    for error in post_plugin_errors:
                        if error.errorCode != CommonVariables.PrePost_PluginStatus_Success:
                            hutil.SetExtErrorCode(error.errorCode)

                if run_result == CommonVariables.success and not doFsConsistentbackup and not (preResult.anyScriptFailed or postResult.anyScriptFailed):
                    run_status = 'success'
                    run_result = CommonVariables.success_appconsistent
                    hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.success_appconsistent)
                    error_msg = 'Enable Succeeded with App Consistent Snapshot'
                    backup_logger.log(error_msg, True)

        else:
            run_status = 'error'
            run_result = CommonVariables.error_parameter
            hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error_parameter)
            error_msg = 'command is not correct'
            backup_logger.log(error_msg, True, 'Error')
    except Exception as e:
        errMsg = 'Failed to enable the extension with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
        backup_logger.log(errMsg, True, 'Error')
        global_error_result = e

    """
    we do the final report here to get rid of the complex logic to handle the logging when file system be freezed issue.
    """
    try:
        if(global_error_result is not None):
            if(hasattr(global_error_result,'errno') and global_error_result.errno == 2):
                run_result = CommonVariables.error_12
                hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error_12)
            elif(para_parser is None):
                run_result = CommonVariables.error_parameter
                hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error_parameter)
            else:
                run_result = CommonVariables.error
                hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error)
            run_status = 'error'
            error_msg += ('Enable failed. ' + str(global_error_result))
        status_report_msg = None
        hutil.SetExtErrorCode(run_result) #setting extension errorcode at the end if missed somewhere
        HandlerUtil.HandlerUtility.add_to_telemetery_data("extErrorCode", str(ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.ExtensionErrorCodeNameDict[hutil.ExtErrorCode]))
        total_used_size = -1
        blob_report_msg, file_report_msg = get_status_to_report(run_status,run_result,error_msg, snapshot_info_array)
        if(hutil.is_status_file_exists()):
            status_report_to_file(file_report_msg)
        status_report_to_blob(blob_report_msg)
    except Exception as e:
        errMsg = 'Failed to log status in extension'
        backup_logger.log(errMsg, True, 'Error')
    if(para_parser is not None and para_parser.logsBlobUri is not None and para_parser.logsBlobUri != ""):
        backup_logger.commit(para_parser.logsBlobUri)
    else:
        backup_logger.log("the logs blob uri is not there, so do not upload log.")
        backup_logger.commit_to_local()

    sys.exit(0)
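Example #9 calls convert_time and timedelta_total_seconds when checking how stale commandStartTimeUTCTicks is, but neither helper appears in this listing. A minimal sketch under the assumption that the ticks value is a .NET-style tick count (100-nanosecond intervals since 0001-01-01 UTC) and that the seconds helper exists for interpreters lacking timedelta.total_seconds():

import datetime

def convert_time(utc_ticks):
    # One .NET tick is 100 ns (0.1 microsecond), counted from 0001-01-01T00:00:00 UTC,
    # so ticks / 10 gives microseconds since that epoch.
    return datetime.datetime(1, 1, 1) + datetime.timedelta(microseconds=utc_ticks / 10)

def timedelta_total_seconds(delta):
    try:
        return delta.total_seconds()          # available on Python 2.7+
    except AttributeError:
        # manual fallback: days + seconds + microseconds
        return (delta.days * 86400.0) + delta.seconds + (delta.microseconds / 1000000.0)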
Example #10
0
def enable():
    hutil.do_parse_context('Enable')
    # we need to start another subprocess to do the work, because the initial
    # process would be killed by the waagent (WALA) in 5 minutes.
    logger.log('enabling...')

    """
    trying to mount the crypted items.
    """
    disk_util = DiskUtil(hutil = hutil, patching = MyPatching, logger = logger, encryption_environment = encryption_environment)
    bek_util = BekUtil(disk_util, logger)
    
    existed_passphrase_file = None
    encryption_config = EncryptionConfig(encryption_environment=encryption_environment, logger = logger)
    config_path_result = disk_util.make_sure_path_exists(encryption_environment.encryption_config_path)
    if(config_path_result != CommonVariables.process_success):
        logger.log(msg="azure encryption path creation failed.",level=CommonVariables.ErrorLevel)
    if(encryption_config.config_file_exists()):
        existed_passphrase_file = bek_util.get_bek_passphrase_file(encryption_config)
        if(existed_passphrase_file is not None):
            mount_encrypted_disks(disk_util=disk_util,bek_util=bek_util,encryption_config=encryption_config,passphrase_file=existed_passphrase_file)
        else:
            logger.log(msg="the config is there, but we could not get the bek file.",level=CommonVariables.WarningLevel)
            exit_without_status_report()

    # handle the re-call scenario: a re-call should resume the previous operation
    # if a marker for the next reboot is present.
    encryption_marker = EncryptionMarkConfig(logger, encryption_environment)
    if (not encryption_marker.config_file_exists()):
        machine_identity = MachineIdentity()
        stored_identity = machine_identity.stored_identity()
        if(stored_identity is None):
            machine_identity.save_identity()
        else:
            current_identity = machine_identity.current_identity()
            if(current_identity != stored_identity):
                current_seq_no = -1
                backup_logger.log("machine identity not same, set current_seq_no to " + str(current_seq_no) + " " + str(stored_identity) + " " + str(current_identity), True)
                hutil.set_last_seq(current_seq_no)
                machine_identity.save_identity()
                # we should be careful about proceeding in this case; just fail
                # this time and wait for the customer to retry.
                exit_without_status_report()

    hutil.exit_if_same_seq()
    hutil.save_seq()

    try:
        protected_settings_str = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('protectedSettings')
        public_settings_str = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('publicSettings')
        if(isinstance(public_settings_str,basestring)):
            public_settings = json.loads(public_settings_str)
        else:
            public_settings = public_settings_str

        if(isinstance(protected_settings_str,basestring)):
            protected_settings = json.loads(protected_settings_str)
        else:
            protected_settings = protected_settings_str
        extension_parameter = ExtensionParameter(hutil, protected_settings, public_settings)
        
        kek_secret_id_created = None

        encryption_marker = EncryptionMarkConfig(logger, encryption_environment)
        if encryption_marker.config_file_exists():
            # verify the encryption mark
            logger.log(msg="encryption mark is there, starting daemon.",level=CommonVariables.InfoLevel)
            start_daemon()
        else:
            if(encryption_config.config_file_exists() and existed_passphrase_file is not None):
                logger.log(msg="config file exists and passphrase file exists.", level=CommonVariables.WarningLevel)
                encryption_marker = mark_encryption(command=extension_parameter.command, \
                                                  volume_type=extension_parameter.VolumeType, \
                                                  disk_format_query=extension_parameter.DiskFormatQuery)
                start_daemon()
            else:
                """
                creating the secret, the secret would be transferred to a bek volume after the updatevm called in powershell.
                """
                #store the luks passphrase in the secret.
                keyVaultUtil = KeyVaultUtil(logger)

                """
                validate the parameters
                """
                if(extension_parameter.VolumeType != 'Data'):
                    hutil.do_exit(0, 'Enable', CommonVariables.extension_error_status,str(CommonVariables.volue_type_not_support), 'VolumeType ' + str(extension_parameter.VolumeType) + ' is not supported.')

                if(extension_parameter.command not in [CommonVariables.EnableEncryption, CommonVariables.EnableEncryptionFormat]):
                    hutil.do_exit(0, 'Enable', CommonVariables.extension_error_status,str(CommonVariables.command_not_support), 'Command ' + str(extension_parameter.command) + ' is not supported.')

                """
                this is the fresh call case
                """
                # handle the passphrase-related setup
                if(existed_passphrase_file is None):
                    if(extension_parameter.passphrase is None or extension_parameter.passphrase == ""):
                        extension_parameter.passphrase = bek_util.generate_passphrase(extension_parameter.KeyEncryptionAlgorithm)
                    else:
                        logger.log(msg="the extension_parameter.passphrase is none")

                    kek_secret_id_created = keyVaultUtil.create_kek_secret(
                        Passphrase=extension_parameter.passphrase,
                        KeyVaultURL=extension_parameter.KeyVaultURL,
                        KeyEncryptionKeyURL=extension_parameter.KeyEncryptionKeyURL,
                        AADClientID=extension_parameter.AADClientID,
                        KeyEncryptionAlgorithm=extension_parameter.KeyEncryptionAlgorithm,
                        AADClientSecret=extension_parameter.AADClientSecret,
                        DiskEncryptionKeyFileName=extension_parameter.DiskEncryptionKeyFileName)

                    if(kek_secret_id_created is None):
                        hutil.do_exit(0, 'Enable', CommonVariables.extension_error_status, str(CommonVariables.create_encryption_secret_failed), 'Enable failed.')
                    else:
                        encryption_config.passphrase_file_name = extension_parameter.DiskEncryptionKeyFileName
                        encryption_config.bek_filesystem = CommonVariables.BekVolumeFileSystem
                        encryption_config.secret_id = kek_secret_id_created
                        encryption_config.commit()
   
                encryption_marker = mark_encryption(command=extension_parameter.command, \
                                                  volume_type=extension_parameter.VolumeType, \
                                                  disk_format_query=extension_parameter.DiskFormatQuery)

                if(kek_secret_id_created != None):
                    hutil.do_exit(0, 'Enable', CommonVariables.extension_success_status, str(CommonVariables.success), str(kek_secret_id_created))
                else:
                    """
                    the enabling called again. the passphrase would be re-used.
                    """
                    hutil.do_exit(0, 'Enable', CommonVariables.extension_success_status, str(CommonVariables.encrypttion_already_enabled), str(kek_secret_id_created))
    except Exception as e:
        logger.log(msg="Failed to enable the extension with error: %s, stack trace: %s" % (str(e), traceback.format_exc()),level=CommonVariables.ErrorLevel)
        hutil.do_exit(0, 'Enable',CommonVariables.extension_error_status,str(CommonVariables.unknown_error), 'Enable failed.')
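All of the examples above repeat the same machine-identity / sequence-number reset block when handling the restore scenario. A minimal sketch of how that block could be factored into one helper, assuming the MachineIdentity and handler-utility interfaces shown above (stored_identity(), current_identity(), save_identity(), set_last_seq()):

def reset_seq_if_machine_changed(hutil, log):
    """Reset the last-processed sequence number when the VM identity changed
    (for example after a restore), so the next settings file is re-processed.
    Sketch only; it relies on the MachineIdentity / hutil interfaces used above."""
    mi = MachineIdentity()
    stored_identity = mi.stored_identity()
    if stored_identity is None:
        # first run on this machine: remember the identity and carry on
        mi.save_identity()
        return False
    current_identity = mi.current_identity()
    if current_identity == stored_identity:
        return False
    log("machine identity not same, set current_seq_no to -1 "
        + str(stored_identity) + " " + str(current_identity))
    hutil.set_last_seq(-1)
    mi.save_identity()
    return True

A caller in the backup daemon could then use, for instance, reset_seq_if_machine_changed(hutil, backup_logger.log) in place of the inline block.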