Example #1
def get_tasks(pull_status):
    global ignore_files
    changes = get_changeset()
    next_job = False
    for file_name, revision in changes.iteritems():
        if DEBUG and INCLUDE and file_name not in INCLUDE:
            #debug mode: skip files that are not in the INCLUDE list
            ignore_files += 1
            continue
        tasks = {}

        logger.debug(
            "Begin to check whether the file '{}' need synchronization or not."
            .format(file_name))
        action = ""
        try:
            segments = file_name.split('/', 2)
            if revision in ['A', 'M']:
                action = "update"
                file_content = hg.cat([file_name], rev="tip")
            elif revision == 'R':
                action = "remove"
                pre_rev = previous(HG_NODE)
                try:
                    file_content = hg.cat([file_name], rev=pre_rev)
                except:
                    #can't get the file content
                    logger.error("Can't get file '{}' content, ignore.".format(
                        file_name))
                    pull_status.get_task_status(file_name).set_message(
                        "message", "Failed to read file content, ignored.")
                    pull_status.get_task_status(file_name).set_message(
                        "action", action)
                    pull_status.get_task_status(file_name).succeed()
                    pull_status.get_task_status(
                        file_name).last_process_time = now()
                    continue

            sync_job = parse_job(file_name, action, file_content)
            action = sync_job["action"]
            if action == 'none':
                #no action is required
                #pull_status.get_task_status(file_name).set_message("message","No action is required.")
                #pull_status.get_task_status(file_name).set_message("action","None")
                #pull_status.get_task_status(file_name).succeed()
                #pull_status.get_task_status(file_name).last_process_time = now()
                logger.debug(
                    "No action is required fot the file '{}', ignore. ".format(
                        file_name))
                continue

            logger.debug(
                "The file '{}' is requested to perform '{}' action".format(
                    file_name, action))
            sync_job["status"] = SlaveSyncStatus(file_name, action,
                                                 file_content)
            #load meta data, if meta data is saved into a separated file
            load_metafile(sync_job)
            #convert bbox to array if bbox is a string
            if "bbox" in sync_job and isinstance(sync_job["bbox"], basestring):
                sync_job["bbox"] = json.loads(sync_job["bbox"])
            #tasks will be added only if the sync job has some unexecuted or unsuccessful task.
            job_failed = False
            next_job = False
            for task_type in ordered_sync_task_type:
                if task_type not in sync_tasks_metadata: continue
                if task_type not in sync_tasks: continue
                for (task_metadata,
                     task_logger) in sync_tasks_metadata[task_type]:
                    try:
                        #if task_type == "update_access_rules":
                        #    import ipdb;ipdb.set_trace()
                        if not is_sync_task(sync_job, segments, action,
                                            task_metadata):
                            continue

                        if task_metadata[JOB_DEF_INDEX][CHANNEL_SUPPORT_INDEX]:
                            sync_job["channel"] = segments[0]

                        #check whether this task is already executed or not
                        if not job_failed and sync_job[
                                'status'].get_task_status(
                                    task_type).is_succeed:
                            #this task has already succeeded, skip it
                            logger.debug(
                                "The task '{1}' is already done on the file '{0}',ignore"
                                .format(file_name, task_type))
                            break

                        #this task has not succeeded or been executed before, add it to the sync tasks
                        job_failed = True
                        task_name = taskname(sync_job, task_metadata)
                        if task_type not in tasks:
                            tasks[task_type] = {}
                        if task_name in sync_tasks[task_type]:
                            #task already exists, this is a shared task
                            shared_task = sync_tasks[task_type][task_name]
                            if isinstance(shared_task, list):
                                task_status = shared_task[0][0][
                                    'status'].get_task_status(task_type)
                                tasks[task_type][task_name] = shared_task + [
                                    (sync_job, task_metadata, task_logger)
                                ]
                            else:
                                task_status = shared_task[0][
                                    'status'].get_task_status(task_type)
                                task_status.shared = True
                                tasks[task_type][task_name] = [
                                    shared_task,
                                    (sync_job, task_metadata, task_logger)
                                ]
                            tasks[task_type][task_name] = sorted(
                                tasks[task_type][task_name],
                                key=lambda x: x[0]['job_file'],
                                reverse=True)
                            sync_job['status'].set_task_status(
                                task_type, task_status)
                        else:
                            #init a default status object for this task
                            sync_job['status'].get_task_status(task_type)

                            tasks[task_type][task_name] = (sync_job,
                                                           task_metadata,
                                                           task_logger)

                        #if task_type == "create_workspace": raise Exception("Failed for testing.")
                        break
                    except:
                        #preprocessing the file failed, continue to the next file
                        message = traceback.format_exc()
                        logger.error(message)
                        tasks.clear()
                        sync_job['status'].get_task_status(
                            task_type).set_message(
                                "message",
                                "Preprocess the file failed. err = {0}".format(
                                    message))
                        sync_job['status'].get_task_status(task_type).failed()
                        #this job failed, try to add a notify task
                        for notify_metadata, notify_logger in notify_tasks_metadata:
                            if is_sync_task(sync_job, segments, action,
                                            notify_metadata):
                                notify_tasks.append(
                                    (sync_job, notify_metadata, notify_logger))
                                break
                        pull_status.get_task_status(file_name).set_message(
                            "action", action)
                        pull_status.get_task_status(file_name).set_message(
                            "message",
                            "Preprocess the file failed. err = {0}".format(
                                message))
                        pull_status.get_task_status(file_name).failed()
                        pull_status.get_task_status(
                            file_name).last_process_time = now()
                        next_job = True
                        break

                if next_job:
                    break

            if next_job:
                continue

            #add the sync job's tasks to the total sync tasks.
            for key, val in tasks.iteritems():
                sync_tasks[key].update(val)

            if tasks:
                #this job has some sync tasks to do,
                #try to add a prepare task
                for task_metadata, task_logger in prepare_tasks_metadata:
                    if is_sync_task(sync_job, segments, action, task_metadata):
                        prepare_tasks.append(
                            (sync_job, task_metadata, task_logger))
                        break

                #try to add a notify task
                for task_metadata, task_logger in notify_tasks_metadata:
                    if is_sync_task(sync_job, segments, action, task_metadata):
                        notify_tasks.append(
                            (sync_job, task_metadata, task_logger))
                        break
                pull_status.get_task_status(file_name).set_message(
                    "message", "Ready to synchronize")
                pull_status.get_task_status(file_name).set_message(
                    "action", action)
                pull_status.get_task_status(file_name).succeed()
                pull_status.get_task_status(
                    file_name).last_process_time = now()
            else:
                logger.debug(
                    "File({}) has been synchronized or no need to synchronize".
                    format(file_name))

        except:
            pull_status.get_task_status(file_name).failed()
            message = traceback.format_exc()
            pull_status.get_task_status(file_name).set_message(
                "message", message)
            pull_status.get_task_status(file_name).last_process_time = now()
            logger.error("Add the '{1}' task for ({0}) failed.{2}".format(
                file_name, action, traceback.format_exc()))
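
A note on the shared-task handling above: when a second sync job resolves to the same task name, the stored (sync_job, task_metadata, task_logger) entry is promoted to a list sorted by job_file, and the task status is flagged as shared. Below is a minimal, self-contained sketch of just that promotion with plain dictionaries; register_task and the sample entries are hypothetical stand-ins, not part of the real module.

def register_task(tasks, task_type, task_name, entry):
    """Store entry under tasks[task_type][task_name], promoting duplicates to a shared list."""
    bucket = tasks.setdefault(task_type, {})
    existing = bucket.get(task_name)
    if existing is None:
        #first job mapped to this task name: keep the bare entry
        bucket[task_name] = entry
    else:
        #another job mapped to the same task name: it becomes a shared task,
        #ordered so that the newest job_file comes first
        shared = existing if isinstance(existing, list) else [existing]
        bucket[task_name] = sorted(shared + [entry],
                                   key=lambda e: e[0]['job_file'],
                                   reverse=True)

if __name__ == "__main__":
    tasks = {}
    register_task(tasks, "update_layer", "layer_a", ({'job_file': 'jobs/a1.json'}, None, None))
    register_task(tasks, "update_layer", "layer_a", ({'job_file': 'jobs/a2.json'}, None, None))
    print(tasks["update_layer"]["layer_a"])  #a shared list with the newest job_file first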
Example #2
def sync():
    if DEBUG:
        logger.debug("Run in debug mode.")
        if INCLUDE:
            logger.debug("Only the files({}) will be processed.".format(",".join(INCLUDE)))
	
    try:
        for init_method in module_init_handlers:
            init_method()

        pull_status = SlaveSyncStatus.get_bitbucket_status()
        get_tasks(pull_status)
        slave_sync_notify.SlaveServerSyncNotify.send_last_sync_time(pull_status)

        logger.info("HG_NODE: {}".format(HG_NODE))
        for task_type in ordered_sync_task_type:
            for task_name,task in sync_tasks[task_type].iteritems():    
                if isinstance(task,list):
                    #shared task
                    logger.info("Shared Task : {0}  {1} = [{2}]".format(task_type,task_name,",".join([t[0]['job_file'] for t in task])))
                else:
                    #unshared task
                    logger.info("Task : {0}  {1} = {2}".format(task_type,task_name,task[0]['job_file']))
        for task in notify_tasks:
            logger.info("Task : {0}  {1} = {2}".format("send_notify",taskname(task[0],task[1]),task[0]['job_file']))

        #prepare tasks
        for task in prepare_tasks:
            execute_prepare_task(*task)

        #execute tasks
        for task_type in ordered_sync_task_type:
            for task in sync_tasks[task_type].values():    
                if isinstance(task,list):
                    #shared task
                    for shared_task in task:
                        execute_task(*shared_task)
                else:
                    #unshared task
                    execute_task(*task)

        if SlaveSyncStatus.all_succeed():
            logger.info("All done!")
        else:
            raise Exception("Some files({0}) are processed failed.".format(' , '.join([s.file for s in SlaveSyncStatus.get_failed_status_objects()])))

        if ignore_files:
            raise Exception("{} files are ignored in debug mode,rollback!".format(ignore_files))
        if ROLLBACK:
            raise Exception("Rollback for testing")
        return
    finally:
        #save notify status 
        SlaveSyncStatus.save_all()
        #send notify
        for task in notify_tasks:
            execute_notify_task(*task)

        #clear all tasks
        for k in sync_tasks.keys():
            sync_tasks[k].clear()

        for reset_method in module_reset_handlers:
            reset_method()
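
All of the sync() variants rely on a try/finally contract: the body may raise deliberately (the ignored-files and ROLLBACK checks turn an otherwise successful run into a rollback), yet saving status, sending notifications and clearing tasks always happen. Below is a minimal sketch of that shape, assuming hypothetical stand-ins (guarded_run, run_body, save_status, send_notifications, clear_tasks) rather than the real module functions.

import logging

def guarded_run(run_body, save_status, send_notifications, clear_tasks, rollback=False):
    try:
        run_body()
        if rollback:
            #deliberately fail after the work is done to exercise the rollback path
            raise Exception("Rollback for testing")
    finally:
        save_status()          #persist per-file status whatever happened
        send_notifications()   #notifications go out even when the run failed
        clear_tasks()          #leave no stale tasks behind for the next run

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger("sketch")
    try:
        guarded_run(lambda: log.info("syncing"),
                    lambda: log.info("status saved"),
                    lambda: log.info("notify sent"),
                    lambda: log.info("tasks cleared"),
                    rollback=True)
    except Exception as ex:
        log.error("run failed: {0}".format(ex))  #the finally block has already run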
Example #3
def sync():
    if DEBUG:
        logger.debug("Run in debug mode.")
        if INCLUDE:
            logger.debug("Only the files({}) will be processed.".format(
                ",".join(INCLUDE)))

    try:
        for init_method in module_init_handlers:
            init_method()

        pull_status = SlaveSyncStatus.get_bitbucket_status()
        get_tasks(pull_status)
        try:
            slave_sync_notify.SlaveServerSyncNotify.send_last_sync_time(
                pull_status)
        except:
            pass
        logger.info("HG_NODE: {}".format(HG_NODE))
        for task_type in ordered_sync_task_type:
            for task_name, task in sync_tasks[task_type].iteritems():
                if isinstance(task, list):
                    #shared task
                    logger.info("Shared Task : {0}  {1} = [{2}]".format(
                        task_type, task_name,
                        ",".join([t[0]['job_file'] for t in task])))
                else:
                    #unshared task
                    logger.info("Task : {0}  {1} = {2}".format(
                        task_type, task_name, task[0]['job_file']))
        for task in notify_tasks:
            logger.info("Task : {0}  {1} = {2}".format(
                "send_notify", taskname(task[0], task[1]),
                task[0]['job_file']))

        #prepare tasks
        for task in prepare_tasks:
            execute_prepare_task(*task)

        #execute tasks
        for task_type in ordered_sync_task_type:
            for task in sync_tasks[task_type].values():
                if isinstance(task, list):
                    #shared task
                    for shared_task in task:
                        execute_task(*shared_task)
                else:
                    #unshared task
                    execute_task(*task)

        if SlaveSyncStatus.all_succeed():
            logger.info("All done!")
        else:
            raise Exception("Some files({0}) are processed failed.".format(
                ' , '.join([
                    s.file
                    for s in SlaveSyncStatus.get_failed_status_objects()
                ])))

        if ignore_files:
            raise Exception(
                "{} files are ignored in debug mode,rollback!".format(
                    ignore_files))
        if ROLLBACK:
            raise Exception("Rollback for testing")
        return
    finally:
        #save notify status
        SlaveSyncStatus.save_all()
        #send notify
        for task in notify_tasks:
            execute_notify_task(*task)

        #clear all tasks
        for k in sync_tasks.keys():
            sync_tasks[k].clear()

        for reset_method in module_reset_handlers:
            reset_method()
Example #4
def sync():
    try:
        for init_method in module_init_handlers:
            init_method()

        pull_status = SlaveSyncStatus.get_bitbucket_status()
        get_tasks(pull_status)
        slave_sync_notify.SlaveServerSyncNotify.send_last_sync_time(pull_status)

        logger.info("HG_NODE: {}".format(HG_NODE))
        for task_type in ordered_sync_task_type:
            for task_name,task in sync_tasks[task_type].iteritems():    
                if isinstance(task,list):
                    #shared task
                    logger.info("Shared Task : {0}  {1} = [{2}]".format(task_type,task_name,",".join([t[0]['job_file'] for t in task])))
                else:
                    #unshared task
                    logger.info("Task : {0}  {1} = {2}".format(task_type,task_name,task[0]['job_file']))
        for task in notify_tasks:
            logger.info("Task : {0}  {1} = {2}".format("send_notify",taskname(task[0],task[1]),task[0]['job_file']))

        expected_executed_tasks = -1
        executed_task = 0
        for task_type in ordered_sync_task_type:
            for task in sync_tasks[task_type].values():    
                if executed_task == expected_executed_tasks: break
                if isinstance(task,list):
                    #shared task
                    for shared_task in task:
                        execute_task(*shared_task)
                else:
                    #unshared task
                    execute_task(*task)

                executed_task += 1
            if executed_task == expected_executed_tasks: break

        if SlaveSyncStatus.all_succeed():
            logger.info("All done!")
        else:
            raise Exception("Some files({0}) are processed failed.".format(' , '.join([s.file for s in SlaveSyncStatus.get_failed_status_objects()])))

        #raise Exception("Rollback for testing")
        return
    finally:
        #save notify status 
        SlaveSyncStatus.save_all()
        #send notify
        for task in notify_tasks:
            logger.info("Begin to send notify for task ({0}).".format(taskname(task[0],task[1])))
            try:
                task[1][TASK_HANDLER_INDEX](*task)
                logger.info("Succeed to send notify for task ({0}).".format(taskname(task[0],task[1])))
            except:
                logger.error("Failed to send notify for task ({0}). {1}".format(taskname(task[0],task[1]),traceback.format_exc()))
        #clear all tasks
        for k in sync_tasks.keys():
            sync_tasks[k].clear()

        for reset_method in module_reset_handlers:
            reset_method()
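
This last variant adds an execution cap: an expected_executed_tasks of -1 means run everything, while any non-negative value stops the nested loops once that many tasks have run. The sketch below isolates just that cap, treating every entry as a single task; run_limited and the sample data are hypothetical and only illustrate the pattern.

def run_limited(tasks_by_type, ordered_types, execute, limit=-1):
    """Execute tasks type by type, stopping once `limit` tasks have run (-1 means no limit)."""
    executed = 0
    for task_type in ordered_types:
        for task in tasks_by_type.get(task_type, {}).values():
            if executed == limit:
                break
            execute(task)
            executed += 1
        if executed == limit:
            break
    return executed

if __name__ == "__main__":
    executed_names = []
    tasks = {"prepare": {"a": "task-a", "b": "task-b"}, "publish": {"c": "task-c"}}
    ran = run_limited(tasks, ["prepare", "publish"], executed_names.append, limit=2)
    print("executed {0} tasks: {1}".format(ran, executed_names))  #the cap stops the run after two of three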