Example #1
File: queue.py  Project: mincau/TACTIC
    def check_existing_jobs(self):
        self.keep_jobs = []
        for job in self.jobs:
            job_code = job.get_code()
            search = Search(self.get_job_search_type())
            search.add_filter("code", job_code)
            job = search.get_sobject()

            if not job:
                print("Cancel ....")
                scheduler = Scheduler.get()
                scheduler.cancel_task(job_code)
                continue

            state = job.get_value("state")
            if state == 'cancel':
                print("Cancel task [%s] ...." % job_code)
                scheduler = Scheduler.get()
                scheduler.cancel_task(job_code)

                job.set_value("state", "terminated")
                job.commit()
                continue

            self.keep_jobs.append(job)

        self.jobs = self.keep_jobs
Example #2
File: queue.py  Project: hellios78/TACTIC
    def check_existing_jobs(my):
        my.keep_jobs = []
        for job in my.jobs:
            job_code = job.get_code()
            search = Search(my.get_job_search_type())
            search.add_filter("code", job_code)
            job = search.get_sobject()

            if not job:
                print "Cancel ...."
                scheduler = Scheduler.get()
                scheduler.cancel_task(job_code)
                continue

            state = job.get_value("state")
            if state == 'cancel':
                print "Cancel task [%s] ...." % job_code
                scheduler = Scheduler.get()
                scheduler.cancel_task(job_code)

                job.set_value("state", "terminated")
                job.commit()
                continue

            my.keep_jobs.append(job)

        my.jobs = my.keep_jobs
Example #3
File: queue.py  Project: mincau/TACTIC
    def start(**kwargs):
         
        scheduler = Scheduler.get()
        scheduler.start_thread()
        task = JobTask(**kwargs)
        task.cleanup_db_jobs()

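        # run the job task once, in its own thread, one second from now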
        scheduler.add_single_task(task, mode='threaded', delay=1)
Example #4
File: queue.py  Project: hellios78/TACTIC
    def start(**kwargs):

        scheduler = Scheduler.get()
        scheduler.start_thread()
        task = JobTask(**kwargs)
        task.cleanup_db_jobs()

        scheduler.add_single_task(task, mode='threaded', delay=1)
Example #5
    def start(cls):

        print "Running Watch Folder ..."

        # Check whether the user defined a drop folder path.
        # Default drop folder path: /tmp/drop
        parser = OptionParser()
        parser.add_option("-p", "--project", dest="project", help="Define the project_name.")
        parser.add_option("-d", "--drop_path", dest="drop_path", help="Define drop folder path")
        parser.add_option("-s", "--search_type", dest="search_type", help="Define search_type.")
        parser.add_option("-P", "--process", dest="process", help="Define process.")
        parser.add_option("-S", "--script_path",dest="script_path", help="Define script_path.")
        (options, args) = parser.parse_args()

        if options.project != None :
            project_code= options.project
        else:
            project_code= 'jobs'

        if options.drop_path!=None :
            drop_path= options.drop_path
        else:
            tmp_dir = Environment.get_tmp_dir()
            drop_path = "%s/drop" % tmp_dir
        print "    using [%s]" % drop_path
        if not os.path.exists(drop_path):
            os.makedirs(drop_path)

        if options.search_type!=None :
            search_type = options.search_type
        else:
            search_type = 'jobs/media'

        if options.process!=None :
            process = options.process
        else:
            process= 'publish'

        if options.script_path!=None :
            script_path = options.script_path
        else:
            script_path="None"

        task = WatchDropFolderTask(base_dir=drop_path, project_code=project_code,search_type=search_type, process=process,script_path=script_path)
        
        scheduler = Scheduler.get()
        scheduler.add_single_task(task, delay=1)
        scheduler.start_thread()
        return scheduler
Example #6
    def init_scheduler(my):

        scheduler = Scheduler.get()

        if my.mode == 'basic':
            my.start_basic_tasks(scheduler)
        else:
            my.start_cache_tasks(scheduler)
            my.start_basic_tasks(scheduler)

        print "Starting Scheduler ...."
        scheduler.start_thread()
Example #7
    def init_scheduler(self):

        scheduler = Scheduler.get()

        if self.mode == 'basic':
            self.start_basic_tasks(scheduler)
        else:
            self.start_cache_tasks(scheduler)
            self.start_basic_tasks(scheduler)

        print "Starting Scheduler ...."
        scheduler.start_thread()
Example #8
    def reenable_user(my, login_sobject, delay):
        from tactic.command import SchedulerTask, Scheduler
        class EnableUserTask(SchedulerTask):
            def execute(my):
                Batch()
                reset_attempts = 0
                login_sobject = my.kwargs.get('sobject')
                login_sobject.set_value("license_type", "user")
                login_sobject.set_value("login_attempt", reset_attempts)
                login_sobject.commit(triggers=False)

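        # run the re-enable task once after the requested delay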
        scheduler = Scheduler.get()
        task = EnableUserTask(sobject=login_sobject, delay=delay)
        scheduler.add_single_task(task, delay)
        scheduler.start_thread()
Example #9
    def reenable_user(self, login_sobject, delay):
        from tactic.command import SchedulerTask, Scheduler

        class EnableUserTask(SchedulerTask):
            def execute(self):
                Batch()
                reset_attempts = 0
                login_sobject = self.kwargs.get('sobject')
                login_sobject.set_value("license_type", "user")
                login_sobject.set_value("login_attempt", reset_attempts)
                login_sobject.commit(triggers=False)

        scheduler = Scheduler.get()
        task = EnableUserTask(sobject=login_sobject, delay=delay)
        scheduler.add_single_task(task, delay)
        scheduler.start_thread()
Example #10
 def execute(my):
     import cherrypy
     print
     print "Stopping TACTIC ..."
     print 
     print " ... stopping Schduler"
     scheduler = Scheduler.get()
     scheduler.stop()
     print " ... stopping Cherrypy"
     cherrypy.engine.stop()
     cherrypy.engine.exit()
     print " ... closing DB connections"
     DbContainer.close_all_global_connections()
     print " ... kill current process"
     Common.kill()
     print "Done."
Example #11
    def start(cls):

        print "Running Watch Folder ..."

        # Check whether the user defined a drop folder path.
        # Default drop folder path: /tmp/drop
        parser = OptionParser()
        parser.add_option("-p", "--project", dest="project", help="Define the project_name.")
        parser.add_option("-d", "--drop_path", dest="drop_path", help="Define drop folder path")
        parser.add_option("-s", "--search_type", dest="search_type", help="Define search_type.")
        parser.add_option("-P", "--process", dest="process", help="Define process.")
        (options, args) = parser.parse_args()

        if options.project != None :
            project_code= options.project
        else:
            project_code= 'jobs'

        if options.drop_path!=None :
            drop_path= options.drop_path
        else:
            tmp_dir = Environment.get_tmp_dir()
            drop_path = "%s/drop" % tmp_dir
        print "    using [%s]" % drop_path
        if not os.path.exists(drop_path):
            os.makedirs(drop_path)

        if options.search_type!=None :
            search_type = options.search_type
        else:
            search_type = 'jobs/media'

        if options.process!=None :
            process = options.process
        else:
            process= 'publish'


        task = WatchDropFolderTask(base_dir=drop_path, project_code=project_code,search_type=search_type, process=process)
        
        scheduler = Scheduler.get()
        scheduler.add_single_task(task, delay=1)
        scheduler.start_thread()
        return scheduler
Example #12
            def execute(self):
                # wait until KillThread is permitted
                while GlobalContainer.get("KillThreadCmd:allow") == "false":
                    print "Kill locked ... waiting 5 seconds"
                    time.sleep(5)
                    continue

                import cherrypy
                print
                print "Stopping TACTIC ..."
                print 
                print " ... stopping Schduler"
                scheduler = Scheduler.get()
                scheduler.stop()
                print " ... stopping Cherrypy"
                cherrypy.engine.stop()
                cherrypy.engine.exit()
                print " ... closing DB connections"
                DbContainer.close_all_global_connections()
                print " ... kill current process"
                Common.kill()
                print "Done."
Example #13
    def start(cls):

        print "Running Watch Folder ..."

        # Check whether the user defined a drop folder path.
        # Default drop folder path: /tmp/drop
        parser = OptionParser()
        parser.add_option("-p",
                          "--project",
                          dest="project",
                          help="Define the project_name.")
        parser.add_option("-d",
                          "--drop_path",
                          dest="drop_path",
                          help="Define drop folder path")
        parser.add_option("-s",
                          "--search_type",
                          dest="search_type",
                          help="Define search_type.")
        parser.add_option("-P",
                          "--process",
                          dest="process",
                          help="Define process.")
        parser.add_option("-S",
                          "--script_path",
                          dest="script_path",
                          help="Define script_path.")
        parser.add_option(
            "-w",
            "--watch_folder_code",
            dest="watch_folder_code",
            help="Define watch folder code. If no code is used, then it is assumed that this process \
is managed in a standalone script.")
        parser.add_option("-x", "--site", dest="site", help="Define site.")

        parser.add_option("-c",
                          "--handler",
                          dest="handler",
                          help="Define Custom Handler Class.")
        (options, args) = parser.parse_args()

        if options.project != None:
            project_code = options.project
        else:
            raise Exception("No project specified")

        if options.drop_path != None:
            drop_path = options.drop_path
        else:
            tmp_dir = Environment.get_tmp_dir()
            drop_path = "%s/drop" % tmp_dir
        print "    using [%s]" % drop_path
        if not os.path.exists(drop_path):
            os.makedirs(drop_path)

        if options.search_type != None:
            search_type = options.search_type
        else:
            search_type = None

        if options.process != None:
            process = options.process
        else:
            process = 'publish'

        if options.script_path != None:
            script_path = options.script_path
        else:
            script_path = None

        if options.site != None:
            site = options.site
        else:
            site = None

        if options.handler != None:
            handler = options.handler
        else:
            handler = None

        if options.watch_folder_code != None:
            watch_folder_code = options.watch_folder_code
        else:
            watch_folder_code = None
        if watch_folder_code:
            # record pid in watch folder pid file
            pid = os.getpid()
            pid_file = "%s/log/watch_folder.%s" % (Environment.get_tmp_dir(),
                                                   watch_folder_code)
            f = open(pid_file, "w")
            f.write(str(pid))
            f.close()

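        # set up a batch (non-web) TACTIC environment for the given project and site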
        Batch(project_code=project_code, site=site)

        task = WatchDropFolderTask(base_dir=drop_path,
                                   site=site,
                                   project_code=project_code,
                                   search_type=search_type,
                                   process=process,
                                   script_path=script_path,
                                   handler=handler,
                                   watch_folder_code=watch_folder_code)

        scheduler = Scheduler.get()
        scheduler.add_single_task(task, delay=1)
        scheduler.start_thread()
        return scheduler
Example #14
            f.close()

        Batch(project_code=project_code, site=site)

        task = WatchDropFolderTask(base_dir=drop_path,
                                   site=site,
                                   project_code=project_code,
                                   search_type=search_type,
                                   process=process,
                                   script_path=script_path,
                                   handler=handler,
                                   watch_folder_code=watch_folder_code)

        scheduler = Scheduler.get()
        scheduler.add_single_task(task, delay=1)
        scheduler.start_thread()
        return scheduler

    start = classmethod(start)


if __name__ == '__main__':
    WatchDropFolderTask.start()
    while 1:
        try:
            time.sleep(15)
        except (KeyboardInterrupt, SystemExit), e:
            scheduler = Scheduler.get()
            scheduler.stop()
            break
Example #15
    def run(self):
        import time
        time.sleep(3)

        #print("Starting Scheduler ....")

        # NOTE: not sure why we have to do a batch here
        from pyasm.security import Batch
        Batch(login_code="admin")

        timed_triggers = []

        from pyasm.biz import Project
        search = Search("sthpw/project")
        # skip the admin (sthpw) project
        search.add_filter('code', 'sthpw', op='!=')
        projects = search.get_sobjects()

        # get all of the timed triggers
        #search = Search("sthpw/timed_trigger")
        #search.add_filter("type", "timed")
        timed_trigger_sobjs = []
        for project in projects:

            project_code = project.get_code()
            try:
                search = Search("config/trigger?project=%s" % project_code)
                search.add_filter("event", "schedule")
                items = search.get_sobjects()
                if items:
                    timed_trigger_sobjs.extend(items)
            except Exception as e:
                #print("WARNING: ", e)
                continue

            # example
            """
            if project_code == 'broadcast2':
                tt = SearchType.create("config/trigger")
                tt.set_value("class_name", "tactic.command.PythonTrigger")

                # data = timed_trigges.get("data")
                tt.set_value("data", '''{
                    "type": "interval",
                    "interval": 5,
                    "delay": 5,
                    "mode": "threaded",
                    "script_path": "trigger/scheduled"
                } ''')
                timed_trigger_sobjs.append(tt)
            """

            has_triggers = False
            for trigger_sobj in timed_trigger_sobjs:
                trigger_class = trigger_sobj.get_value("class_name")
                if not trigger_class and trigger_sobj.get_value("script_path"):
                    trigger_class = 'tactic.command.PythonTrigger'

                data = trigger_sobj.get_json_value("data")

                data['project_code'] = trigger_sobj.get_project_code()

                try:
                    timed_trigger = Common.create_from_class_path(
                        trigger_class, [], data)
                    timed_trigger.set_input(data)
                    has_triggers = True

                except ImportError:
                    raise Exception("WARNING: [%s] does not exist" %
                                    trigger_class)

                timed_triggers.append(timed_trigger)

            if has_triggers and self.dev_mode:
                print("Found [%s] scheduled triggers in project [%s]..." %
                      (len(timed_triggers), project_code))

        from tactic.command import Scheduler, SchedulerTask
        scheduler = Scheduler.get()

        scheduler.start_thread()

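        # each trigger runs inside a task that switches to its project before executing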
        class TimedTask(SchedulerTask):
            def __init__(self, **kwargs):
                super(TimedTask, self).__init__(**kwargs)
                self.index = kwargs.get("index")
                self.project_code = kwargs.get("project_code")

            def execute(self):
                try:
                    #Batch()
                    #Command.execute_cmd(timed_trigger)
                    Project.set_project(self.project_code)
                    timed_triggers[self.index].execute()
                except Exception as e:
                    raise
                finally:
                    DbContainer.close_thread_sql()
                    DbContainer.commit_thread_sql()
                    DbContainer.close_all()

        for idx, timed_trigger in enumerate(timed_triggers):

            data = timed_trigger.get_input()
            if not data:
                continue
            """
            data = {
                'type': 'interval',
                'interval': 10,
                'delay': 0,
                'mode': 'threaded'
            }
            """

            project_code = data.get("project_code")
            task = TimedTask(index=idx, project_code=project_code)

            args = {}
            if data.get("mode"):
                args['mode'] = data.get("mode")

            trigger_type = data.get("type")

            if trigger_type == 'interval':

                interval = data.get("interval")
                delay = data.get("delay")

                if not interval:
                    continue

                if not delay:
                    delay = 3

                args = {
                    'interval': interval,
                    'delay': delay,
                }

                scheduler.add_interval_task(task, **args)

            elif trigger_type == "daily":

                from dateutil import parser

                args['time'] = parser.parse(data.get("time"))

                if data.get("weekdays"):
                    args['weekdays'] = eval(data.get("weekdays"))

                scheduler.add_daily_task(task, **args)

                #scheduler.add_daily_task(task, time, mode="threaded", weekdays=range(1,7))

            elif trigger_type == "weekly":
                #scheduler.add_weekly_task(task, weekday, time, mode='threaded'):
                args['time'] = parser.parse(data.get("time"))

                if data.get("weekday"):
                    args['weekday'] = eval(data.get("weekday"))

                scheduler.add_weekly_task(task, **args)
Example #16
File: monitor.py  Project: mincau/TACTIC
    def run(self):
        import time
        time.sleep(3)

        #print("Starting Scheduler ....")

        # NOTE: not sure why we have to do a batch here
        from pyasm.security import Batch
        Batch(login_code="admin")

        timed_triggers = []

        from pyasm.biz import Project
        search = Search("sthpw/project")
        # skip the admin (sthpw) project
        search.add_filter('code', 'sthpw', op='!=')
        projects = search.get_sobjects()

        # get all of the timed triggers
        #search = Search("sthpw/timed_trigger")
        #search.add_filter("type", "timed")
        timed_trigger_sobjs = []
        for project in projects:

            project_code = project.get_code()
            try:
                search = Search("config/trigger?project=%s" % project_code)
                search.add_filter("event", "schedule")
                items = search.get_sobjects()
                if items:
                    timed_trigger_sobjs.extend(items)
            except Exception as e:
                #print("WARNING: ", e)
                continue

            # example
            """
            if project_code == 'broadcast2':
                tt = SearchType.create("config/trigger")
                tt.set_value("class_name", "tactic.command.PythonTrigger")

                # data = timed_trigges.get("data")
                tt.set_value("data", '''{
                    "type": "interval",
                    "interval": 5,
                    "delay": 5,
                    "mode": "threaded",
                    "script_path": "trigger/scheduled"
                } ''')
                timed_trigger_sobjs.append(tt)
            """


            has_triggers = False
            for trigger_sobj in timed_trigger_sobjs:
                trigger_class = trigger_sobj.get_value("class_name")
                if not trigger_class and trigger_sobj.get_value("script_path"):
                    trigger_class = 'tactic.command.PythonTrigger'

                data = trigger_sobj.get_json_value("data")

                data['project_code'] = trigger_sobj.get_project_code()

                try:
                    timed_trigger = Common.create_from_class_path(trigger_class, [], data)
                    timed_trigger.set_input(data)
                    has_triggers = True

                except ImportError:
                    raise Exception("WARNING: [%s] does not exist" % trigger_class)
                    
                timed_triggers.append(timed_trigger)

            if has_triggers and self.dev_mode:
                print("Found [%s] scheduled triggers in project [%s]..." % (len(timed_triggers), project_code))

        from tactic.command import Scheduler, SchedulerTask
        scheduler = Scheduler.get()

        scheduler.start_thread()



        class TimedTask(SchedulerTask):
            def __init__(self, **kwargs):
                super(TimedTask, self).__init__(**kwargs)
                self.index = kwargs.get("index")
                self.project_code = kwargs.get("project_code")

            def execute(self):
                try:
                    #Batch()
                    #Command.execute_cmd(timed_trigger)
                    Project.set_project(self.project_code)
                    timed_triggers[self.index].execute()
                except Exception as e:
                    raise
                finally:
                    DbContainer.close_thread_sql()
                    DbContainer.commit_thread_sql()
                    DbContainer.close_all()


        for idx, timed_trigger in enumerate(timed_triggers):

            data = timed_trigger.get_input()
            if not data:
                continue

            """
            data = {
                'type': 'interval',
                'interval': 10,
                'delay': 0,
                'mode': 'threaded'
            }
            """

            project_code = data.get("project_code")
            task = TimedTask(index=idx, project_code=project_code)

            args = {}
            if data.get("mode"):
                args['mode'] = data.get("mode")

            trigger_type = data.get("type")
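            # dispatch on the trigger type: interval, daily, or weekly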

            if trigger_type == 'interval':

                interval = data.get("interval")
                delay = data.get("delay")

                if not interval:
                    continue

                if not delay:
                    delay = 3

                args = {
                    'interval': interval,
                    'delay': delay,
                }

                scheduler.add_interval_task(task, **args)


            elif trigger_type == "daily":

                from dateutil import parser

                args['time'] = parser.parse( data.get("time") )

                if data.get("weekdays"):
                    args['weekdays'] = eval( data.get("weekdays") )

                scheduler.add_daily_task(task, **args)

                #scheduler.add_daily_task(task, time, mode="threaded", weekdays=range(1,7))

            elif trigger_type == "weekly":
                #scheduler.add_weekly_task(task, weekday, time, mode='threaded'):
                args['time'] = parser.parse( data.get("time") )

                if data.get("weekday"):
                    args['weekday'] = eval( data.get("weekday") )

                scheduler.add_weekly_task(task, **args)
Example #17
    def check_new_job(self, queue_type=None):

        num_jobs = len(self.jobs)
        if num_jobs >= self.max_jobs:
            print("Already at max jobs [%s]" % self.max_jobs)
            return

        self.job = self.get_next_job(queue_type)
        if not self.job:
            return

        # set the process key
        process_key = self.get_process_key()
        self.job.set_value("host", process_key)
        self.job.commit()

        self.jobs.append(self.job)

        # get some info from the job
        command = self.job.get_value("command")
        job_code = self.job.get_value("code")

        try:
            kwargs = self.job.get_json_value("data")
        except:
            try:
                # DEPRECATED
                kwargs = self.job.get_json_value("serialized")
            except:
                kwargs = {}
        if not kwargs:
            kwargs = {}

        login = self.job.get_value("login")
        script_path = self.job.get_value("script_path", no_exception=True)

        project_code = self.job.get_value("project_code")

        if script_path:
            command = 'tactic.command.PythonCmd'

            folder = os.path.dirname(script_path)
            title = os.path.basename(script_path)

            search = Search("config/custom_script")
            search.add_filter("folder", folder)
            search.add_filter("title", title)
            custom_script = search.get_sobject()
            script_code = custom_script.get_value("script")

            kwargs['code'] = script_code

        # add the job to the kwargs
        kwargs['job'] = self.job

        #print("command: ", command)
        #print("kwargs: ", kwargs)

        # Because we started a new thread, the environment may not
        # yet be initialized
        try:
            from pyasm.common import Environment
            Environment.get_env_object()
        except:
            # it usually is run at the very first transaction
            Batch()
        Project.set_project(project_code)

        queue = self.job.get_value("queue", no_exception=True)
        queue_type = 'repeat'
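        # NOTE: queue_type is hard-coded to "repeat" here, so the "inline" and forked branches below are never reached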

        stop_on_error = False

        print("Running job: ", self.job.get_value("code"))

        if queue_type == 'inline':

            cmd = Common.create_from_class_path(command, kwargs=kwargs)
            try:
                Container.put(Command.TOP_CMD_KEY, None)
                Container.put(Transaction.KEY, None)
                Command.execute_cmd(cmd)

                # set job to complete
                self.job.set_value("state", "complete")
            except Exception as e:
                self.job.set_value("state", "error")

            self.job.commit()
            self.jobs.remove(self.job)
            self.job = None

            self.jobs_completed += 1

        elif queue_type == 'repeat':

            attempts = 0
            max_attempts = 3
            retry_interval = 5
            Container.put(Transaction.KEY, None)
            while 1:

                try:
                    cmd = Common.create_from_class_path(command, kwargs=kwargs)

                    Container.put(Command.TOP_CMD_KEY, None)

                    Command.execute_cmd(cmd)
                    #cmd.execute()

                    # set job to complete
                    self.job.set_value("state", "complete")
                    break
                except TacticException as e:

                    # This is an error on this server, so just exit
                    # and don't bother retrying
                    print("Error: ", e)
                    self.job.set_value("state", "error")
                    break

                except Exception as e:
                    if stop_on_error:
                        raise
                    print("WARNING in Queue: ", e)
                    import time
                    time.sleep(retry_interval)
                    attempts += 1

                    if attempts >= max_attempts:
                        print("ERROR: reached max attempts")
                        self.job.set_value("state", "error")
                        break

                    print("Retrying [%s]...." % attempts)

            self.job.commit()
            self.jobs.remove(self.job)
            self.job = None

            self.jobs_completed += 1

        else:

            class ForkedTask(SchedulerTask):
                def __init__(self, **kwargs):
                    super(ForkedTask, self).__init__(**kwargs)

                def execute(self):
                    # check to see the status of this job
                    """
                    job = self.kwargs.get('job')
                    job_code = job.get_code()
                    search = Search("sthpw/queue")
                    search.add_filter("code", job_code)
                    self.kwargs['job'] = search.get_sobject()

                    if not job:
                        print("Cancelling ...")
                        return

                    state = job.get_value("state")
                    if state == "cancel":
                        print("Cancelling 2 ....")
                        return
                    """

                    subprocess_kwargs = {
                        'login': login,
                        'project_code': project_code,
                        'command': command,
                        'kwargs': kwargs
                    }
                    subprocess_kwargs_str = jsondumps(subprocess_kwargs)
                    install_dir = Environment.get_install_dir()
                    python = Config.get_value("services", "python")
                    if not python:
                        python = 'python'
                    args = [
                        '%s' % python,
                        '%s/src/tactic/command/queue.py' % install_dir
                    ]
                    args.append(subprocess_kwargs_str)

                    import subprocess
                    p = subprocess.Popen(args)

                    DbContainer.close_thread_sql()

                    return

                    # can't use a forked task ... need to use a system call
                    #Command.execute_cmd(cmd)

            # register this as a forked task
            task = ForkedTask(name=job_code, job=self.job)
            scheduler = Scheduler.get()
            scheduler.start_thread()

            # FIXME: the queue should not be inline
            if queue == 'interval':

                interval = self.job.get_value("interval")
                if not interval:
                    interval = 60

                scheduler.add_interval_task(task,
                                            interval=interval,
                                            mode='threaded')

            else:
                scheduler.add_single_task(task, mode='threaded')
Example #18
File: queue.py  Project: mincau/TACTIC
    def check_new_job(self, queue_type=None):

        num_jobs = len(self.jobs)
        if num_jobs >= self.max_jobs:
            print("Already at max jobs [%s]" % self.max_jobs)
            return
      
        self.job = self.get_next_job(queue_type)
        if not self.job:
            return

		
        # set the process key
        process_key = self.get_process_key()
        self.job.set_value("host", process_key)
        self.job.commit()

        self.jobs.append(self.job)

        # get some info from the job
        command = self.job.get_value("command")
        job_code = self.job.get_value("code")

        try: 
            kwargs = self.job.get_json_value("data")
        except:
            try:
                # DEPRECATED
                kwargs = self.job.get_json_value("serialized")
            except:
                kwargs = {}
        if not kwargs:
            kwargs = {}

        login = self.job.get_value("login")
        script_path = self.job.get_value("script_path", no_exception=True)

        project_code = self.job.get_value("project_code")

        if script_path:
            command = 'tactic.command.PythonCmd'

            folder = os.path.dirname(script_path)
            title = os.path.basename(script_path)

            search = Search("config/custom_script")
            search.add_filter("folder", folder)
            search.add_filter("title", title)
            custom_script = search.get_sobject()
            script_code = custom_script.get_value("script")

            kwargs['code'] = script_code



        # add the job to the kwargs
        kwargs['job'] = self.job

        #print("command: ", command)
        #print("kwargs: ", kwargs)


        # Because we started a new thread, the environment may not
        # yet be initialized
        try:
            from pyasm.common import Environment
            Environment.get_env_object()
        except:
            # it usually is run at the very first transaction
            Batch()
        Project.set_project(project_code)


        queue = self.job.get_value("queue", no_exception=True)
        queue_type = 'repeat'

        stop_on_error = False

        print("Running job: ", self.job.get_value("code") )

        if queue_type == 'inline':

            cmd = Common.create_from_class_path(command, kwargs=kwargs)
            try:
                Container.put(Command.TOP_CMD_KEY, None)
                Container.put(Transaction.KEY, None)
                Command.execute_cmd(cmd)

                # set job to complete
                self.job.set_value("state", "complete")
            except Exception as e:
                self.job.set_value("state", "error")

            self.job.commit()
            self.jobs.remove(self.job)
            self.job = None

            self.jobs_completed += 1


        elif queue_type == 'repeat':
            
            
            attempts = 0
            max_attempts = 3
            retry_interval = 5
            Container.put(Transaction.KEY, None)
            while 1:
			    
                try:
                    cmd = Common.create_from_class_path(command, kwargs=kwargs)
                    
                    Container.put(Command.TOP_CMD_KEY, None)
                    
                    Command.execute_cmd(cmd)
                    #cmd.execute()

                    # set job to complete
                    self.job.set_value("state", "complete")
                    break
                except TacticException as e:

                    # This is an error on this server, so just exit
                    # and don't bother retrying
                    print("Error: ", e)
                    self.job.set_value("state", "error")
                    break


                except Exception as e:
                    if stop_on_error:
                        raise
                    print("WARNING in Queue: ", e)
                    import time
                    time.sleep(retry_interval)
                    attempts += 1

                    if attempts >= max_attempts:
                        print("ERROR: reached max attempts")
                        self.job.set_value("state", "error")
                        break

                    print("Retrying [%s]...." % attempts)

            self.job.commit()
            self.jobs.remove(self.job)
            self.job = None

            self.jobs_completed += 1


        else:
            class ForkedTask(SchedulerTask):
                def __init__(self, **kwargs):
                    super(ForkedTask, self).__init__(**kwargs)
                def execute(self):
                    # check to see the status of this job
                    """
                    job = self.kwargs.get('job')
                    job_code = job.get_code()
                    search = Search("sthpw/queue")
                    search.add_filter("code", job_code)
                    self.kwargs['job'] = search.get_sobject()

                    if not job:
                        print("Cancelling ...")
                        return

                    state = job.get_value("state")
                    if state == "cancel":
                        print("Cancelling 2 ....")
                        return
                    """

                    subprocess_kwargs = {
                        'login': login,
                        'project_code': project_code,
                        'command': command,
                        'kwargs': kwargs
                    }
                    subprocess_kwargs_str = jsondumps(subprocess_kwargs)
                    install_dir = Environment.get_install_dir()
                    python = Config.get_value("services", "python")
                    if not python:
                        python = 'python'
                    args = ['%s' % python, '%s/src/tactic/command/queue.py' % install_dir]
                    args.append(subprocess_kwargs_str)

                    import subprocess
                    p = subprocess.Popen(args)

                    DbContainer.close_thread_sql()

                    return

                    # can't use a forked task ... need to use a system call
                    #Command.execute_cmd(cmd)

            # register this as a forked task
            task = ForkedTask(name=job_code, job=self.job)
            scheduler = Scheduler.get()
            scheduler.start_thread()

            # FIXME: the queue should not be inline
            if queue == 'interval':

                interval = self.job.get_value("interval")
                if not interval:
                    interval = 60

                scheduler.add_interval_task(task, interval=interval,mode='threaded')

            else:
                scheduler.add_single_task(task, mode='threaded')
Example #19
        if options.script_path!=None :
            script_path = options.script_path
        else:
            script_path="None"

        task = WatchDropFolderTask(base_dir=drop_path, project_code=project_code,search_type=search_type, process=process,script_path=script_path)
        
        scheduler = Scheduler.get()
        scheduler.add_single_task(task, delay=1)
        scheduler.start_thread()
        return scheduler
    start = classmethod(start)

if __name__ == '__main__':
    Batch()
    WatchDropFolderTask.start()
    while 1:
        try:
            time.sleep(15)
        except (KeyboardInterrupt, SystemExit), e:
            scheduler = Scheduler.get()
            scheduler.stop()
            break



Example #20
class TacticSchedulerThread(threading.Thread):

    def __init__(my):
        super(TacticSchedulerThread,my).__init__()

    def _check(my):
        pass


    def run(my):
        import time
        time.sleep(3)

        print "Starting Scheduler ...."

        # NOTE: not sure why we have to do a batch here
        from pyasm.security import Batch
        Batch(login_code="admin")

        timed_triggers = []

        from pyasm.biz import Project
        search = Search("sthpw/project")
        projects = search.get_sobjects()

        # get all of the timed triggers
        #search = Search("sthpw/timed_trigger")
        #search.add_filter("type", "timed")
        for project in projects:

            project_code = project.get_code()
            try:
                search = Search("config/trigger?project=%s" % project_code)
                search.add_filter("event", "schedule")
                timed_trigger_sobjs = search.get_sobjects()
            except Exception, e:
                print "WARNING: ", e
                continue

            # example
            """
            if project_code == 'broadcast2':
                tt = SearchType.create("config/trigger")
                tt.set_value("class_name", "tactic.command.PythonTrigger")

                # data = timed_trigges.get("data")
                tt.set_value("data", '''{
                    "type": "interval",
                    "interval": 5,
                    "delay": 5,
                    "mode": "threaded",
                    "script_path": "trigger/scheduled"
                } ''')
                timed_trigger_sobjs.append(tt)
            """


            has_triggers = False
            for trigger_sobj in timed_trigger_sobjs:
                trigger_class = trigger_sobj.get_value("class_name")
                if not trigger_class and trigger_sobj.get_value("script_path"):
                    trigger_class = 'tactic.command.PythonTrigger'

                data = trigger_sobj.get_json_value("data")

                data['project_code'] = trigger_sobj.get_project_code()

                try:
                    timed_trigger = Common.create_from_class_path(trigger_class, [], data)
                    timed_trigger.set_input(data)
                    has_triggers = True

                except ImportError:
                    raise Exception("WARNING: [%s] does not exist" % trigger_class)
                    
                timed_triggers.append(timed_trigger)

            if has_triggers:
                print "Found [%s] scheduled triggers in project [%s]..." % (len(timed_triggers), project_code)

        from tactic.command import Scheduler, SchedulerTask
        scheduler = Scheduler.get()

        scheduler.start_thread()



        class TimedTask(SchedulerTask):
            def __init__(my, **kwargs):
                super(TimedTask, my).__init__(**kwargs)
                my.index = kwargs.get("index")
                my.project_code = kwargs.get("project_code")

            def execute(my):
                try:
                    #Batch()
                    #Command.execute_cmd(timed_trigger)
                    Project.set_project(my.project_code)
                    timed_triggers[my.index].execute()
                except Exception, e:
                    print "Error running trigger"
                    raise
                finally:
Example #21
    def start(cls):

        print "Running Watch Folder ..."



        # Check whether the user defined a drop folder path.
        # Default drop folder path: /tmp/drop
        parser = OptionParser()
        parser.add_option("-p", "--project", dest="project", help="Define the project_name.")
        parser.add_option("-d", "--drop_path", dest="drop_path", help="Define drop folder path")
        parser.add_option("-s", "--search_type", dest="search_type", help="Define search_type.")
        parser.add_option("-P", "--process", dest="process", help="Define process.")
        parser.add_option("-S", "--script_path",dest="script_path", help="Define script_path.")
        parser.add_option("-w", "--watch_folder_code",dest="watch_folder_code", 
				help="Define watch folder code. If no code is used, then it assumed that this process \
				is managed in a standalone script.")
        parser.add_option("-x", "--site",dest="site", help="Define site.")

        parser.add_option("-c", "--handler",dest="handler", help="Define Custom Handler Class.")
        (options, args) = parser.parse_args()

        if options.project != None :
            project_code= options.project
        else:
            raise Exception("No project specified")


        if options.drop_path!=None :
            drop_path= options.drop_path
        else:
            tmp_dir = Environment.get_tmp_dir()
            drop_path = "%s/drop" % tmp_dir
        print "    using [%s]" % drop_path
        if not os.path.exists(drop_path):
            os.makedirs(drop_path)

        if options.search_type!=None :
            search_type = options.search_type
        else:
            search_type = None


        if options.process!=None :
            process = options.process
        else:
            process= 'publish'

        if options.script_path != None :
            script_path = options.script_path
        else:
            script_path = None
          
        if options.site != None:
            site = options.site
        else:
            site = None

        if options.handler != None:
            handler = options.handler
        else:
            handler = None

        if options.watch_folder_code != None:
            watch_folder_code = options.watch_folder_code
        else:
            watch_folder_code = None
        if watch_folder_code:   
            # record pid in watch folder pid file
            pid = os.getpid()
            pid_file = "%s/log/watch_folder.%s" % (Environment.get_tmp_dir(), watch_folder_code)
            f = open(pid_file, "w")
            f.write(str(pid))
            f.close()

        Batch(project_code=project_code, site=site)



        task = WatchDropFolderTask(
            base_dir=drop_path, 
            site=site, 
            project_code=project_code,
            search_type=search_type, 
            process=process,
            script_path=script_path, 
            handler=handler,
            watch_folder_code=watch_folder_code
        )
        
        scheduler = Scheduler.get()
        scheduler.add_single_task(task, delay=1)
        scheduler.start_thread()
        return scheduler