Example #1
    def run(self):
        logger.info('scheduler starting...')

        self.run_cron()

        while not self._quit:
            try:
                task = self.newtask_queue.get()
                if task is not None:
                    logger.info('Need to handle task: %s', task)

                    if Cron.is_cron(task):
                        self.cron.put(task)
                    elif Cron.is_stopped(task):
                        pass
                    else:
                        self.ready_queue.put(task)
                else:
                    # No ready task, take a nap.
                    time.sleep(0.2)
            except KeyboardInterrupt:
                logger.info('Keyboard Interrupt, bye bye.')
                self.stop()
            except Exception as e:
                logger.error(str(e))
Example #3
def run(name, debug=False, dryrun=False, configfile="/etc/cron-runner.yaml"):
    registry.debug = debug
    registry.dryrun = dryrun
    registry.init_config(configfile)

    from cron import Cron

    job = Cron()
    job.run(name)
Example #4
    def show_reminders(self, date_string=None):
        god_api = Api(User(user_type='god'))
        cron = Cron(god_api)
        reminders = cron.get_reminders_by_date(date_string)

        # Add a preview of how the text will be converted to html when sent.
        for r in reminders:
            r['html'] = markdown.markdown(r['body'])
        return reminders
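
For reference, the markdown.markdown call used above converts a Markdown string into an HTML fragment; a quick illustration (the reminder text is made up):

import markdown

# Hypothetical reminder body, converted the same way show_reminders does.
html = markdown.markdown('**Reminder:** submit your report')
# html == '<p><strong>Reminder:</strong> submit your report</p>'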
Example #5
class Scheduler(object):
    """Scheduler for fulmar.

    Only one Scheduler should be running in fulmar.
    It schedules all of the tasks, puts tasks from newtask queue and
    cron queue to ready queue.
    And it throws away tasks whose project is marked with 'is_stopped'.
    """

    def __init__(self, newtask_queue, ready_queue, cron_queue, projectdb):
        self._quit = False
        self.newtask_queue = newtask_queue
        self.ready_queue = ready_queue
        self.cron = Cron(cron_queue, ready_queue, projectdb)

    def run(self):
        logger.info("scheduler starting...")

        self.run_cron()

        while not self._quit:
            try:
                task = self.newtask_queue.get()
                if task is not None:
                    logger.info("Need handle task: %s" % str(task))

                    if Cron.is_cron(task):
                        self.cron.put(task)
                    elif Cron.is_stopped(task):
                        pass
                    else:
                        self.ready_queue.put(task)
                else:
                    # No ready task, take a nap.
                    time.sleep(0.2)
            except KeyboardInterrupt:
                logger.info("Keyboard Interrupt, bye bye.")
                self.stop()
            except Exception as e:
                logger.error(str(e))

    def run_cron(self):
        try:
            self.cron_thread = threading.Thread(target=self.cron.run)
            self.cron_thread.daemon = True
            self.cron_thread.start()
        except RuntimeError:
            raise RuntimeError("start() called more than once on the same thread object")

    def stop(self):
        self._quit = True
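
A minimal sketch of how this Scheduler might be wired up. Note that the run loop expects get() to return None when the queue is empty, so fulmar presumably uses non-blocking queue objects; plain queue.Queue instances stand in here, and the projectdb is stubbed:

import queue
import threading

# Stand-ins for fulmar's own queue and projectdb implementations.
newtask_queue = queue.Queue()
ready_queue = queue.Queue()
cron_queue = queue.Queue()
projectdb = None  # stub; the real scheduler passes a project database

scheduler = Scheduler(newtask_queue, ready_queue, cron_queue, projectdb)
threading.Thread(target=scheduler.run, daemon=True).start()
# ... later, from the main thread:
scheduler.stop()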
Example #7
    def test_aggregate_queues_tasks(self):
        cron = Cron(self.internal_api)
        queue = taskqueue.Queue(name='default')

        # Since this test inherits a populated stub datastore, some tasks
        # have already been queued. Set that as the baseline.
        baseline = queue.fetch_statistics().tasks

        # Now aggregate the entities that have been pre-populated, which
        # includes one classroom and one cohort. This should queue 3 tasks: one
        # classroom roster, one cohort roster, and one cohort schedule.
        cron.aggregate()

        post_aggregation = queue.fetch_statistics().tasks
        self.assertEqual(post_aggregation - baseline, 3)
Example #8
    def set_up(self):
        """Overrides PopulatedTestCase.set_up() to change consistency."""
        # This test suite IGNORES datastore inconsistency because it is only
        # interested in what content eventually gets into the search index.
        #
        # Consequently, this suite should only contain tests where this
        # assumption is both valid and required. Any other test should use the
        # default PopulatedTestCase, which assumes maximum inconsistency.
        self.consistency_probability = 1
        super(ConsistentSearchTest, self).set_up()

        # We'll want ready access to a Cron instance and the content search
        # index.
        self.cron = Cron(self.admin_api)
        indexer = Indexer.get_or_insert('the-indexer')
        indexer.delete_all_content()  # fresh start, no pre-populated stuff
        self.search_index = indexer.get_index()
Example #9
def cron_jobs(request):
    # Don't bother processing the cron check if this user has already done so this session.
    if request.session.get('cron_done', False):
        return {}

    ret = Cron.run_cron()
    request.session['cron_done'] = True

    return ret
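
Judging by the (request) signature, the session access, and the dict return value, cron_jobs looks like a Django template context processor; if so, it would be registered along these lines (the myapp.context_processors path is illustrative):

# settings.py (illustrative)
TEMPLATES = [{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'APP_DIRS': True,
    'OPTIONS': {
        'context_processors': [
            'django.template.context_processors.request',
            'myapp.context_processors.cron_jobs',  # hypothetical module path
        ],
    },
}]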
Example #10
File: __init__.py  Project: stsdc/svs
    def __init__(self):
        self.hostname = socket.gethostname()
        logger.debug("Initializing %s", self.hostname)
        Cron().check()
        while not Network().connect():
            sleep(10)

        self.marker_detector = MarkerDetector()
        self.socket_client = SocketClient("10.0.0.1", 50000)
Example #11
    def dispatch(self, *args, **kwargs):
        """Do setup for cron handlers.

        - Jsons output
        - logs exception traces
        - Initializes Api-like Cron object
        """
        # There's no true sense of a current user making a request in cron
        # jobs, so invent an admin to run them. Skip the uniqueness check
        # against the index of users, because crons run all the time. The
        # user won't be saved to the datastore anyway.
        admin_user = User.create(check_uniqueness=False,
                                 email='',
                                 auth_id='',
                                 first_name='Cron',
                                 last_name='Job',
                                 is_admin=True)

        # The testing flag allows us to use unsaved user entities to create an
        # api. Normal operation requires that the user be saved to the
        # datastore. This is the only effect; e.g. a testing api still affects
        # the datastore.
        self.api = Api(admin_user, testing=True)

        self.cron = Cron(self.api)

        self.response.headers['Content-Type'] = (
            'application/json; charset=utf-8')

        try:
            # Call the descendant handler.
            BaseHandler.dispatch(self)
            # self.write_json(self.do(*args, **kwargs))
        except Exception as error:
            trace = traceback.format_exc()
            logging.error("{}\n{}".format(error, trace))
            response = {
                'error': True,
                'message': '{}: {}'.format(error.__class__.__name__, error),
                'trace': trace,
            }
            self.response.write(json.dumps(response))
        else:
            # If everything about the request worked out, but no data was
            # returned, put out a standard empty response.
            if not self.response.body:
                self.write(None)
Example #12
    def do_wrapper(self, *args, **kwargs):
        """Do setup for cron handlers.

        - Jsons output
        - logs exception traces
        - Initializes Api-like Cron object
        """
        # Cron jobs should not be bothered with permissions. Give god access.
        self.cron = Cron(self.internal_api)

        try:
            self.write_json(self.do(*args, **kwargs))
        except Exception as error:
            trace = traceback.format_exc()
            logging.error("{}\n{}".format(error, trace))
            response = {
                'success': False,
                'message': '{}: {}'.format(error.__class__.__name__, error),
                'trace': trace,
            }
            self.write_json(response)
Example #13
class CronApplication(object):
    def __init__(self):
        self.cron = Cron(config.tasks)
        set_exit_handler(self._exit_handler)

    def start(self):
        try:
            self.cron.start()
        except KeyboardInterrupt:
            self.stop()

    def stop(self):
        self.cron.stop()

    def _exit_handler(self, signum, frame=None):
        self.cron.stop()
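
set_exit_handler is not shown in this example; a plausible minimal implementation, assuming it simply routes termination signals to the callback:

import signal


def set_exit_handler(callback):
    # Invoke callback(signum, frame) on SIGTERM/SIGINT so the
    # application can stop its Cron scheduler cleanly.
    signal.signal(signal.SIGTERM, callback)
    signal.signal(signal.SIGINT, callback)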
Example #15
    def test_aggregate_user_clears_memcache(self):
        pd_params = {
            'variable': 's1__progress',
            'program': self.program.id,
            'activity': self.student_activities[0].id,
            'activity_ordinal': 1,
            'value': 100,
            'scope': self.student.id,
        }
        pd = Api(self.student).create('pd', pd_params)
        db.get(pd.key())  # simulate a delay before the aggregator runs
        Cron(self.internal_api).aggregate()
        # Make sure aggregation happened as expected.
        self.student = db.get(self.student.key())
        self.assertEquals(self.student.aggregation_data, {
            1: {
                'progress': 100
            },
            2: {
                'progress': None
            }
        })
        self.check_updated_rosters_cleared_from_memcache()
Example #17
#!/usr/bin/env python3
import os
import sys

from bottle import Bottle

from cron import Cron
from server import MyWSGIRefServer

# Set this script's directory as the working directory.
os.chdir(os.path.dirname(os.path.abspath(sys.argv[0])))

# Get default values. Environment variables are strings, so cast them.
port = int(os.getenv('HK_DEFAULT_PORT', 8080))
time_interval = int(os.getenv('HK_TIME_INTERVAL', 300))

# Create the HTTP app and server. Routes must be registered on the same
# app that is run, so create the app before declaring routes.
app = Bottle()
server = MyWSGIRefServer(host='0.0.0.0', port=port)


@app.route('/healthz')
def healthz():
    return "Health checker is alive"


# Run our background cron job that does health checks every
# time_interval seconds (300 s, i.e. every 5 min, by default).
background_checks = Cron(server, time_interval)

# Start the bottle server.
app.run(server=server)
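
The cron.Cron class used here is not shown either; a minimal sketch of a compatible background checker, assuming it only needs the server handle and an interval in seconds:

import threading
import time


class Cron:
    """Sketch only: run periodic health checks on a daemon thread."""

    def __init__(self, server, interval):
        self.server = server
        self.interval = int(interval)
        threading.Thread(target=self._loop, daemon=True).start()

    def _loop(self):
        while True:
            time.sleep(self.interval)
            # Placeholder for the real health checks against self.server.
            print('running health checks')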
Example #18
    def test_aggregate_more_than_thirty_classrooms(self):
        """Aggregation uses an IN filter, which breaks App Engine when it has
        more than 30 elements in it. There should be code to handle this."""

        cron = Cron(self.internal_api)

        # To insulate the expected aggregation stats from changes to the
        # populate script, we'll create a separate cohort and classroom. For
        # larger things we'll rely on the stuff set by the populate script,
        # e.g. self.program.
        cohort = self.researcher_api.create(
            'cohort', {
                'name': 'DGN 2015',
                'code': 'lion mackerel',
                'program': self.program.id,
                'school': self.school.id,
            })
        db.get(cohort.key())
        self.researcher_api.associate('set_owner', self.school_admin, cohort)

        # Create 31 different students in as many different classrooms.
        classrooms, student_activities, students = [], [], []
        for x in range(31):
            c = self.school_admin_api.create(
                'classroom', {
                    'name': "English " + str(x),
                    'user': self.school_admin.id,
                    'program': self.program.id,
                    'cohort': cohort.id,
                })
            c = db.get(c.key())

            acts = self.school_admin_api.init_activities('student',
                                                         self.school_admin.id,
                                                         self.program.id,
                                                         cohort_id=cohort.id,
                                                         classroom_id=c.id)
            acts = db.get([a.key() for a in acts])

            s = self.public_api.create('user', {
                'user_type': 'student',
                'classroom': c.id
            })
            classrooms.append(c)
            student_activities += acts
            students.append(s)

        # Bring them all into full db consistency.
        db.get([s.key() for s in students])

        cron.aggregate()

        # All activities should show one student.
        student_activities = db.get([a.key() for a in student_activities])
        correct_stats = {
            'total_students': 1,
            'certified_students': 0,
            'certified_study_eligible_dict': {
                'n': 0,
                'completed': 0,
                'makeup_eligible': 0,
                'makeup_ineligible': 0,
                'uncoded': 0
            },
        }
        for a in student_activities:
            self.assertEqual(a.aggregation_data, correct_stats)
Example #19
    def create_daily_task(token):
        Cron(token).create_daily_task()
Example #20
File: main.py  Project: MG1105/yonder-gae
    def get(self):
        cron = Cron()
        cron.run()
Example #21
def main():
    updates = get_updates()
    for update in updates:
        update.parseTextField()

    Cron().start()
Example #22
    def run(self):
        with create_session() as session:
            # Get API keys from the server.
            accounts_models = session.query(AccountModel).filter_by(
                active=True).all()
        print(f"we have {len(accounts_models)} accounts")
        for account_model in accounts_models:
            account = Account(account_model.api_key, account_model.api_secret,
                              self)
            account.start()
            self.accounts.append(account)
        self.scrapper = Scrapper(self.accounts)
        self.scrapper.start()
        time.sleep(30)

        self.cron = Cron(self.accounts)
        self.cron.start()

        scrapped_accounts_keys = [account.api_key for account in self.accounts]
        while self.keep_running:
            time.sleep(10)  # last value 300
            logger.info("Checking the database for any new accounts")
            if reactor.running:
                print("the twisted reactor is running")
            else:
                print("the twisted reactor is not running")

            with create_session() as session:
                db_account_models = session.query(AccountModel).filter_by(
                    active=True).all()
                unscrapped_accounts = []
                for account_model in db_account_models:
                    if account_model.api_key not in scrapped_accounts_keys:
                        logger.info(
                            f"We have detected a new account, {account_model.api_key}"
                        )
                        account = Account(account_model.api_key,
                                          account_model.api_secret, self)
                        account.start()
                        self.accounts.append(account)
                        if account not in self.cron.accounts:
                            logger.info(
                                "adding the new account to the cron accounts")
                            self.cron.accounts.append(account)
                        unscrapped_accounts.append(account)
                        scrapped_accounts_keys.append(account.api_key)
                if unscrapped_accounts:
                    logger.info("scrapping new accounts")
                    scrapper = Scrapper(unscrapped_accounts)
                    scrapper.start()

                db_account_models_keys = [
                    account.api_key for account in db_account_models
                ]
                for account in self.accounts:
                    if account.api_key not in db_account_models_keys:
                        logger.info(
                            f"account {account.api_key} has been deleted, stopping activities on it"
                        )
                        scrapped_accounts_keys.remove(account.api_key)
                        account.stop()
                        self.accounts.remove(account)
Example #23
    def test_aggregate(self):
        """Test aggregation of pds to activities."""
        cron = Cron(self.internal_api)

        # To insulate the expected aggregation stats from changes to the
        # populate script, we'll create a separate cohort and classroom. For
        # larger things we'll rely on the stuff set by the populate script,
        # e.g. self.program.
        cohort = self.researcher_api.create(
            'cohort', {
                'name': 'DGN 2015',
                'code': 'lion mackerel',
                'program': self.program.id,
                'school': self.school.id,
            })
        self.researcher_api.associate('set_owner', self.school_admin, cohort)
        classroom = self.school_admin_api.create(
            'classroom', {
                'name': "English 201",
                'user': self.school_admin.id,
                'program': self.program.id,
                'cohort': cohort.id,
            })
        student_activities = self.school_admin_api.init_activities(
            'student',
            self.school_admin.id,
            self.program.id,
            cohort_id=cohort.id,
            classroom_id=classroom.id)
        db.get([cohort.key(), classroom.key()])
        db.get([a.key() for a in student_activities])

        # To test aggregating across multiple users, we'll need several
        # students.
        student_params = {'user_type': 'student', 'classroom': classroom.id}

        mystery_finisher = self.public_api.create('user', student_params)
        absentee = self.public_api.create('user', student_params)
        refusee = self.public_api.create('user', student_params)
        expelee = self.public_api.create('user', student_params)
        mr_perfect = self.public_api.create('user', student_params)
        non_finisher = self.public_api.create('user', student_params)
        wrong_name = self.public_api.create('user', student_params)

        # This student will be in another classroom, and we won't update her,
        # proving that cohort aggregation re-queries more than just the changed
        # stuff.
        other_classroom = self.school_admin_api.create(
            'classroom', {
                'name': "English 202",
                'user': self.school_admin.id,
                'program': self.program.id,
                'cohort': cohort.id,
            })
        other_student_activities = self.school_admin_api.init_activities(
            'student',
            self.school_admin.id,
            self.program.id,
            cohort_id=cohort.id,
            classroom_id=other_classroom.id)
        other_student = self.public_api.create('user', {
            'user_type': 'student',
            'classroom': other_classroom.id
        })

        students = [
            mystery_finisher, absentee, refusee, expelee, mr_perfect,
            non_finisher, wrong_name
        ]
        student_keys = [s.key() for s in students]

        others = [other_student, other_classroom] + other_student_activities
        other_keys = [e.key() for e in others]

        ### Aggregate initial state

        # Assume and simulate that enough time passes between data recording
        # and cron execution that entities become consistent.
        db.get(student_keys)
        db.get(other_keys)

        cron.aggregate()

        # Every student should have the same aggregation data for both
        # activities because no one has done anything yet. So just loop and
        # check against the same reference.
        for s in db.get(student_keys):
            self.assertFalse(s.certified)
            self.assertEqual(s.aggregation_data, {})

        # Both activities should be the same also
        a1, a2 = db.get([a.key() for a in student_activities])
        correct_stats = {
            'total_students': 7,
            'certified_students': 0,
            'certified_study_eligible_dict': {
                'n': 0,
                'completed': 0,
                'makeup_eligible': 0,
                'makeup_ineligible': 0,
                'uncoded': 0
            },
        }
        self.assertEqual(a1.aggregation_data, correct_stats)
        self.assertEqual(a2.aggregation_data, correct_stats)

        # The other activities should look like this (this is the last time
        # we'll have to check it because we won't be changing it any more):
        a1, a2 = db.get([a.key() for a in other_student_activities])
        correct_stats = {
            'total_students': 1,
            'certified_students': 0,
            'certified_study_eligible_dict': {
                'n': 0,
                'completed': 0,
                'makeup_eligible': 0,
                'makeup_ineligible': 0,
                'uncoded': 0
            },
        }
        self.assertEqual(a1.aggregation_data, correct_stats)
        self.assertEqual(a2.aggregation_data, correct_stats)

        # Check cohort (has our seven plus one other)
        cohort = db.get(cohort.key())
        correct_cohort_stats = {
            'unscheduled': 2,
            'scheduled': 0,
            'behind': 0,
            'completed': 0,
            'incomplete_rosters': 2,
            'total_students': 8,
            'certified_students': 0,
            'certified_study_eligible_dict': {
                'n': 0,
                'completed': 0,
                'makeup_eligible': 0,
                'makeup_ineligible': 0,
                'uncoded': 0
            },
        }
        self.assertEqual(cohort.aggregation_data[1], correct_cohort_stats)
        self.assertEqual(cohort.aggregation_data[2], correct_cohort_stats)

        ### Pretend the school admin just certified some students and aggregate
        ### again.

        # NOT changing mystery_finisher proves that the aggregator re-queries
        # for unchanged users associated with the same activity.
        certified_students = [
            absentee, refusee, expelee, mr_perfect, non_finisher
        ]
        for s in certified_students:
            s.certified = True
        db.put(certified_students)

        # Assume and simulate that enough time passes between data recording
        # and cron execution that entities become consistent.
        db.get(student_keys)

        cron.aggregate()

        # Every student should be the same for both activities.
        for s in db.get(student_keys):
            self.assertEqual(s.aggregation_data, {})

        # Both activities should be the same also
        a1, a2 = db.get([a.key() for a in student_activities])
        correct_stats = {
            'total_students': 7,
            'certified_students': 5,
            'certified_study_eligible_dict': {
                'n': 5,
                'completed': 0,
                'makeup_eligible': 0,
                'makeup_ineligible': 0,
                'uncoded': 5
            },
        }
        self.assertEqual(a1.aggregation_data, correct_stats)
        self.assertEqual(a2.aggregation_data, correct_stats)

        # Check cohort
        cohort = db.get(cohort.key())
        correct_cohort_stats = {
            'unscheduled': 2,
            'scheduled': 0,
            'behind': 0,
            'completed': 0,
            'incomplete_rosters': 2,
            'total_students': 8,
            'certified_students': 5,
            'certified_study_eligible_dict': {
                'n': 5,
                'completed': 0,
                'makeup_eligible': 0,
                'makeup_ineligible': 0,
                'uncoded': 5
            },
        }
        self.assertEqual(cohort.aggregation_data[1], correct_cohort_stats)
        self.assertEqual(cohort.aggregation_data[2], correct_cohort_stats)

        ### Simulate the first session, with two students absent and one who
        ### doesn't finish. Also schedule the first activity.

        absentee.status_codes[1] = 'A'  # absent
        refusee.status_codes[1] = 'PR'  # parent refusal
        expelee.status_codes[1] = 'E'  # expelled
        wrong_name.status_codes[1] = 'MWN'  # merge: wrong name
        db.put([absentee, refusee, expelee, wrong_name])

        progress_pds = []
        pd_params = {
            'variable': 's1__progress',
            'program': self.program.id,
            'activity': student_activities[0].id,
            'activity_ordinal': 1,
        }
        # Progress on activity 1 for those who finished.
        for s in [mr_perfect, mystery_finisher, wrong_name]:
            pd_params['value'] = '100'
            pd_params['scope'] = s.id
            progress_pds.append(Api(s).create('pd', pd_params))
        # Progress on activity 1 for those who didn't finish.
        pd_params['value'] = '50'
        pd_params['scope'] = non_finisher.id
        progress_pds.append(Api(non_finisher).create('pd', pd_params))

        a1.scheduled_date = datetime.date.today()
        a1.put()

        # Assume and simulate that enough time passes between data recording
        # and cron execution that entities become consistent.
        db.get([pd.key() for pd in progress_pds] +
               [absentee.key(),
                refusee.key(),
                expelee.key(),
                a1.key()])

        cron.aggregate()

        # Check that user stats are right.
        correct_stats = [
            {
                'progress': 100
            },  # mystery_finisher
            None,  # absentee
            None,  # refusee
            None,  # expelee
            {
                'progress': 100
            },  # mr_perfect
            {
                'progress': 50
            },  # non_finisher
            {
                'progress': 100
            },  # wrong_name
        ]
        for index, s in enumerate(students):
            s = db.get(s.key())
            if correct_stats[index] is None:
                self.assertEqual(s.aggregation_data, {})
            else:
                self.assertEqual(s.aggregation_data[1], correct_stats[index])

        # Check that activity stats are right.
        a1 = db.get(student_activities[0].key())
        correct_stats = {
            # Total has decreased b/c MWN students are dropped from the counts
            # completely. This is because they're not really a person, they're
            # a duplicate representation of a different real person.
            'total_students': 6,
            'certified_students': 5,
            'certified_study_eligible_dict': {
                'n': 4,
                'completed': 1,
                'makeup_eligible': 1,
                'makeup_ineligible': 1,
                'uncoded': 1
            },
        }
        self.assertEqual(a1.aggregation_data, correct_stats)
        # Activity 2 shouldn't register any of the progress we've made on
        # activity 1.
        a2 = db.get(student_activities[1].key())
        correct_stats = {
            'total_students': 6,
            'certified_students': 5,
            'certified_study_eligible_dict': {
                'n': 5,
                'completed': 0,
                'makeup_eligible': 0,
                'makeup_ineligible': 0,
                'uncoded': 5
            },
        }
        self.assertEqual(a2.aggregation_data, correct_stats)

        # Check cohort (again, similar, but with a larger 'all' total).
        cohort = db.get(cohort.key())
        correct_cohort_stats = {
            1: {
                'unscheduled': 1,
                'scheduled': 1,
                'behind': 0,
                'completed': 0,
                'incomplete_rosters': 2,
                'total_students': 7,
                'certified_students': 5,
                'certified_study_eligible_dict': {
                    'n': 4,
                    'completed': 1,
                    'makeup_eligible': 1,
                    'makeup_ineligible': 1,
                    'uncoded': 1
                },
            },
            2: {
                'unscheduled': 2,
                'scheduled': 0,
                'behind': 0,
                'completed': 0,
                'incomplete_rosters': 2,
                'total_students': 7,
                'certified_students': 5,
                'certified_study_eligible_dict': {
                    'n': 5,
                    'completed': 0,
                    'makeup_eligible': 0,
                    'makeup_ineligible': 0,
                    'uncoded': 5
                },
            }
        }
        self.assertEqual(cohort.aggregation_data, correct_cohort_stats)
Example #24
def setup_cron():
    setup_fab_env()
    Cron.setup_backup()
Example #26
class ConsistentSearchTest(PopulatedTestCase):
    """Test search indexing."""
    def set_up(self):
        """Overrides PopulatedTestCase.set_up() to change consistency."""
        # This test suite IGNORES datastore inconsistency because it is only
        # interested in what content eventually gets into the search index.
        #
        # Consequently, this suite should only contain tests where this
        # assumption is both valid and required. Any other test should use the
        # default PopulatedTestCase, which assumes maximum inconsistency.
        self.consistency_probability = 1
        super(ConsistentSearchTest, self).set_up()

        # We'll want ready access to a Cron instance and the content search
        # index.
        self.cron = Cron(self.admin_api)
        indexer = Indexer.get_or_insert('the-indexer')
        indexer.delete_all_content()  # fresh start, no pre-populated stuff
        self.search_index = indexer.get_index()

    def test_index_excludes_users(self):
        """Users are both 1) a kind that should never be indexed and 2) a
        placeholder for all kinds that aren't in config.indexed_models."""
        self.admin_api.create('User',
                              email='',
                              auth_id='',
                              last_name='Doe',
                              first_name='John')
        self.cron.index()
        result_dicts = [
            util.search_document_to_dict(doc)
            for doc in self.search_index.get_range()
        ]
        result_kinds = [Model.get_kind(d['uid']) for d in result_dicts]
        self.assertNotIn('User', result_kinds)

    def test_index_excludes_unlisted(self):
        """Content which is normally searchable is not if unlisted."""
        self.admin_api.create(
            'Lesson',
            id='unindexed-lesson',
            name=u'Unindexed Lesson',
            summary=u"R\xf8ckin'",
            tags=['tagone', 'tagtwo'],
            min_grade=5,
            max_grade=8,
            subjects=['reading', 'writing'],
            json_properties={
                'a': u'\xeb',
                'b': [1, 2, 3]
            },
            listed=False,
        )

        result_dicts = [
            util.search_document_to_dict(doc)
            for doc in self.search_index.get_range()
        ]
        result_kinds = [Model.get_kind(d['uid']) for d in result_dicts]
        self.assertNotIn('Lesson', result_kinds)

    def test_index_includes_lesson(self):
        """Lessons should be searchable after creation via put hook."""
        self.admin_api.create(
            'Lesson',
            id='indexed-lesson',
            name=u'Indexed Lesson',
            summary=u"R\xf8ckin'",
            tags=['tagone', 'tagtwo'],
            min_grade=5,
            max_grade=8,
            subjects=['reading', 'writing'],
            json_properties={
                'a': u'\xeb',
                'b': [1, 2, 3]
            },
            listed=True,
        )

        result_dicts = [
            util.search_document_to_dict(doc)
            for doc in self.search_index.get_range()
        ]
        result_kinds = [Model.get_kind(d['uid']) for d in result_dicts]
        self.assertIn('Lesson', result_kinds)

    def test_index_includes_practice(self):
        """Practices should be searchable after creation via put hook."""
        self.normal_api.create(
            'Practice',
            name=u'Indexed Practice',
            summary=u"R\xf8ckin'",
            tags=['super', u'c\xf8\xf8l', 'tagone'],
            subjects=['math', 'history', 'reading'],
            min_grade=0,
            max_grade=13,
            type='text',
            body=u"R\xf8ckin'",
            youtube_id='https://www.youtube.com/watch?v=6sJqTDaOrTg',
            has_files=True,
            pending=False,
            listed=True,
        )

        result_dicts = [
            util.search_document_to_dict(doc)
            for doc in self.search_index.get_range()
        ]
        result_kinds = [Model.get_kind(d['uid']) for d in result_dicts]
        self.assertIn('Practice', result_kinds)

    def test_index_includes_assessment(self):
        """Assessments should be searchable after creation via put hook."""
        self.admin_api.create(
            'Assessment',
            name=u'Indexed Assessment',
            url_name='indexed-assessment',
            description=u"R\xf8ckin'",
            num_phases=2,
            listed=True,
        )

        result_dicts = [
            util.search_document_to_dict(doc)
            for doc in self.search_index.get_range()
        ]
        result_kinds = [Model.get_kind(d['uid']) for d in result_dicts]
        self.assertIn('Assessment', result_kinds)

    def test_assessment_url_name_validation(self):
        """Assessment url names must adhere to a regex, else Exception."""
        def invalid_assessment():
            self.admin_api.create(
                'Assessment',
                name=u'Invalid Assessment',
                url_name='Invalid Assessment',  # bad: capitals, whitespace
                description=u"R\xf8ckin'",
                num_phases=2,
            )

        self.assertRaises(Exception, invalid_assessment)
Example #27
parser.add_argument(
    '--day', help='day to repeat the task, if 0 means the flag is not set')

parser.add_argument(
    '--stop-cron',
    help='stop all currently running cron under the provided username')

parser.add_argument('--directory', help='directory where files are present')

args = parser.parse_args()
print(args)

if __name__ == '__main__':
    if not args.cron_user:
        raise Exception('cron-user not defined. Please see --help.')

    cron = Cron(args.cron_user)

    if args.stop_cron:
        print('stopping cron job for username provided')
        cron.stopCron(args.cron_comment)
    else:
        if args.repeat:
            cron.setupCron(
                'python %smain.py --input-file %s --input-delimiter \'%s\' --batch-size \'%s\' --output-file \'%s\' --output-delimiter \'%s\''
                % (args.directory, args.input_file, args.input_delimiter,
                   args.batch_size, args.output_file, args.output_delimiter),
                args.cron_comment, args.hour, args.day)
        else:
            print(
                'the repeat argument is not set as true, not starting cron job'
            )
Example #29
    def test_aggregation_does_not_change_modified_time(self):
        cron = Cron(self.internal_api)

        # Create a fake set of data to aggregate: A cohort, classroom,
        # student activities, a student, and a pd value for that student.
        cohort = self.researcher_api.create(
            'cohort', {
                'name': 'DGN 2015',
                'code': 'lion mackerel',
                'program': self.program.id,
                'school': self.school.id,
            })
        self.researcher_api.associate('set_owner', self.school_admin, cohort)
        classroom = self.school_admin_api.create(
            'classroom', {
                'name': "English 201",
                'user': self.school_admin.id,
                'program': self.program.id,
                'cohort': cohort.id,
            })
        student_activities = self.school_admin_api.init_activities(
            'student',
            self.school_admin.id,
            self.program.id,
            cohort_id=cohort.id,
            classroom_id=classroom.id)
        db.get([cohort.key(), classroom.key()])
        db.get([a.key() for a in student_activities])
        student_params = {'user_type': 'student', 'classroom': classroom.id}
        student = self.public_api.create('user', student_params)
        student = db.get(student.key())
        pd_params = {
            'variable': 's1__progress',
            'program': self.program.id,
            'activity': student_activities[0].id,
            'activity_ordinal': 1,
            'value': 100,
            'scope': student.id,
        }
        pd = Api(student).create('pd', pd_params)
        db.get(pd.key())

        # First prove that modified times ARE set in this context for normal
        # writes. The student's progress has NOT been written to the user
        # entity yet.
        modified_before = student.modified
        time.sleep(0.1)
        student.first_name = 'mister'
        student.put()
        modified_after = db.get(student.key()).modified
        self.assertEquals(student.aggregation_data, {
            1: {
                'progress': None
            },
            2: {
                'progress': None
            }
        })
        self.assertNotEqual(modified_before, modified_after)

        # Now aggregate, which should write the pd's progress value to the
        # user, but should NOT update the user's modified time.
        modified_before = modified_after
        time.sleep(0.1)
        cron.aggregate()
        student = db.get(student.key())
        modified_after = db.get(student.key()).modified
        self.assertEquals(student.aggregation_data, {
            1: {
                'progress': 100
            },
            2: {
                'progress': None
            }
        })
        self.assertEquals(modified_before, modified_after)
Example #31
    def test_aggregate_handles_duplicates(self):
        """If multiple progress pd, aggregator chooses the largest one."""
        # Create w/o the api to intentionally create duplicates
        pd_id1 = 'Pd_1.' + self.student.id
        pd_id2 = 'Pd_2.' + self.student.id
        pd_id3 = 'Pd_3.' + self.student.id
        pd_id4 = 'Pd_4.' + self.student.id
        pd1 = Pd(key_name=pd_id1,
                 id=pd_id1,
                 parent=self.student,
                 scope=self.student.id,
                 program=self.program.id,
                 activity_ordinal=1,
                 variable='s1__progress',
                 value='66',
                 public=True)
        pd2 = Pd(key_name=pd_id2,
                 id=pd_id2,
                 parent=self.student,
                 scope=self.student.id,
                 program=self.program.id,
                 activity_ordinal=1,
                 variable='s1__progress',
                 value='33',
                 public=True)
        pd3 = Pd(key_name=pd_id3,
                 id=pd_id3,
                 parent=self.student,
                 scope=self.student.id,
                 program=self.program.id,
                 activity_ordinal=2,
                 variable='s2__progress',
                 value='100',
                 public=True)
        pd4 = Pd(key_name=pd_id4,
                 id=pd_id4,
                 parent=self.student,
                 scope=self.student.id,
                 program=self.program.id,
                 activity_ordinal=2,
                 variable='s2__progress',
                 value='66',
                 public=True)

        # Put them in a confusing order on purpose, to try to get the
        # aggregator to process them from largest to smallest.
        db.put([pd1, pd3])
        db.put([pd2, pd4])

        # Prove that there are duplicates.
        duplicates = self.student_api.get('pd', {}, ancestor=self.student)
        self.assertEquals(len(duplicates), 4)

        # Aggregate and check results.
        cron = Cron(self.internal_api)
        cron.aggregate()
        student = db.get(self.student.key())
        self.assertEquals(student.aggregation_data, {
            1: {
                'progress': 66
            },
            2: {
                'progress': 100
            }
        })
        # Student should also have COM for s2 b/c they hit 100.
        self.assertEquals(student.get_status_code(2), 'COM')
Example #32
# systemd_timers - list of systemd timers for the system and all users.
systemd_timers = []

# atd_jobs - list of atd jobs for all users.
atd_jobs = []

atd_paths = [
    '/var/spool/cron/atjobs',
    '/var/spool/at',
]

if __name__ == '__main__':

    jobs = []
    crontab_files = Cron.find_crontabs()
    cron_script_files = Cron.find_cron_scripts()
    # cron_files = list(map(lambda p: p.name, cron._find_cron_tabs()))

    # print("Found crontab files:")
    # print(crontab_files)

    for f in crontab_files:
        newjobs = Cron.parse_crontab(f)
        if newjobs:
            jobs.extend(newjobs)

    # print("Found cron script files:")
    # print(cron_script_files)
    for f in cron_script_files:
        jobs.append(Cron.parse_cron_script(f))
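
The Cron helpers referenced here (find_crontabs, parse_crontab, and so on) are not shown; a rough sketch of what parse_crontab might do, assuming standard five-field user crontab lines:

def parse_crontab(path):
    # Sketch only: return (schedule, command) pairs from a crontab-style
    # file, skipping blank lines, comments, and environment assignments.
    jobs = []
    with open(path) as fh:
        for line in fh:
            line = line.strip()
            if not line or line.startswith('#') or '=' in line.split()[0]:
                continue
            fields = line.split(None, 5)
            if len(fields) == 6:
                jobs.append((' '.join(fields[:5]), fields[5]))
    return jobs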