Example 1
def rop(files, libraries, goal_list, arch = archinfo.ArchAMD64(), log_level = logging.WARNING, validate_gadgets = False, strategy = None, bad_bytes = None):
  """Takes a goal resolver and creates a rop chain for it.  The arguments are as follows:
  $files - a list of tuples of the form (binary filename, gadget filename, load address).  The binary filename is the name of the
    file to generate a ROP chain for.  The gadget filename is a file that has been previously generated which contains the previously
    found gadgets (using the finder.py utility script).  If a gadget file hasn't been generated before, fill in None for this argument.
    The load address of the binary is only needed for libraries and PIE binaries.
  $libraries - a list of path's to the libraries to resolve symbols in.  Primarily this is useful for libc.  This list differs from
    the files list in that the entries in this list will not be used to find gadgets (and thus their address is not needed).
  $goal_list - a list of goals to attempt to compile a ROP chain for.  See goal.py for the format of the items in this list.
  $arch - the archinfo class representing the architecture of the binary
  $log_level - the level of logging to display during the ROP compiling process.  Note that pyvex logs a large amount of info to
    stderr during the compilation process and will not be affected by this value (sorry).
  $validate_gadgets - whether the gadgets should be verified using z3.  While this ensures that the ROP chain will work as expected,
    it makes the finding process faster and in practice shouldn't make a difference.
  $strategy - the strategy for find gadget (see gadget.py).  This can be either FIRST, BEST, or MEDIUM; where FIRST returns the first
    gadget that matches the desired type, BEST scans the found gadgets for the best one that matches the desired type, and MEDIUM
    is a compromise between the two.  In practice, the default (MEDIUM) should work for most things.
  $bad_bytes - a list of strings that a gadget will be rejected for if it contains them
  """
  file_handler = multifile_handler.MultifileHandler(files, libraries, arch, log_level)
  goal_resolver = goal.GoalResolver(file_handler, goal_list, log_level)

  gadgets = file_handler.find_gadgets(validate_gadgets, bad_bytes)
  if strategy is not None:
    gadgets.set_strategy(strategy)
  gadget_scheduler = scheduler.Scheduler(gadgets, goal_resolver, file_handler, arch, log_level, bad_bytes)
  return gadget_scheduler.get_chain()
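
A hedged usage sketch of the function above. The binary name, load address, libc path, and goal are placeholders; the goal format is defined in goal.py and may differ from what is shown here.

import logging
import archinfo

# Hypothetical inputs: a non-PIE binary with no cached gadget file, plus libc
# for symbol resolution. The goal below is illustrative only; see goal.py.
files = [("./vuln", None, None)]
libraries = ["/lib/x86_64-linux-gnu/libc.so.6"]
goal_list = [["execve", "/bin/sh"]]

chain = rop(files, libraries, goal_list,
            arch=archinfo.ArchAMD64(),
            log_level=logging.INFO,
            bad_bytes=["\x00"])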
Example 2
    def __init__(self):
        self.scheduler = scheduler.Scheduler()
        self.observatory = Observer.at_site(
            "Anglo-Australian Observatory"
        )  # TODO: enter LAT and LON coordinates

        self.ra_current = None
        self.dec_current = None

        self.number_of_tiles = self.scheduler.number_of_all_tiles(
        )  # total number of all the tiles in this tiling run
        self.number_of_tiles_observed = 0

        self.unique_targets = set()
        self.repeats = Dictlist()
        self.total_number_cumulative_unique = 0  # EXcluding repeated observations
        self.total_number_cumulative = 0  # INcluding repeated observations

        self.number_of_all_tiles = self.scheduler.number_of_all_tiles()

        self.dt = TimeDelta(0, format='sec')
        self.time_with_no_observing = TimeDelta(0, format='sec')

        # print output
        self.f = open(params_simulator.params['simulator_statistics_output'],
                      'wb')
        self.f.write(
            '# time; tile_id; Ntargets_in_this_tile; Nunique_targets_in_this_tile; Ntotal_number_cumulative_unique; Ntotal_number_cumulative; priority; weight; mag_max; json_filename \n'
        )

        # Print calibration files (times)
        self.fc = open(
            params_simulator.params['simulator_statistics_output_calibration'],
            'wb')
Example 3
File: irc.py Project: m481114/saxo
def start(base):
    # TODO: Check when two clients are running
    common.exit_cleanly()
    # http://stackoverflow.com/questions/11423225
    # IGN rather than DFL, otherwise Popen.communicate can quit saxo
    signal.signal(signal.SIGPIPE, signal.SIG_IGN)

    opt = configparser.ConfigParser(interpolation=None)
    config = os.path.join(base, "config")
    if not os.path.isfile(config):
        error("missing config file in: `%s`" % config, E_NO_CONFIG)
    opt.read(config)
    # TODO: Defaulting?
    # TODO: Warn if the config file is widely readable?

    sockname = os.path.join(base, "client.sock")
    serve(sockname, incoming)
    os.chmod(sockname, 0o600)

    # NOTE: If using os._exit, this doesn't work
    def remove_sock(sockname):
        if os.path.exists(sockname):
            os.remove(sockname)

    atexit.register(remove_sock, sockname)

    sched = scheduler.Scheduler(incoming)
    common.thread(sched.start, base)

    saxo = Saxo(base, opt)
    saxo.run()
Example 4
def _Cron():
    scheddb_path = configdb.Get('scheddb.path', os.path.join(sys.path[0], 'scheddb.sqlite'))
    sched = scheduler.Scheduler(scheddb_path)
    while True:
        sched.Sleep()
        for job,data in sched.Tasks():
            code, response = request_dictionary[job](data)
Example 5
    def __init__(self, parent=None):
        setup()
        super(AutogramApp, self).__init__(parent)

        # Other windows
        self.add_photos_popup = AddPhotosPopup()
        self.instagram_login_popup = InstagramLoginPopup()
        self.instagram_login_popup.got_username.connect(self.receive_username)
        self.instagram_login_popup.got_password.connect(self.receive_password)

        # Buttons
        self.btn_add_photos = QPushButton('Add')
        self.btn_remove_photos = QPushButton('Remove')
        self.btn_view_photos = QPushButton('View')
        self.btn_upload_to_instagram = QPushButton("Upload Now")
        self.btn_login_instagram = QPushButton('Login')
        self.btn_upload_to_instagram.setEnabled(False)

        # Helpers
        self.scheduler = scheduler.Scheduler()
        self.autogram = instagram.Autogram(
            config.DEFAULT_USERNAME,
            config.DEFAULT_PASSWORD)  # TODO - This needs to start headless.

        mainLayout = QGridLayout()
        mainLayout.addWidget(self.posts_section(), 1, 0)
        mainLayout.addWidget(self.scheduler_section(), 1, 1)

        self.setMinimumWidth(250)
        self.setWindowTitle("Autogram")
        self.setLayout(mainLayout)
Example 6
def task1():
	for problem in os.listdir(path):
		if problem not in (".DS_Store", "LICENSE", "README.md", "edges"):

			print(problem)
			rw = ReaderWriter.ReaderWriter()
			[tutorList, moduleList] = rw.readRequirements(path+problem)
			sch = scheduler.Scheduler(tutorList, moduleList)

			#this method will be used to create a schedule that solves task 1
			tt = sch.createSchedule()

			#This method will be used to create a schedule that solves task 2
			# tt = sch.createLabSchedule()

			#this method will be used to create a schedule that solves task 3
			# tt = sch.createMinCostSchedule()

			# print(str(tt.schedule))
			if tt.scheduleChecker(tutorList, moduleList):
				print("Schedule is legal. - TASK 1")
				print("Schedule has a cost of " + str(tt.cost))
				print("\n\n")
			else:
				print("PROBLEM")
				print(problem)
				exit()
Example 7
    def __init__(self,
                 num_steps,
                 dt,
                 t_start,
                 num_people,
                 do_minute_by_minute=False):

        # create a clock.
        self.clock = temporal.Temporal()
        self.clock.dt = dt
        self.clock.t_univ = t_start
        self.clock.set_time()

        # store the initial time [minutes] in universal time
        self.t_start = t_start

        # the final time of the simulation in universal time
        self.t_end = self.t_start + num_steps * dt

        # create a home
        self.home = home.Home(self.clock)

        # list of persons
        self.people = []

        # the schedule
        self.schedule = scheduler.Scheduler(clock=self.clock, num_people=num_people,
                                            do_minute_by_minute=do_minute_by_minute)

        return
Example 8
    def do_post(self, **kwargs):
        # print kwargs
        try:
            scheduler.Scheduler(**kwargs).create_job()
            # print 'post job "%s" to cluster "%s" with id "%s"' % (script, cluster, job_uuid)
        except Exception, e:
            print e
Example 9
    def test_search_event(self):
        scheduling_queue = scheduler.Scheduler()
        # Set up the test set between 2018.08.31 17:45 ~ 2018.08.31 18:05
        # with one minute interval
        sample_time = datetime.strptime("2018.08.31 17:45", "%Y.%m.%d %H:%M")
        for i in range(20):
            minute = timedelta(minutes=i)
            scheduling_queue.register_event(sample_time + minute)
        scheduling_queue.show_event()

        # Verify search_event doesn't return None
        # when the element is already in the queue
        search_result = scheduling_queue.search_event("2018.08.31 17:45")
        assert search_result is not None
        # Verify it returns correct number of result
        # when the end_time parameter is given
        search_result = scheduling_queue.search_event(
            end_time="2018.08.31 17:46")
        result_count = len(search_result)
        assert result_count == 2
        # Verify it returns correct number of result
        # when the start_time and end_time parameters are given
        search_result = scheduling_queue.search_event("2018.08.31 17:45",
                                                      "2018.08.31 17:48")
        result_count = len(search_result)
        assert result_count == 4
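
The assertions above pin down the boundary semantics of search_event: both start_time and end_time are inclusive (17:45 through 17:46 yields 2 events, 17:45 through 17:48 yields 4). A minimal sketch of a Scheduler consistent with this test, assuming the real class stores datetime objects and parses "%Y.%m.%d %H:%M" strings:

from datetime import datetime


class Scheduler(object):
    FMT = "%Y.%m.%d %H:%M"

    def __init__(self):
        self.events = []

    def register_event(self, when):
        # `when` is a datetime; keep the queue sorted by time.
        self.events.append(when)
        self.events.sort()

    def show_event(self):
        for event in self.events:
            print(event.strftime(self.FMT))

    def search_event(self, start_time=None, end_time=None):
        # Both bounds are inclusive, matching the counts asserted in the test.
        start = datetime.strptime(start_time, self.FMT) if start_time else datetime.min
        end = datetime.strptime(end_time, self.FMT) if end_time else datetime.max
        return [e for e in self.events if start <= e <= end]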
Example 10
def ra_tonight():
    '''
    Total number over time, plus number of stars with high priorities over time.
    Duration of the survey.
    Number of UNIQUE targets.
    '''
    s = scheduler.Scheduler()
    TILES = s.tiles  # with priorities and tile_ids
    tiles = {x.field_id: x for x in TILES}

    #~ data=np.loadtxt('test1/observing_plan_20180304.dat', dtype='string')
    data = np.loadtxt('test2_julij4/observing_plan_20180304.dat',
                      dtype='string')

    d = []
    for x in data:
        print x[3]
        tileid = int(x[3])
        tile = tiles[tileid]

        d.append([tile.ra, tile.dec])

    d = np.array(d)

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(d[:, 0] / 15.0, d[:, 1], c='k')
    plt.show()
Example 11
def ra_every_day():
    s = scheduler.Scheduler()
    TILES = s.tiles  # with priorities and tile_ids
    tiles = {x.field_id: x for x in TILES}

    r = Dictlist()
    tls = Dictlist()

    unique = set()

    for x in data:
        date = datetime.date(year=int(x[0][:4]),
                             month=int(x[0][5:7]),
                             day=int(x[0][8:10]))
        tileid = int(x[1])
        tile = tiles[tileid]
        r[date] = [tile.ra, tile.dec]

    dates = sorted(r)
    p = []
    for date in dates:
        d = r[date]
        d = np.array(d)
        #~ p.append([np.min(d[:,0]), np.max(d[:,0])])
        p.append([d[0, 0], d[-1, 0]])
    p = np.array(p)

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(dates, p[:, 0] / 15.0, c='k')
    ax.plot(dates, p[:, 1] / 15.0, c='r')
    ax.scatter(dates, p[:, 0] / 15.0, c='k')
    ax.scatter(dates, p[:, 1] / 15.0, c='r')
    plt.show()
Example 12
def ranking_versus_time():
    '''
    The last tiles to be observed (what fraction) have weights equal to 0.
    '''
    s = scheduler.Scheduler()
    TILES = s.tiles  # with priorities and tile_ids
    tiles = {x.field_id: x for x in TILES}

    r = Dictlist()
    for x in data:
        pr = [[] for xx in range(6)]
        date = datetime.date(year=int(x[0][:4]),
                             month=int(x[0][5:7]),
                             day=int(x[0][8:10]))
        tileid = int(x[1])
        tile = tiles[tileid]
        r[date] = tile.priority

    dates = sorted(r)
    #~ weights=[r[x] for x in dates]

    fig = plt.figure()
    ax = fig.add_subplot(111)
    for date in dates:
        v = r[date]
        ax.scatter([date for i in range(len(v))], v)
    ax.set_yscale('log')
    plt.show()
Example 13
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--parameters",
                        type=str,
                        required=True,
                        help="File containing parameters")
    parser.add_argument("--run",
                        action="store_true",
                        default=False,
                        help="Run simulation")
    parser.add_argument(
        "--relative",
        action="store_true",
        default=False,
        help="Store timestamps assuming first timestamp is at time 0")
    args = parser.parse_args()

    p = json.loads(open(args.parameters).read())
    params = json.loads(open(p["parameters"]).read())
    setup.process_functions(params)
    jobs = create_jobs(p["source_bucket"], params["bucket"], p["policy"],
                       p["prefix"], p["num_jobs"], p["duration"], p["offset"],
                       args.relative)
    if args.run:
        s = scheduler.Scheduler(p["policy"], p["timeout"], params)
        s.add_jobs(jobs)
        s.listen(p["num_invokers"], p["num_loggers"])
Example 14
    def test_task_launch(self):
        s = scheduler.Scheduler()

        state_data = {'slaves': [{'id': '1', 'pid': 'slave@foo-01'}]}
        s.update(None, state_data)

        self.assertEqual(len(s.monitor), 1)
        self.assertEqual(len(s.targets), 1)
        self.assertEqual(len(s.staging), 0)
        self.assertEqual(len(s.running), 0)

        # Mimic that we launched the task
        s.status_update(s.targets['foo-01'].task_id, mesos_pb2.TASK_STAGING)

        # Should now be in staging queue
        self.assertEqual(len(s.monitor), 0)
        self.assertEqual(len(s.staging), 1)
        self.assertEqual(len(s.running), 0)

        # Mimic that task is running
        s.status_update(s.targets['foo-01'].task_id, mesos_pb2.TASK_RUNNING)

        self.assertEqual(len(s.monitor), 0)
        self.assertEqual(len(s.staging), 0)
        self.assertEqual(len(s.running), 1)
Example 15
    def changeNodeInputs(self):
      # preference = self.prefCombo.currentText()
      preference = self.prefCombo.itemData(self.prefCombo.currentIndex()).toString()
      print >> sys.stderr, preference

      sc = scheduler.Scheduler()
      if preference == "performance":
        self.sanEdit.setText("4")
        self.wesEdit.setText("2")
        self.nehEdit.setText("1")
        self.harEdit.setText("1")
        self.costEdit.setText("$ %d"%sc.get_cost(4,2,1,1))
        self.timeEdit.setText("120 minutes")
      elif preference == "cost":
        self.sanEdit.setText("1")
        self.wesEdit.setText("1")
        self.nehEdit.setText("2")
        self.harEdit.setText("4")
        self.costEdit.setText("$ %d"%sc.get_cost(1,1,2,4))
        self.timeEdit.setText("265 minutes")
      elif preference == "manual":
        self.sanEdit.setText("0")
        self.wesEdit.setText("0")
        self.nehEdit.setText("0")
        self.harEdit.setText("0")
        self.costEdit.setText("$ %d"%sc.get_cost(0,0,0,0))
        self.timeEdit.setText("0")
Example 16
def simulate():
    '''
    Simulate observations.
    '''
    dates = find_dates_to_observe()

    s = scheduler.Scheduler()
    f = open(params_simulator.params['simulate_dates_file'], 'wb')
    for date in dates:
        print
        print 'START NIGHT:', date
        f.write(date.replace('-', '') + '\n')

        # skip if already exists (was computed during the previous simulations)
        #~ if os.path.isdir("/home/el")):
        #~ continue

        # Thin clouds. Observe only bright stars --> magnitude limit.
        mag_limit = True

        s.observing_plan(date=date, remove_twilight=True, bright_time=True)

        # Seeing
        #~ seeing=random.gauss(2.0, 1.0) # Maybe gauss is not the best distribution
    f.close()
Example 17
def main():
    '''Parse the input options and pass them to the crawler.'''
    parser = optparse.OptionParser(version = '%prog 1.0')
    parser.add_option('-u', '--url', dest = 'url', default = 'http://www.sina.com.cn', help = 'start the domain name')
    parser.add_option('-t', '--thread', dest = 'threadNum', default = 10, help = 'Number of threads')
    parser.add_option('-d', '--depth', dest = 'depth', default = 2, help = 'Crawling depth')
    parser.add_option('-l', '--loglevel', dest = 'loglevel', default = 3, help = 'Log level')
    parser.add_option('-k', '--key', dest = 'keywords', default = '', help = 'Search keywords' )
    parser.add_option('--model', dest = 'model', default = 0, help = 'Crawling mode: Static 0, Dynamic 1')
    parser.add_option('--dbfile', dest = 'dbName', default = 'spider.db', help = 'Database name')
    parser.add_option('--testself', dest = 'test', default = 0, help = 'Test self')

    (options, args) = parser.parse_args()

    startUrl = [options.url]
    threadNum = int(options.threadNum)
    depth = int(options.depth)
    loglevel = int(options.loglevel)
    keywords = options.keywords
    model = int(options.model)
    dbName = options.dbName
    test = int(options.test)

    '''
    print 'url:%s, threadNum:%d, depth:%d, loglevel:%d, keywords:%s, model:%d, dbName:%s' % (
    startUrl, threadNum, depth, loglevel, keywords, model, dbName)
    '''
    # Create the crawler and start it
    spider = scheduler.Scheduler(dbName, threadNum, loglevel, startUrl, depth, keywords, model)
    spider.start()
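
Assuming this main() lives in a script named crawler.py (a hypothetical name), a typical invocation using the options defined above might be:

    python crawler.py -u http://www.sina.com.cn -t 10 -d 2 --dbfile spider.db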
Example 18
    def test_get_action(self):

        s = sch.Scheduler(self.task_clients,
                          self.wf,
                          nstills=1,
                          actions_per_still=1)
        f = 1
        a = s.get_action(self.dbi, f, ActionClass=self.FakeAction)
        self.assertNotEqual(a, None)  # everything is actionable in this test
        FILE_PROCESSING_LINKS = {
            'ACQUIRE_NEIGHBORS': 'UVCRE',
            'CLEAN_NEIGHBORS': 'UVCRRE_POT',
            'CLEAN_NPZ': 'CLEAN_NEIGHBORS',
            'CLEAN_UV': 'UVCR',
            'CLEAN_UVC': 'ACQUIRE_NEIGHBORS',
            'CLEAN_UVCR': 'COMPLETE',
            'CLEAN_UVCRE': 'UVCRRE',
            'CLEAN_UVCRR': 'CLEAN_NPZ',
            'CLEAN_UVCRRE': 'CLEAN_UVCR',
            'COMPLETE': None,
            'NEW': 'UV_POT',
            'NPZ': 'UVCRR',
            'NPZ_POT': 'CLEAN_UVCRE',
            'UV': 'UVC',
            'UVC': 'CLEAN_UV',
            'UVCR': 'CLEAN_UVC',
            'UVCRE': 'NPZ',
            'UVCRR': 'NPZ_POT',
            'UVCRRE': 'CLEAN_UVCRR',
            'UVCRRE_POT': 'CLEAN_UVCRRE',
            'UV_POT': 'UV'
        }
        # Jon: FIXME HARDWF # check this links to the next step
        self.assertEqual(a.task, FILE_PROCESSING_LINKS[self.dbi.files[f]])
Example 19
    def test_start(self):
        dbi = FakeDataBaseInterface(10)

        class FakeAction(sch.Action):
            def run_remote_task(self):
                dbi.files[self.obs] = self.task

        def all_done():
            for f in dbi.files:
                if dbi.get_obs_status(f) != 'COMPLETE':
                    return False
            return True

        task_clients = TaskClient(dbi, 'localhost', self.wf, port=TEST_PORT)

        s = sch.Scheduler(task_clients,
                          self.wf,
                          nstills=1,
                          actions_per_still=1,
                          blocksize=10)
        # myscheduler = StillScheduler(task_clients, wf,
        # actions_per_still=ACTIONS_PER_STILL, blocksize=BLOCK_SIZE,
        # nstills=len(STILLS))  # Init scheduler daemon
        t = threading.Thread(target=s.start,
                             args=(dbi, FakeAction),
                             kwargs={'sleeptime': 0})
        t.start()
        tstart = time.time()
        while not all_done() and time.time() - tstart < 1:
            time.sleep(.1)
        s.quit()
        for f in dbi.files:
            self.assertEqual(dbi.get_obs_status(f), 'COMPLETE')
Example 20
def setup_globals():
    global es, sched, logger
    # set the logging level according to the config
    logging.basicConfig(level=app.config["LOGGING_LEVEL"],
                        format=("%(asctime)s %(name)s [%(threadName)s]: "
                                "%(message)s"))
    # silence the given libraries, since they go crazy in debug-mode.
    for lib in "requests urllib3 elasticsearch".split():
        logging.getLogger(lib).setLevel(logging.WARNING)

    logger = logging.getLogger(__name__)

    # connect to the elasticDB
    es = elastic.Elastic(app.config["ELASTICSEARCH_HOST"],
                         app.config["ELASTICSEARCH_PORT"],
                         (app.config["ELASTICSEARCH_USER"],
                          app.config["ELASTICSEARCH_PASSWORD"]),
                         cert=app.config["ELASTICSEARCH_CAFILE"],
                         docs_index=app.config["ELASTICSEARCH_DOCS_INDEX"],
                         fs_dir=app.config["UPLOAD_DIR"])
    # start the scheduler
    sched = scheduler.Scheduler(es.es,
                                crawler_args={"elastic": es},
                                hour=2,
                                minute=0)
Example 21
    def test_get_new_active_obs(self):
        # s = sch.Scheduler(nstills=1, actions_per_still=1, blocksize=10)
        s = sch.Scheduler(self.task_clients, self.wf, nstills=1, actions_per_still=1, blocksize=10)
        tic = time.time()
        s.get_new_active_obs(self.dbi)
        print("time to execute get_new_active_obs: %s" % (time.time() - tic))
        self.assertEqual(len(s.active_obs), self.ntimes * self.npols)
Example 22
    def test_start(self):
        self.dbi = PopulatedDataBaseInterface(3, 1, test=True)
        obsnums = self.dbi.list_observations()

        class SuccessAction(sch.Action):

            def run_remote_task(me):
                me.dbi = self.dbi
                # print "Action setting {obsnum} status to {status}".format(
                #        status=me.task,obsnum=me.obs)
                me.dbi.set_obs_status(me.obs, me.task)

        def all_done():
            for obsnum in obsnums:
                print("I'm in the all_done")
                if self.dbi.get_obs_status(obsnum) != 'COMPLETE':
                    return False
            return True

        # s = sch.Scheduler(nstills=1, actions_per_still=1, blocksize=10)
        s = sch.Scheduler(self.task_clients, self.wf, nstills=1, actions_per_still=1, blocksize=10)
        t = threading.Thread(target=s.start, args=(self.dbi, SuccessAction))
        t.start()
        tstart = time.time()
        completion_time = len(FILE_PROCESSING_STAGES) * 3 * 0.2  # 0.2 s per file per step
        # print "time to completion:",completion_time,'s'
        while not all_done():
            if time.time() - tstart > completion_time:
                break
            time.sleep(10)
        s.quit()
        for obsnum in obsnums:
            self.assertEqual(self.dbi.get_obs_status(obsnum), 'COMPLETE')
Example 23
    def test_faulty(self):
        for i in xrange(1):
            dbi = FakeDataBaseInterface(10)

            class FakeAction(sch.Action):
                def __init__(self, f, task, neighbors, still, wf):
                    sch.Action.__init__(self, f, task, neighbors, still, wf, timeout=.01)

                def run_remote_task(self):
                    if random.random() > .5:
                        dbi.files[self.obs] = self.task

            def all_done():
                for f in dbi.files:
                    if dbi.get_obs_status(f) != 'COMPLETE':
                        return False
                return True
            task_clients = TaskClient(dbi, 'localhost', self.wf, port=TEST_PORT)

            s = sch.Scheduler(task_clients, self.wf, nstills=1, actions_per_still=1, blocksize=10)
            t = threading.Thread(target=s.start, args=(dbi, FakeAction), kwargs={'sleeptime': 0})
            t.start()
            tstart = time.time()
            while not all_done() and time.time() - tstart < 10:
                # print s.launched_actions[0][0].obs, s.launched_actions[0][0].task
                # print [(a.obs, a.task) for a in s.action_queue]
                time.sleep(.1)
            s.quit()
            # for f in dbi.files:
            #    print f, dbi.files[f]
            for f in dbi.files:
                self.assertEqual(dbi.get_obs_status(f), 'COMPLETE')
Example 24
    def test_clean_completed_actions(self):
        """
        todo
        """
        self.dbi = PopulatedDataBaseInterface(3, 1, test=True)

        class SuccessAction(sch.Action):
            def run_remote_task(me):
                me.dbi = self.dbi
                me.dbi.set_obs_status(me.obs, me.task)
                print("Action has status: %s") % (me.dbi.get_obs_status(
                    me.obs))
                return None

        # s = sch.Scheduler(nstills=1, actions_per_still=1, blocksize=10)
        s = sch.Scheduler(self.task_clients,
                          self.wf,
                          nstills=1,
                          actions_per_still=1,
                          blocksize=10)
        s.get_new_active_obs(self.dbi)
        s.update_action_queue(self.dbi, ActionClass=SuccessAction)
        a = s.pop_action_queue(0)
        s.launch_action(a)
        self.assertEqual(len(s.launched_actions[0]), 1)
        time.sleep(1)
        s.clean_completed_actions(self.dbi)
        self.assertEqual(len(s.launched_actions[0]), 0)
Example 25
    def test_task_failure(self):
        print "Task failure test"

        s = scheduler.Scheduler()

        state_data = {'slaves': [{'id': '1', 'pid': 'slave@foo-01'}]}
        s.update(None, state_data)

        self.assertEqual(len(s.monitor), 1)
        self.assertEqual(len(s.targets), 1)
        self.assertEqual(len(s.staging), 0)
        self.assertEqual(len(s.running), 0)

        # Mimic that we launched the task
        s.status_update(s.targets['foo-01'].task_id, mesos_pb2.TASK_STAGING)

        # Should now be in staging queue
        self.assertEqual(len(s.monitor), 0)
        self.assertEqual(len(s.staging), 1)
        self.assertEqual(len(s.running), 0)

        # Mimic that task failed. Verify that it is back in monitor queue.
        s.status_update(s.targets['foo-01'].task_id, mesos_pb2.TASK_LOST)

        self.assertEqual(len(s.monitor), 1)
        self.assertEqual(len(s.staging), 0)
        self.assertEqual(len(s.running), 0)
Example 26
    def test_get_new_active_obs(self):
        s = sch.Scheduler(self.task_clients,
                          self.wf,
                          nstills=1,
                          actions_per_still=1)
        s.get_new_active_obs(self.dbi)
        for i in xrange(self.nfiles):
            self.assertTrue(i in s.active_obs)
Example 27
    def test_add(self):
        test_scheduler = sc.Scheduler()
        test_scheduler.add('17:00', 'alarm')
        self.assertEqual(test_scheduler.queue.heap, [0, [1700, 'alarm']])
        test_scheduler.add('14:00', 'crawling')
        self.assertEqual(test_scheduler.queue.heap, [0, [1400, 'crawling'], [1700, 'alarm']])
        test_scheduler.add('10:00', 'test')
        self.assertEqual(test_scheduler.queue.heap, [0, [1000, 'test'], [1700, 'alarm'], [1400, 'crawling']])
Example 28
    def test_run(self):
        test_scheduler = sc.Scheduler()
        test_scheduler.add('17:00', 'alarm')
        test_scheduler.add('14:00', 'crawling')
        test_scheduler.run('14:00')
        self.assertEqual(test_scheduler.queue.heap, [0, [1700, 'alarm']])
        test_scheduler.run('17:00')
        self.assertEqual(test_scheduler.queue.heap, [0])
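
The two tests above pin down the queue representation: a 1-indexed binary min-heap stored in a Python list with a 0 sentinel at index 0, keyed on the time as an HHMM integer. A minimal sketch that satisfies both tests (an assumption; the real sc.Scheduler may differ in detail):

class MinHeap(object):
    def __init__(self):
        # Index 0 holds a sentinel so the children of node i sit at 2*i and 2*i + 1.
        self.heap = [0]

    def push(self, item):
        # Append at the end, then sift up by time key.
        self.heap.append(item)
        i = len(self.heap) - 1
        while i > 1 and self.heap[i][0] < self.heap[i // 2][0]:
            self.heap[i], self.heap[i // 2] = self.heap[i // 2], self.heap[i]
            i //= 2

    def pop(self):
        # Remove the root, move the last item to the top, then sift down.
        top = self.heap[1]
        last = self.heap.pop()
        if len(self.heap) > 1:
            self.heap[1] = last
            i = 1
            while 2 * i < len(self.heap):
                child = 2 * i
                if child + 1 < len(self.heap) and self.heap[child + 1][0] < self.heap[child][0]:
                    child += 1
                if self.heap[i][0] <= self.heap[child][0]:
                    break
                self.heap[i], self.heap[child] = self.heap[child], self.heap[i]
                i = child
        return top


class Scheduler(object):
    def __init__(self):
        self.queue = MinHeap()

    def add(self, hhmm, name):
        # '17:00' -> 1700, so earlier times sort first.
        self.queue.push([int(hhmm.replace(':', '')), name])

    def run(self, hhmm):
        # Fire the earliest event when its time matches the given time.
        if len(self.queue.heap) > 1 and self.queue.heap[1][0] == int(hhmm.replace(':', '')):
            return self.queue.pop()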
Example 29
    def test_update_action_queue(self):
        s = sch.Scheduler(self.task_clients, self.wf, nstills=1, actions_per_still=1, blocksize=10)
        s.get_new_active_obs(self.dbi)
        s.update_action_queue(self.dbi)
        self.assertEqual(len(s.action_queue), self.nfiles)
        self.assertGreater(s.action_queue[0].priority, s.action_queue[-1].priority)
        for a in s.action_queue:
            self.assertEqual(a.task, 'UV')
Example 30
    def test_should_isolate_all_jobs_with_8_hours_plus(self):
        for job in self.default_jobs:
            job['estimated_time'] = 20
        sch = scheduler.Scheduler(*self.default_interval)
        sch.load_jobs(self.default_jobs)
        result = list(map(lambda eg: list(eg.task_ids), sch._execution_groups))
        self.assertEqual(result, [[1], [3], [2]])
        self.assertEqual(sch.time_leftover, timedelta(days=-2, seconds=54000))