Example #1
 def test_improvement_with_ls(self):
     job_a = Job(0, [1, 5])
     job_b = Job(1, [5, 1])
     flow_shop_2 = Flowshop(2, 2, [job_a, job_b])
     swap_neighbors = create_swap_neighbors(flow_shop_2)
     insert_neighbors = create_insert_neighbors(flow_shop_2)
     scheduling = Ordonnancement(job_a.nb_op)
     scheduling.ordonnancer_liste_job([job_b, job_a])
     new_scheduling_swap = local_search_swap(scheduling,
                                             1,
                                             max_neighbors_nb=50,
                                             neighbors=swap_neighbors)
     new_scheduling_insert = local_search_insert(scheduling,
                                                 1,
                                                 max_neighbors_nb=50,
                                                 neighbors=insert_neighbors)
     self.assertTrue(scheduling.duree() == 11)
     self.assertTrue(new_scheduling_swap.duree() < scheduling.duree())
     self.assertTrue(new_scheduling_insert.duree() < scheduling.duree())
     self.assertTrue(new_scheduling_swap.duree() == 7)
     self.assertTrue(new_scheduling_insert.duree() == 7)
     self.assertEqual(len(new_scheduling_swap.sequence()), 2)
     self.assertEqual(len(new_scheduling_insert.sequence()), 2)
     for job in [job_a, job_b]:
         self.assertIn(job, new_scheduling_swap.sequence())
         self.assertIn(job, new_scheduling_insert.sequence())
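The makespans asserted here can be checked by hand: with job_a = [1, 5] and job_b = [5, 1] on two machines, the order [job_b, job_a] finishes at 11 while [job_a, job_b] finishes at 7, which is what both local searches are expected to find. A standalone sketch of that computation (a hypothetical helper, not part of the project's Ordonnancement API):

# Standalone sketch: makespan of a permutation flow shop where each job is a
# list of processing times, one per machine (hypothetical helper for checking
# the expected values 11 and 7 above).
def makespan(sequence):
    completion = [0] * len(sequence[0])  # completion time per machine
    for durations in sequence:
        for machine, duration in enumerate(durations):
            previous = completion[machine - 1] if machine > 0 else 0
            completion[machine] = max(completion[machine], previous) + duration
    return completion[-1]

print(makespan([[5, 1], [1, 5]]))  # order [job_b, job_a] -> 11
print(makespan([[1, 5], [5, 1]]))  # order [job_a, job_b] -> 7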
Example #2
 def test_get(self):
     job = Job.create(db.session, **self.opts)
     res = Job.get(db.session, job.uid)
     self.assertEquals(res.json(), job.json())
     # fetch job with non-existent job id
     res = Job.get(db.session, "")
     self.assertEquals(res, None)
Example #3
def start():
    global job

    if not client.is_connected():
        client.connect(local_path / "data", "test-model", None)

    job_spec = request.json
    options = job_spec["options"]

    job = Job(options, client, logger)
    header = job.init_data_file()
    socketio.emit('job header', header)

    logger.log("Job started, connected to inputs {} and outputs {}".format(
        client.get_input_ids(), client.get_output_ids()))

    message = "\nOptimization started: {} designs / {} generations".format(
        job.num_designs, job.max_gen)
    socketio.emit('server message', {"message": message})

    if client.get_ss_connection() is not None:
        ss_path = client.get_dir(["jobs", job.get_id(), "images"])
        os.makedirs(ss_path, exist_ok=True)

    if client.get_connection():
        do_next()
    else:
        run_local()

    return jsonify({"status": "success", "job_id": str(job.get_path())})
Example #4
def add_job(context, request):
    job = Job()
    job._id = ''.join(random.choice(string.ascii_lowercase) for i in range(24))
    job.description = "Fake job"
    _JOBS.append(job)
    response = Response("OK", 200)
    return response
Example #5
 def test_create5(self):
     self.opts["options"] = {}
     self.opts["jobconf"] = []
     job = Job(**self.opts)
     self.assertEquals(job.getSparkOptions(), {"spark.driver.memory": self.opts["dmemory"],
         "spark.executor.memory": self.opts["ememory"]})
     self.assertEquals(job.getJobConf(), [])
Example #6
 def test_add1(self):
     self.opts["delay"] = 2000
     job = Job.create(db.session, **self.opts)
     self.assertEquals(job.submittime, job.createtime + 2000 * 1000)
     # a negative delay is treated as 0 seconds
     self.opts["delay"] = -2000
     job = Job.create(db.session, **self.opts)
     self.assertEquals(job.submittime, job.createtime)
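Taken together, the two assertions pin down the delay rule: a positive delay (given in seconds) is added to the creation time as milliseconds, and a negative delay is clamped to zero. A one-line sketch of that rule, with hypothetical names:

# Hypothetical sketch of the submit-time rule exercised by test_add1.
def compute_submittime(createtime_ms, delay_seconds):
    return createtime_ms + max(delay_seconds, 0) * 1000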
Example #7
def get_job_status(base_url, client_id, client_secret, job_id):
    try:
        Job.get_status(base_url,
                       client_id,
                       client_secret,
                       job_id=str(job_id),
                       operations_path='src/get_job_status.json')
    except Exception as e:
        raise SystemExit(e)
Example #8
 def test_run(self):
     job = Job.create(db.session, **self.opts)
     Job.run(db.session, job)
     self.assertEqual(job.status, Job.RUNNING)
     # check that start time is close to current time
     self.assertTrue(job.starttime > utils.currentTimeMillis() - 2000)
     # should fail to run already running job
     with self.assertRaises(StandardError):
         Job.run(db.session, job)
Example #9
 def test_listRunning(self):
     arr = [("ready", Job.READY), ("running", Job.RUNNING), ("finished", Job.FINISHED),
         ("running", Job.RUNNING)]
     for name, status in arr:
         job = Job.create(db.session, **self.opts)
         job.name = name
         job.status = status
         db.session.commit()
     jobs = Job.listRunning(db.session)
     self.assertEqual(len(jobs), 2)
     self.assertEqual([x.status for x in jobs], [Job.RUNNING, Job.RUNNING])
Example #10
 def test_create(self):
     job = Job(**self.opts)
     self.assertEquals(job.name, self.opts["name"])
     self.assertEquals(job.status, self.opts["status"])
     self.assertEquals(job.createtime, self.opts["createtime"])
     self.assertEquals(job.submittime, self.opts["submittime"])
     self.assertEquals(job.entrypoint, self.opts["entrypoint"])
     self.assertEquals(job.jar, self.opts["jar"])
     self.opts["spark.driver.memory"] = self.opts["dmemory"]
     self.opts["spark.executor.memory"] = self.opts["ememory"]
     self.assertEquals(job.getSparkOptions(), self.opts["options"])
     self.assertEquals(job.getJobConf(), self.opts["jobconf"])
Example #11
def test_sift_up(create_scheduler_with_jobs):
    scheduler = create_scheduler_with_jobs

    def job_f():
        return None

    job_f_time = datetime.datetime.now() + datetime.timedelta(minutes=2)
    job_f_hours, job_f_minutes = job_f_time.hour, job_f_time.minute
    job = Job(job_f, str(job_f_hours) + ":" + str(job_f_minutes))
    job.next_run = job_f_time
    scheduler.jobs.append(job)
    scheduler.sift_up(scheduler.jobs, len(scheduler.jobs) - 1)
    assert scheduler.jobs[0].name == "job_f"
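The assertion assumes the scheduler keeps its jobs in a list-backed binary min-heap keyed by next_run, so sift_up bubbles the freshly appended job towards index 0 while it runs sooner than its parents. A generic sketch of such a sift-up (illustrative, not necessarily the project's Scheduler.sift_up):

# Illustrative sift-up on a list-backed binary min-heap ordered by next_run.
def sift_up(heap, index):
    while index > 0:
        parent = (index - 1) // 2
        if heap[index].next_run < heap[parent].next_run:
            heap[index], heap[parent] = heap[parent], heap[index]
            index = parent
        else:
            break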
Example #12
 def test_listRunnable(self):
     arr = [("ready", Job.READY), ("running", Job.RUNNING), ("finished", Job.FINISHED),
         ("running", Job.RUNNING), ("delayed", Job.DELAYED)]
     for name, status in arr:
         job = Job.create(db.session, **self.opts)
         job.name = name
         job.status = status
         if status is Job.DELAYED:
             job.submittime = job.submittime - 10000
             job.priority = job.submittime / 1000L
         db.session.commit()
     jobs = Job.listRunnable(db.session, 5, utils.currentTimeMillis())
     self.assertEqual(len(jobs), 2)
     self.assertEqual([x.name for x in jobs], ["delayed", "ready"])
     self.assertEqual([x.status for x in jobs], [Job.DELAYED, Job.READY])
Example #13
 def test_list(self):
     job = Job.create(db.session, **self.jobOpts)
     self.opts["job_uid"] = job.uid
     for x in range(5):
         Timetable.create(db.session, **self.opts)
     arr = Timetable.list(db.session, None)
     self.assertEquals(len(arr), 5)
Example #14
 def test_finish(self):
     job = Job.create(db.session, **self.opts)
     # should not be able to finish not running job
     with self.assertRaises(StandardError):
         Job.finish(db.session, job)
     # Launch job and finish it
     Job.run(db.session, job)
     Job.finish(db.session, job)
     self.assertEqual(job.status, Job.FINISHED)
     self.assertTrue(job.finishtime > utils.currentTimeMillis() - 2000)
     # should fail to finish already finished job
     with self.assertRaises(StandardError):
         Job.finish(db.session, job)
Example #15
 def test_execCommand(self):
     job = Job.create(db.session, **self.opts)
     cmd = job.execCommand(sparkContext)
     self.assertEqual(sorted(cmd), sorted(["spark-submit", "--name", "test-job", "--master",
         "spark://sandbox:7077", "--conf", "%s=%s" % (SPARK_OCTOHAVEN_JOB_ID, job.uid),
         "--conf", "spark.executor.memory=4g", "--conf", "spark.driver.memory=4g",
         "--conf", "spark.file.overwrite=true", "--conf", "spark.shuffle.spill=true",
         "--class", "com.test.Main", "/tmp/file.jar", "a", "b", "c"]))
Example #16
 def test_execCommandOrder(self):
     job = Job.create(db.session, **self.opts)
     cmd = job.execCommand(sparkContext, ["foo=bar"], {"spark.sql.shuffle.partitions": 200})
     anon = [x for x in cmd if x.startswith("--") or x == "spark-submit" or x.endswith(".jar")]
     self.assertEqual(anon, ["spark-submit", "--name", "--master", "--conf", "--conf", "--conf",
         "--conf", "--conf", "--conf", "--class", "/tmp/file.jar"])
     self.assertEqual(len(cmd), 24)
     self.assertEqual(cmd[20:], ["a", "b", "c", "foo=bar"])
Example #17
    def test_stats(self):
        job = Job.create(db.session, **self.jobOpts)
        self.opts["job_uid"] = job.uid
        timetable = Timetable.create(db.session, **self.opts)

        latestStats = None
        for x in range(10):
            spawnedJob = Job.create(db.session, **self.jobOpts)
            latestStats = TimetableStats(timetable_uid=timetable.uid, job_uid=spawnedJob.uid,
                createtime=x)
            db.session.add(latestStats)
            db.session.commit()
        # retrieve stats and compare with iterator
        stats = timetable.json()["stats"]
        self.assertEquals(stats["jobs"], 10)
        self.assertEquals(stats["last_time"], latestStats.createtime)
        self.assertEquals(stats["last_job_uid"], latestStats.job_uid)
Example #18
 def test_close(self):
     job = Job.create(db.session, **self.opts)
     self.assertEquals(job.status, Job.READY)
     Job.close(db.session, job)
     self.assertEquals(job.status, Job.CLOSED)
     # try closing already closed job
     with self.assertRaises(StandardError):
         Job.close(db.session, job)
     with self.assertRaises(StandardError):
         job.status = Job.RUNNING
         Job.close(db.session, job)
     with self.assertRaises(StandardError):
         job.status = Job.FINISHED
         Job.close(db.session, job)
Example #19
 def test_cancel(self):
     job = Job.create(db.session, **self.jobOpts)
     self.opts["job_uid"] = job.uid
     timetable = Timetable.create(db.session, **self.opts)
     Timetable.cancel(db.session, timetable)
     timetable.status = Timetable.CANCELLED
     # try cancelling already cancelled timetable
     with self.assertRaises(StandardError):
         Timetable.cancel(db.session, timetable)
Example #20
 def test_pause(self):
     job = Job.create(db.session, **self.jobOpts)
     self.opts["job_uid"] = job.uid
     timetable = Timetable.create(db.session, **self.opts)
     Timetable.pause(db.session, timetable)
     timetable.status = Timetable.PAUSED
     # try pausing already paused timetable
     with self.assertRaises(StandardError):
         Timetable.pause(db.session, timetable)
Example #21
 def __init__(self, infos: List[str]):
     self.name = infos.pop(0)
     self.komplete_info = infos.pop(0)
     jobs_and_machines = infos.pop(0).split(" ")
     self.job_count = int(jobs_and_machines[1])
     self.machine_count = int(jobs_and_machines[2])
     self.jobs = list()
     for job_id, job_info in enumerate(infos):
         self.jobs.append(Job(job_info, job_id + 1))
Example #22
 def test_get(self):
     job = Job.create(db.session, **self.jobOpts)
     self.opts["job_uid"] = job.uid
     timetable = Timetable.create(db.session, **self.opts)
     res = Timetable.get(db.session, timetable.uid)
     self.assertEquals(res.json(), timetable.json())
     # test non-existent key
     res = Timetable.get(db.session, "")
     self.assertEquals(res, None)
Example #23
 def test_resume(self):
     job = Job.create(db.session, **self.jobOpts)
     self.opts["job_uid"] = job.uid
     timetable = Timetable.create(db.session, **self.opts)
     timetable.status = Timetable.PAUSED
     Timetable.resume(db.session, timetable)
     timetable.status = Timetable.ACTIVE
     # try resuming already active timetable
     with self.assertRaises(StandardError):
         Timetable.resume(db.session, timetable)
Example #24
 def test_vasp_001_shell_job(self):
     """
     Extracts a job from a vasp calculation and asserts the results.
     """
     config = Job("External Job", os.path.join(FIXTURES_DIR,
                                               "vasp/test-001")).to_json()
     self._clean_job_config(config)
     self.assertDeepAlmostEqual(
         config,
         read_json(os.path.join(FIXTURES_DIR, "vasp", "shell-job.json")))
Example #25
 def test_json(self):
     job = Job.create(db.session, **self.jobOpts)
     self.opts["job_uid"] = job.uid
     timetable = Timetable.create(db.session, **self.opts)
     res = Timetable.get(db.session, timetable.uid).json()
     self.assertEquals(res["name"], timetable.name)
     self.assertEquals(res["status"], timetable.status)
     self.assertEquals(res["createtime"], timetable.createtime)
     self.assertEquals(res["canceltime"], timetable.canceltime)
     self.assertEquals(res["cron"], timetable.cronExpression().json())
     self.assertEquals(res["job"], timetable.job.json())
Example #26
 def test_json(self):
     job = Job.create(db.session, **self.opts)
     obj = job.json()
     self.assertEquals(obj["name"], job.name)
     self.assertEquals(obj["status"], job.status)
     self.assertEquals(obj["createtime"], job.createtime)
     self.assertEquals(obj["submittime"], job.submittime)
     self.assertEquals(obj["entrypoint"], job.entrypoint)
     self.assertEquals(obj["jar"], job.jar)
     self.assertEquals(obj["options"], job.getSparkOptions())
     self.assertEquals(obj["jobconf"], job.getJobConf())
Example #27
class TestJob(unittest.TestCase):
    """ Fixture for the Job class """

    def setUp(self):
        text = u'{"created_at":"Fri Oct 30 15:29:45 +0000 2015","id":659789756637822976,"id_str":"659789756637822976","text":"@IKEA complain https:\/\/t.co\/GzyHJC6jMI"}'
        self.job = Job(text)
        self.job.clean_data()

    def tearDown(self):
        self.job = None

    def test_print_job(self):
        """ Correct format ensures proper object consistency """
        expected_output = "@IKEA complain https://t.co/GzyHJC6jMI (timestamp: Fri Oct 30 15:29:45 +0000 2015)\n"
        str_job = str(self.job)
        self.assertEqual(str_job, expected_output)

    def test_validity(self):
        """ Check if validity flags for text are set """
        self.assertEqual(self.job.has_nonascii, False)
        self.assertEqual(self.job.is_valid, True)
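The fixture hands the Job a raw tweet as a JSON string, and the assertions fix both the __str__ format and the validity flags. A minimal, hypothetical sketch consistent with this test (not the project's actual Job class) would parse the JSON, keep text and created_at, and render them in __str__:

import json

# Hypothetical sketch of a tweet-backed Job consistent with TestJob above;
# the real implementation may differ.
class Job(object):
    def __init__(self, raw):
        data = json.loads(raw)          # json.loads resolves the \/ escapes
        self.text = data["text"]
        self.created_at = data["created_at"]
        self.has_nonascii = False
        self.is_valid = True

    def clean_data(self):
        # flag non-ASCII characters and mark empty text as invalid
        self.has_nonascii = any(ord(c) > 127 for c in self.text)
        self.is_valid = bool(self.text.strip())

    def __str__(self):
        return "%s (timestamp: %s)\n" % (self.text, self.created_at)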
Example #28
    def test_registerNewJob(self):
        job = Job.create(db.session, **self.jobOpts)
        self.opts["job_uid"] = job.uid
        timetable = Timetable.create(db.session, **self.opts)

        spawnedJob = None
        for x in range(7):
            spawnedJob = Timetable.registerNewJob(db.session, timetable)
            time.sleep(0.05)
        # retrieve stats and compare with iterator
        stats = timetable.json()["stats"]
        self.assertEquals(stats["jobs"], 7)
        self.assertEquals(stats["last_job_uid"], spawnedJob.uid)
Example #29
 def test_add(self):
     with self.assertRaises(StandardError):
         Timetable.create(db.session, **{})
     with self.assertRaises(StandardError):
         Timetable.create(db.session, **{"name": "test"})
     with self.assertRaises(StandardError):
         Timetable.create(db.session, **{"name": "test", "cron": "* * * * * *"})
     # test correct input, though it has more keys than required
     # should fail since no such job exists
     job = Job.create(db.session, **self.jobOpts)
     self.opts["job_uid"] = job.uid
     timetable = Timetable.create(db.session, **self.opts)
     self.assertTrue(timetable.createtime > utils.currentTimeMillis() - 5000)
     self.assertEquals(timetable.job.json(), job.json())
Example #30
 def test_jobCopy(self):
     job = Job.create(db.session, **self.opts)
     copy = job.jobCopy(name="a", status=Job.READY, priority=1, createtime=2L, submittime=2L)
     self.assertEquals(copy.uid, None)
     self.assertEquals(copy.name, "a")
     self.assertEquals(copy.status, Job.READY)
     self.assertEquals(copy.priority, 1)
     self.assertEquals(copy.createtime, 2L)
     self.assertEquals(copy.submittime, 2L)
     self.assertEquals(copy.sparkappid, None)
     self.assertEquals(copy.starttime, None)
     self.assertEquals(copy.finishtime, None)
     # these properties should be the same
     self.assertEquals(copy.options, job.options)
     self.assertEquals(copy.jobconf, job.jobconf)
     self.assertEquals(copy.entrypoint, job.entrypoint)
     self.assertEquals(copy.jar, job.jar)
Example #31
def start():
    session = SignallingSession(db)
    session.begin(subtransactions=True)
    # We fetch all jobs for the queue when invoked
    jobs = Job.listRunning(session)
    scheduler.logger.info("Fetched %s jobs to analyze", len(jobs))
    running = [x for x in jobs if x.status == Job.RUNNING]
    for x in running:
        x.status = Job.CLOSED
        session.commit()
        scheduler.logger.info("Running job '%s' is closed, cannot resolve process id", x.uid)
    session.commit()
    session.close()
    # Start sampler
    sampler.start()
    scheduler.logger.info("Job scheduler is started, refresh interval = %s, number of slots = %s",
        REFRESH_INTERVAL, NUM_SLOTS)
Example #32
 def test_list(self):
     i = 0L
     for x in range(10):
         Job.create(db.session, **self.opts)
     arr = Job.list(db.session, None)
     self.assertEquals(len(arr), 10)
     times = [x.createtime for x in arr]
     self.assertEquals(times, sorted(times, reverse=True))
     # test selecting status
     arr = Job.list(db.session, Job.READY, limit=1)
     self.assertEquals(len(arr), 1)
     arr = Job.list(db.session, Job.READY, limit=5)
     self.assertEquals(len(arr), 5)
     arr = Job.list(db.session, Job.READY, limit=0)
     self.assertEquals(len(arr), 10)
     arr = Job.list(db.session, Job.READY, limit=-1)
     self.assertEquals(len(arr), 10)
Example #33

# Make sure the game is properly defined
validate(src.utils.game_module)
# For debugging with heapy.
if args.debug:
    src.debug.init_debug(comm.Get_rank())
    send = src.debug.debug_send(comm.send)
    recv = src.debug.debug_recv(comm.recv)
    abort = src.debug.debug_abort(comm.Abort)

initial_position = src.utils.game_module.initial_position()

process = Process(comm.Get_rank(),
                  comm.Get_size(),
                  comm,
                  send,
                  recv,
                  abort,
                  stats_dir=args.statsdir)

if process.rank == process.root:
    initial_gamestate = GameState(GameState.INITIAL_POS)
    initial_job = Job(Job.LOOK_UP, initial_gamestate, process.rank,
                      Job.INITIAL_JOB_ID)
    process.add_job(initial_job)

process.run()

comm.Barrier()
Example #34
 def test_canClose(self):
     job = Job(**self.opts)
     self.assertEquals(job.canClose(), True)
     for status in [Job.CLOSED, Job.RUNNING, Job.FINISHED]:
         job.status = status
         self.assertEquals(job.canClose(), False)
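These assertions describe canClose as a pure status check: closing is allowed only while the job has not been closed, started, or finished. A predicate consistent with the test (illustrative, not necessarily the project's implementation):

# Illustrative predicate matching the assertions in test_canClose.
def canClose(self):
    return self.status not in (Job.CLOSED, Job.RUNNING, Job.FINISHED)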
Example #35
 def test_add(self):
     job = Job.create(db.session, **self.opts)
     arr = Job.list(db.session, None)
     self.assertTrue(len(arr), 1)
Example #36
def action(sampler):
    if not sampler:
        scheduler.logger.error("Sampler is undefined, exiting")
        return
    session = SignallingSession(db)
    try:
        sampler.logger.info("Start refreshing application state")
        sampler.incrementNumRuns()
        lock.acquire()
        for uid, pid in sampler.pool.items():
            if updateProcessStatus(pid) >= 0:
                job = Job.get(session, uid)
                if not job:
                    sampler.logger.warn("Job '%s' does not exist in database, updated skipped", uid)
                else:
                    Job.finish(session, job)
                sampler.removeFromPool(uid)
            else:
                sampler.logger.info("Process '%s' is still running, job uid: '%s'", pid, uid)
        # Check how many pids are left and compare against NUM_SLOTS: if no slots are free,
        # skip execution, otherwise proceed with the number of free slots
        freeSlots = NUM_SLOTS - len(sampler.pool)
        if freeSlots <= 0:
            sampler.logger.info("All %s slots are taken, cannot launch job, skipped", NUM_SLOTS)
            sampler.logger.debug("Free slots: %s, pool size: %s, numSlots: %s", freeSlots,
                len(sampler.pool), NUM_SLOTS)
        else:
            # Check how many jobs are currently running by querying the cluster status and the
            # number of running applications; if that count is at least NUM_SLOTS, skip execution,
            # otherwise compute the number of jobs to launch and proceed.
            sparkStatus = sparkContext.clusterStatus()
            if sparkStatus == DOWN:
                sampler.logger.info("Cluster %s[%s] is down, will try again later",
                    sparkContext.getMasterAddress(), sparkContext.getUiAddress())
            else:
                apps = sparkContext.clusterRunningApps()
                freeSlots = NUM_SLOTS - len(apps)
                if freeSlots <= 0:
                    sampler.logger.info("There are %s applications running already, cannot " + \
                        "launch job, skipped", len(apps))
                    sampler.logger.debug("Free slots: %s, apps: %s, numSlots: %s", freeSlots,
                        len(apps), NUM_SLOTS)
                else:
                    # Fetch active (runnable) jobs through the Job API, based on the number of free
                    # slots acquired earlier. Start the jobs in the list, if any; report when none are found.
                    currentTime = utils.currentTimeMillis()
                    sampler.logger.debug("Fetch jobs with session %s, free slots %s, time %s",
                        session, freeSlots, currentTime)
                    runnableJobs = Job.listRunnable(session, freeSlots, currentTime)
                    sampler.logger.info("Registering %s jobs", len(runnableJobs))
                    for job in runnableJobs:
                        pid = launchSparkJob(job)
                        Job.run(session, job)
                        sampler.addToPool(job.uid, pid)

        session.commit()
    except Exception as e:
        sampler.logger.error("Sampler encountered error, execution skipped")
        sampler.logger.exception(e.message)
    finally:
        lock.release()
        session.close()
        if sampler.enabled:
            sampler.logger.debug("Prepared to be invoked in %s seconds", sampler.interval)
            timer = Timer(sampler.interval, action, [sampler])
            timer.daemon = True
            timer.start()
        else:
            sampler.logger.info("Sampler stopped")
Example #37
import unittest
from src.job import Job

job_1 = Job(1, [1, 1, 1, 1, 10])
job_2 = Job(2, [1, 1, 1, 4, 8])
job_3 = Job(3, [1, 1, 1, 4, 8])
job_2b = Job(2, [1, 1, 1, 4, 8])


class TestJobClassMethods(unittest.TestCase):
    def test_eq(self):
        self.assertNotEqual(job_1, job_2)
        self.assertNotEqual(job_2, job_3)
        self.assertEqual(job_2, job_2b)


if __name__ == '__main__':
    unittest.main()
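The equality test treats one job as equal to another only when both the identifier and the processing times match: job_2 and job_3 share durations but not ids, while job_2b duplicates job_2 exactly. A minimal __eq__ consistent with that behaviour (a hypothetical sketch, not the actual src.job implementation; attribute names are illustrative):

# Hypothetical sketch of equality consistent with test_eq above.
class Job(object):
    def __init__(self, job_id, durations):
        self.job_id = job_id                  # illustrative attribute names
        self.durations = list(durations)

    def __eq__(self, other):
        return (isinstance(other, Job)
                and self.job_id == other.job_id
                and self.durations == other.durations)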
Example #38
import unittest
from src import initial_population as ip
from src.job import Job
from src.flowshop import Flowshop
from src.ordonnancement import Ordonnancement

MAXINT = 10000

job_1 = Job(1, [3, 2, 1, 2, 10])
job_2 = Job(2, [8, 4, 0, 2, 8])
job_3 = Job(3, [12, 1, 7, 5, 2])
job_4 = Job(4, [2, 5, 9, 3, 3])
job_5 = Job(5, [1, 3, 1, 1, 1])
l_job = [job_1, job_2, job_3, job_4, job_5]
flowshop_1 = Flowshop(5, 5, l_job)
flowshop_2 = Flowshop()
flowshop_2.definir_par("data\\dataset3\\jeu2.txt")

seq_1 = [job_3, job_1, job_5, job_2, job_4]
seq_2 = [job_1, job_2, job_4, job_3, job_5]
seq_3 = [job_1, job_4, job_3, job_2, job_5]


class MyTestCase(unittest.TestCase):
    def test_initial_population_warnings(self):
        size = 100
        with self.assertWarns(Warning):  # Deterministic prop too high
            ip.initial_pop(flowshop_1, 0.5, 0.5, False, size)
        size = 150
        with self.assertWarns(Warning):  # Size too high
            ip.initial_pop(flowshop_1, 1.0, 0.0, False, size)
Example #39
import unittest
from src.ordonnancement import Ordonnancement
from src.job import Job

job_1 = Job(1, [1, 1, 1, 1, 10])
job_2 = Job(2, [1, 1, 1, 4, 8])
job_3 = Job(3, [2, 1, 3, 5, 1])
job_4 = Job(4, [2, 5, 5, 3, 3])
job_5 = Job(5, [1, 1, 3, 7, 1])
ord_1 = Ordonnancement(job_1.nb_op)
ord_2 = Ordonnancement(job_1.nb_op)
ord_3 = Ordonnancement(job_1.nb_op)
ord_1.ordonnancer_liste_job([job_2, job_3, job_4, job_5, job_1])
ord_2.ordonnancer_liste_job([job_1, job_4, job_5, job_2, job_3])
ord_3.ordonnancer_liste_job([job_2, job_3, job_4, job_5, job_1])


class TestOrdonnancementClassMethods(unittest.TestCase):
    def test_eq(self):
        self.assertEqual(ord_1, ord_3)
        self.assertNotEqual(ord_1, ord_2)


if __name__ == '__main__':
    unittest.main()
Example #40
    :param ordonnancement: an Ordonnancement object where the scheduling of all jobs is done (in other words which
    represents a solution to an instance of the flow-shop permutation problem)
    :param file_path: path where the html file corresponding to the representation of the solution is stored (it needs
    to have the character "/" at the end or be the empty string)
    :param file_name: name of the html file corresponding to the representation of the solution
    :param show_durations: boolean which indicates whether the durations of the tasks have to be represented (True by default)
    """
    figure, figure_name = create_solution_figure(ordonnancement,
                                                 show_durations)
    if not file_name == "":
        figure_name = file_name
    figure.write_html(file_path + figure_name + '.html')
    return None


# "main" to give an example of how to use the "visualisation.py" methods
if __name__ == "__main__":
    a = Job(1, [1, 1, 1, 1, 10])
    b = Job(2, [1, 1, 1, 4, 8])
    c = Job(3, [2, 1, 3, 5, 1])
    d = Job(4, [2, 5, 5, 3, 3])
    e = Job(5, [1, 1, 3, 7, 1])
    scheduling = Ordonnancement(5)
    scheduling.ordonnancer_job(a)
    scheduling.ordonnancer_job(b)
    scheduling.ordonnancer_job(c)
    scheduling.ordonnancer_job(d)
    scheduling.ordonnancer_job(e)
    # show_solution_figure(scheduling)
    save_solution_as_html(scheduling)
Example #41
 def setUp(self):
     text = u'{"created_at":"Fri Oct 30 15:29:45 +0000 2015","id":659789756637822976,"id_str":"659789756637822976","text":"@IKEA complain https:\/\/t.co\/GzyHJC6jMI"}'
     self.job = Job(text)
     self.job.clean_data()
Example #42
 def test_getJobConf(self):
     job = Job(**self.opts)
     self.assertEquals(job.getJobConf(), self.opts["jobconf"])