Example no. 1
def test_create_meeting_fail(
        client: tp.Generator, create_meeting_request_body: dict[str,
                                                                str]) -> None:
    with client as client:
        for _ in range(4):
            client.post("/meeting", json=create_meeting_request_body)
        response = client.post("/meeting", json=create_meeting_request_body)

    assert response.status_code == 409
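These tests annotate client as tp.Generator because the fixture supplying it is a pytest yield fixture. A minimal sketch of such a fixture, assuming a FastAPI app (the fixture body and the main.app import are hypothetical, not shown in the source):

import typing as tp

import pytest
from fastapi.testclient import TestClient

from main import app  # hypothetical application module


@pytest.fixture
def client() -> tp.Generator[TestClient, None, None]:
    # Yielding (rather than returning) the client is why the tests
    # annotate the parameter as tp.Generator
    yield TestClient(app)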
Example no. 2
    def save_from(self, source: typing.Generator, rechunk=True, executor=None):
        """Iterate over source and save the results under key
        along with metadata
        """
        pending = []
        exhausted = False
        chunk_i = 0
        try:
            while not exhausted:
                chunk = None
                try:
                    if rechunk and self.allow_rechunk:
                        while (chunk is None or
                               chunk.data.nbytes < chunk.target_size_mb * 1e6):
                            chunk = strax.Chunk.concatenate(
                                [chunk, next(source)])
                    else:
                        chunk = next(source)
                except StopIteration:
                    exhausted = True

                if chunk is None:
                    break

                new_f = self.save(chunk=chunk,
                                  chunk_i=chunk_i,
                                  executor=executor)
                pending = [f for f in pending if not f.done()]
                if new_f is not None:
                    pending += [new_f]
                chunk_i += 1

        except strax.MailboxKilled:
            # Write exception (with close), but exit gracefully.
            # One traceback on screen is enough
            self.close(wait_for=pending)

        except Exception as e:
            # log exception for the final check
            self.got_exception = e
            # Throw the exception back into the mailbox
            # (hoping that it is still listening...)
            source.throw(e)
            raise e

        finally:
            if not self.closed:
                self.close(wait_for=pending)
Example no. 3
def route_request(client: typing.Generator,
                  route: str,
                  send_json: typing.Optional[dict] = None,
                  allow_msg_errors=False,
                  allow_msg_warnings=False):
    default_json = {
        # auth defaults
        "access_token": "test",
        "username": "******"
    }

    send_json = {**default_json, **(send_json or {})}

    res = client.post(route, json=send_json, follow_redirects=True)

    res_json = res.get_json()

    assert "messages" in res_json, "No messages in response"
    res_msgs = res_json["messages"]

    assert "data" in res_json, "No data in resonse"
    res_data = res_json["data"]

    if not allow_msg_errors:
        raise_errors_from_res_msg(res_msgs)

    if not allow_msg_warnings:
        raise_warnings_from_res_msg(res_msgs)

    return res_data, res_msgs
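The raise_errors_from_res_msg and raise_warnings_from_res_msg helpers are not shown. A hypothetical sketch, assuming each message is a dict carrying a "type" field (the message schema is an assumption):

def raise_errors_from_res_msg(res_msgs: list) -> None:
    # Assumed schema: messages are dicts with a "type" key
    errors = [m for m in res_msgs if m.get("type") == "error"]
    assert not errors, f"Response contained errors: {errors}"


def raise_warnings_from_res_msg(res_msgs: list) -> None:
    warnings = [m for m in res_msgs if m.get("type") == "warning"]
    assert not warnings, f"Response contained warnings: {warnings}"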
Example no. 4
def test_fetch_all_meetings(client: tp.Generator) -> None:
    with client as client:
        response = client.get("/meetings")

    assert response.status_code == 200

    response_json = response.json()
    assert len(response_json) == 5
Example no. 5
    def transform_list(self, generator):
        """
        Takes a generator of metrics and transforms each one. It also keeps
        the wrapped generator's return value, in case a caller needs to do
        "= yield from".
        """
        generator_with_return = Generator(generator)
        for metric in generator_with_return:
            yield from self.transform(metric)
        return generator_with_return.value
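The Generator wrapper used above is not shown. A minimal sketch, assuming it exists to capture the wrapped generator's return value (the PEP 380 StopIteration value) while still being iterable:

class Generator:
    def __init__(self, gen):
        self.gen = gen
        self.value = None

    def __iter__(self):
        # "yield from" delegates iteration and stores the wrapped
        # generator's return value once it is exhausted
        self.value = yield from self.gen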
Example no. 6
    def __init__(self, seed: int, m: int, n: int) -> None:
        # Instance of rng class
        self.__rng = Generator(int(seed))

        # Number of tasks
        self.n = int(n)
        # Number of machines
        self.m = int(m)

        self.data = [[None for _ in range(2)] for _ in range(self.n)]
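The Generator here is neither typing.Generator nor NumPy's: it is a seeded RNG exposing nextInt and nextFloat (see Example no. 10). A minimal sketch compatible with those calls, assuming it wraps Python's random module:

import random


class Generator:
    def __init__(self, seed: int) -> None:
        self._rng = random.Random(seed)

    def nextInt(self, low: int, high: int) -> int:
        # Inclusive bounds, matching rng.nextInt(0, len(N) - 1) in Example no. 10
        return self._rng.randint(low, high)

    def nextFloat(self, low: float, high: float) -> float:
        return self._rng.uniform(low, high)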
Example no. 7
from typing import Generator


def fib_generator(n: int) -> Generator[int, None, None]:
    """Calculate Fibonacci numbers using Python generators"""
    if n in {0, 1}:
        yield n
    elif n < 0:
        raise NegativeNumberError
    else:
        prev, curr = 0, 1
        for _ in range(1, n):
            prev, curr = curr, prev + curr
            yield curr
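A short usage sketch: for n >= 2 the generator yields F(2) through F(n), and for n in {0, 1} it yields n itself.

assert list(fib_generator(7)) == [1, 2, 3, 5, 8, 13]
assert list(fib_generator(1)) == [1]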
Example no. 8
def test_stop_meeting(mocker: MockerFixture, client: tp.Generator) -> None:
    mocked_api_call = mocker.patch(
        "routers.zoom.zoom_service.zoom.stop_meeting")
    mocked_api_call.return_value = {}

    meeting_id = "123"

    with client as client:
        response = client.delete(f"/meeting/{meeting_id}")

    assert response.status_code == 200
Example no. 9
def test_stop_all_meetings(mocker: MockerFixture,
                           client: tp.Generator) -> None:
    mocked_api_call = mocker.patch(
        "services.meeting.zoom_service.zoom.stop_meeting")
    mocked_api_call.return_value = {}

    body = {"meetings_id": ["123", "312"]}

    with client as client:
        response = client.put("/meetings", json=body)

    assert response.status_code == 200
Example no. 10
def simulated_annealing(inst: Instance, seed, epoch=5):
    rng = Generator(int(seed))

    N = inst.data
    # greedy initial order: sort tasks by total processing time
    N.sort(key=lambda x: sum(x[1]))

    T = 1550.0
    Tend = 20.0

    L = int(epoch)

    it = 0

    pi = list(N)    # current permutation
    pi_s = list(N)  # best permutation found so far

    while T > Tend:
        for _ in range(L):
            i = rng.nextInt(0, len(N) - 1)
            j = rng.nextInt(0, len(N) - 1)
            pi_new = pi[:]  # copy, so a rejected swap does not mutate pi
            pi_new[i], pi_new[j] = pi_new[j], pi_new[i]

            C_max_new = C_max(pi_new)
            C_max_old = C_max(pi)

            if C_max_new > C_max_old:
                # worse makespan: accept with probability exp(delta / T)
                r = rng.nextFloat(0, 1)
                delta = C_max_old - C_max_new  # negative for a worse solution
                if r >= math.exp(delta / T):
                    pi_new = pi  # reject the move

            pi = pi_new

            if C_max(pi) < C_max(pi_s):
                pi_s = pi[:]  # keep a copy of the new best solution

        it += 1
        T = T / math.log(it + 1)  # cooling step

    return pi_s
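C_max is not defined in the snippet. A plausible sketch, assuming each permutation element is a [task_id, processing_times] pair (matching the data layout in Example no. 6) and that the instance is a flow shop:

def C_max(pi: list) -> int:
    """Flow-shop makespan: completion time of the last task on the last machine."""
    if not pi:
        return 0
    m = len(pi[0][1])     # number of machines
    completion = [0] * m  # running completion time per machine
    for _, times in pi:
        for machine, t in enumerate(times):
            prev = completion[machine - 1] if machine else 0
            completion[machine] = max(completion[machine], prev) + t
    return completion[-1]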
Example no. 11
def test_create_meeting(
    mocker: MockerFixture,
    client: tp.Generator,
    create_meeting_request_body: dict[str, str],
    create_meeting_method_response: dict[str, str],
) -> None:
    mocked_api_call = mocker.patch(
        "services.meeting.zoom_service.zoom.create_meeting")
    mocked_api_call.return_value = create_meeting_method_response

    with client as client:
        response = client.post("/meeting", json=create_meeting_request_body)

    assert response.status_code == 200

    response_json = response.json()
    assert response_json["meeting_id"] == "123"
Example no. 12
def test_fetch_meeting(
    mocker: MockerFixture,
    client: tp.Generator,
    create_meeting_method_response: dict[str, str],
) -> None:
    mocked_api_call = mocker.patch(
        "routers.zoom.zoom_service.zoom.get_meeting")
    mocked_api_call.return_value = create_meeting_method_response

    meeting_id = "123"

    with client as client:
        response = client.get(f"/meeting/{meeting_id}")

    assert response.status_code == 200

    response_json = response.json()
    assert response_json["start_url"] == "https://test.com/"
Example no. 13
def main(args):

    device = "cpu" if args.no_cuda else "cuda"
    start_t = time.time()
    generator = Generator().to(device)
    end_t = time.time()
    print("init time : {}".format(end_t - start_t))

    start_t = time.time()
    pretrain_model = flow.load(args.model_path)
    generator.load_state_dict(pretrain_model)
    end_t = time.time()
    print("load params time : {}".format(end_t - start_t))

    generator.eval()

    start_t = time.time()
    z = to_tensor(np.random.normal(0, 1, size=(args.batch_size, 100)),
                  False).to(device)
    predictions = to_numpy(generator(z), False)
    end_t = time.time()
    print("infer time : {}".format(end_t - start_t))
    save_images(predictions, args.batch_size, args.save_path)
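The to_tensor, to_numpy, and save_images helpers are not shown. A minimal sketch of the first two, assuming OneFlow's torch-like tensor API (signatures inferred from the call sites above):

import numpy as np
import oneflow as flow


def to_tensor(array: np.ndarray, requires_grad: bool):
    tensor = flow.tensor(array, dtype=flow.float32)
    tensor.requires_grad = requires_grad
    return tensor


def to_numpy(tensor, keep_graph: bool):
    # Detach from the autograd graph unless the caller wants to keep it
    if not keep_graph:
        tensor = tensor.detach()
    return tensor.numpy()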
Example no. 14
def main():
    # 31536000 loop iterations represent 1 year (one step per second)
    nodelist = []

    for i in range(N):
        node = Node(i, C, G, M, NICB)
        nodelist.append(node)

    Generator = Job_Generator()
    '''
    Generate jobs and tasks in a fixed way.
    jid and tid start from 1; index starts from 0.
    '''
    jobs = Generator.fixed_jobs(J, T, 3, 4, 2, 1024 * 1024 * 1024, 1024 * 1024)
    '''
    Generate each job's start time.

    /*****/
    We assume all tasks of a job start at the same time in this model,
    so we must schedule all of those tasks into the cluster at the same
    time (gang scheduling for AI & HPC).
    /*****/

    If you want to modify this so that there is some order or dependency
    between the tasks of a job:
    1. keep the job in the jobQ even if some of its tasks have already
       been scheduled onto nodes;
    2. set the order of those tasks.
    /*****/
    '''
    start_times = np.zeros(len(jobs))
    '''
    Generate the scheduler from the policy.
    '''
    basic_policy = Basic_Policy_Share()
    scheduler = Scheduler(basic_policy)

    # '''
    # generate task inference as the profiling data
    # '''

    # start simulation
    M40_4 = Cluster(nodelist, scheduler, J * T)
    timestamp = 0

    #while (M40_4.finalize() == False):
    jobs_need_add = []
    for index in range(len(start_times)):
        if (start_times[index] == timestamp):
            jobs_need_add.append(jobs[index])
    if (len(jobs_need_add) != 0):
        M40_4.add_jobs(jobs_need_add)
    for node in nodelist:
        node.print_info()

    # never free
    M40_4.step()
    timestamp += 1

    print(timestamp)
    waiting_times = []
    for task in M40_4.finish_task:
        waiting_times.append(task.waitingtime)
    print("Average Respond Time {}".format(np.mean(waiting_times)))
    print("Throughput {}".format(round(J / timestamp, 2)))
Example no. 15
    def save_from(self, source: typing.Generator, rechunk=True, executor=None):
        """Iterate over source and save the results under key
        along with metadata
        """
        pending = []
        exhausted = False
        chunk_i = 0

        run_id = self.md['run_id']
        _is_super_run = run_id.startswith('_')
        try:
            while not exhausted:
                chunk = None

                try:
                    if rechunk and self.allow_rechunk:
                        while (chunk is None or
                                chunk.data.nbytes < chunk.target_size_mb*1e6):
                            next_chunk = next(source)

                            if _is_super_run:
                                # If we are creating a superrun, we load data from subruns
                                # and the loaded subrun chunk becomes a superrun chunk:
                                next_chunk = strax.transform_chunk_to_superrun_chunk(
                                    run_id, next_chunk)
                            chunk = strax.Chunk.concatenate([chunk, next_chunk])
                    else:
                        chunk = next(source)
                        if _is_super_run:
                            # If we are creating a superrun, we load data from subruns
                            # and the loaded subrun chunk becomes a superrun chunk:
                            chunk = strax.transform_chunk_to_superrun_chunk(run_id, chunk)

                except StopIteration:
                    exhausted = True

                if chunk is None:
                    break

                new_f = self.save(chunk=chunk,
                                  chunk_i=chunk_i, executor=executor)
                pending = [f for f in pending if not f.done()]
                if new_f is not None:
                    pending += [new_f]
                chunk_i += 1

        except strax.MailboxKilled:
            # Write exception (with close), but exit gracefully.
            # One traceback on screen is enough
            self.close(wait_for=pending)

        except Exception as e:
            # log exception for the final check
            self.got_exception = e
            # Throw the exception back into the mailbox
            # (hoping that it is still listening...)
            source.throw(e)
            raise e

        finally:
            if not self.closed:
                self.close(wait_for=pending)