Example #1
def randint(x, y, onion=3):
    """Random integer generator.

    Generates a random integer and reuses it as the seed for the next
    draw.  The process repeats until "onion" (the number of layers)
    reaches 0 or less.
    """
    if x == y:
        return x
    base = _randbase(x, y)
    base_ = None
    default_onion = onion
    while onion > 0:
        n = _randbase(0, 10)
        # Compare against None explicitly: a drawn value of 0 is valid
        # and must not be mistaken for "no draw yet".
        seed = base if base_ is None else base_
        if n <= 5:
            base_ = _Random(seed).randint(x, y)
        else:
            base_ = _SystemRandom(seed).randint(x, y)
        onion -= 1
    if default_onion <= 0:
        base_ = randint(x, y, 3)  # fall back to the default layer count
    return base_
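The snippet depends on names the page does not show (_randbase, _Random, _SystemRandom). A minimal scaffold that makes it runnable, assuming _randbase is just a uniform draw (the scaffold is a guess, not the original project's code):

from random import Random as _Random, SystemRandom as _SystemRandom

def _randbase(x, y):
    # Plain uniform draw used to seed the layered generators.
    return _Random().randint(x, y)

print(randint(1, 100))            # one layered draw
print(randint(1, 100, onion=5))   # more reseeding layers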
Example #2
def _run_packet_mode(encoder, decoder, recoder):
    
    c1 = ChannelInfo()
    c2 = ChannelInfo()
    random_generator_1 = _Random()
    random_generator_2 = _Random()

    recoder_buffer = []
    dec_start = None
    dec_end = None
    
    #first_null = encoder.genPacket()
    #assert(first_null == '')

    while not decoder.complete():
        for x in xrange(M):
            packet = encoder.genPacket()
            assert(packet != '')
            c1.num_sent += 1
            
            # channel 1 loss
            if random_generator_1.random() > LOSS_1:
                recoder_buffer.append(packet)
                c1.num_received += 1

            if len(recoder_buffer) == 0:
                continue

            recoder_data = ''.join(recoder_buffer)
            recoded_packet = recoder.genPacket(recoder_data, len(recoder_buffer))
            c2.num_sent += 1

            # channel 2 loss
            if random_generator_2.random() > LOSS_2:
                
                decoder.receivePacket(recoded_packet)
                                
                c2.num_received += 1
                if decoder.complete():
                    break

        del recoder_buffer[:]  # clear the relay buffer for the next batch
        packet = encoder.genPacket()

        if not decoder.complete():
            assert(packet == '')

    return decoder.buf, c1, c2
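Examples #2-#5 and #7-#8 all assume the same module-level scaffolding that the page does not show. A plausible minimal version, assuming M is the batch size and LOSS_1/LOSS_2 are per-hop drop probabilities (the values are illustrative, not the project's):

from random import Random as _Random

M = 16         # packets generated per batch (assumed)
LOSS_1 = 0.1   # drop probability on the source-to-relay hop (assumed)
LOSS_2 = 0.2   # drop probability on the relay-to-sink hop (assumed)

class ChannelInfo(object):
    """Sent/received counters for one lossy channel."""
    def __init__(self):
        self.num_sent = 0
        self.num_received = 0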
Example #3
File: sim.py Project: sunkairan/DAF_Codes
def _run_packet_mode(encoder, decoder, recoder):

    c1 = ChannelInfo()
    c2 = ChannelInfo()
    random_generator_1 = _Random()
    random_generator_2 = _Random()

    recoder_buffer = []
    dec_start = None
    dec_end = None

    # first_null = encoder.genPacket()
    # assert(first_null == '')

    while not decoder.complete():
        for x in xrange(M):
            packet = encoder.genPacket()
            assert packet != ""
            c1.num_sent += 1

            # channel 1 loss
            if random_generator_1.random() > LOSS_1:
                recoder_buffer.append(packet)
                c1.num_received += 1

            if len(recoder_buffer) == 0:
                continue

            recoder_data = "".join(recoder_buffer)
            recoded_packet = recoder.genPacket(recoder_data, len(recoder_buffer))
            c2.num_sent += 1

            # channel 2 loss
            if random_generator_2.random() > LOSS_2:

                decoder.receivePacket(recoded_packet)

                c2.num_received += 1
                if decoder.complete():
                    break

        del recoder_buffer[:]  # clear the relay buffer for the next batch
        packet = encoder.genPacket()

        if not decoder.complete():
            assert packet == ""

    return decoder.buf, c1, c2
Example #4
File: sim.py Project: sunkairan/DAF_Codes
def _run_one_hop(encoder, decoder):
    c1 = ChannelInfo()
    random_generator_1 = _Random()

    dec_start = None
    dec_end = None

    # first_null = encoder.genPacket()
    # assert(first_null == '')

    while not decoder.complete():
        for x in xrange(M):
            packet = encoder.genPacket()
            assert packet != ""
            c1.num_sent += 1

            # channel 1 loss
            if random_generator_1.random() > LOSS_1:
                decoder.receivePacket(packet)
                c1.num_received += 1
                if decoder.complete():
                    break

        # generate a null packet
        packet = encoder.genPacket()
        if not decoder.complete():
            assert packet == ""

    return decoder.buf, c1
Example #5
def _run_one_hop(encoder, decoder):
    c1 = ChannelInfo()
    random_generator_1 = _Random()

    dec_start = None
    dec_end = None
    
    #first_null = encoder.genPacket()
    #assert(first_null == '')

    while not decoder.complete():
        for x in xrange(M):
            packet = encoder.genPacket()
            assert(packet != '')
            c1.num_sent += 1

            # channel 1 loss
            if random_generator_1.random() > LOSS_1:
                decoder.receivePacket(packet)
                c1.num_received += 1
                if decoder.complete():
                    break

        # generate a null packet
        packet = encoder.genPacket()
        if not decoder.complete():
            assert(packet == '')

    return decoder.buf, c1
Example #6
File: Math.py Project: gonzaponte/Python
 def _FirstMiddlePoint( self, a, c ):
     '''
         Gets a random point within the interval at which the function
         takes a smaller value than at either limit, to start the iteration.
     '''
     R = _Random()
     limit = min( self.fun(a), self.fun(c) )
     while True:
         # Draw from the local generator (the original left R unused
         # and called the random module instead).
         point = R.uniform( a, c )
         if self.fun(point) < limit:
             return point
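This is plain rejection sampling for bracketing a minimum. A self-contained sketch of the behavior, assuming a host class whose fun attribute is the target function (the class here is invented for illustration):

from random import Random as _Random

class _BracketSearch(object):
    def __init__(self, fun):
        self.fun = fun

    def _FirstMiddlePoint(self, a, c):
        R = _Random()
        limit = min(self.fun(a), self.fun(c))
        while True:
            point = R.uniform(a, c)
            if self.fun(point) < limit:
                return point

b = _BracketSearch(lambda x: (x - 2) ** 2)._FirstMiddlePoint(0.0, 5.0)
print(0.0 < b < 5.0)  # True; b is inside (0, 5) and f(b) < min(f(0), f(5))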
Example #7
def _run_batch_mode(encoder, decoder, recoder):

    c1 = ChannelInfo()
    c2 = ChannelInfo()
    random_generator_1 = _Random()
    random_generator_2 = _Random()

    recoder_buffer = []

    #first_null = encoder.genPacket()
    #assert(first_null == '')

    while True:
        # channel 1 
        for i in xrange(M):
            packet = encoder.genPacket()
            assert(packet != '')
            c1.num_sent += 1
            if random_generator_1.random() >= LOSS_1:
                recoder_buffer.append(packet)
                c1.num_received += 1

        # channel 2
        if len(recoder_buffer) == 0:
            continue

        recoder_data = ''.join(recoder_buffer)
        for i in xrange(M):
            recoded_packet = recoder.genPacket(recoder_data, len(recoder_buffer))
            c2.num_sent += 1
            if random_generator_2.random() >= LOSS_2:
                decoder.receivePacket(recoded_packet)
                c2.num_received += 1

                # bye bye
                if decoder.complete():
                    return decoder.buf, c1, c2

        # prepare to start a new batch
        packet = encoder.genPacket()
        assert(packet == '')
        del recoder_buffer[:]  # clear the relay buffer for the next batch
Example #8
File: sim.py Project: sunkairan/DAF_Codes
def _run_batch_mode(encoder, decoder, recoder):

    c1 = ChannelInfo()
    c2 = ChannelInfo()
    random_generator_1 = _Random()
    random_generator_2 = _Random()

    recoder_buffer = []

    # first_null = encoder.genPacket()
    # assert(first_null == '')

    while True:
        # channel 1
        for i in xrange(M):
            packet = encoder.genPacket()
            assert packet != ""
            c1.num_sent += 1
            if random_generator_1.random() >= LOSS_1:
                recoder_buffer.append(packet)
                c1.num_received += 1

        # channel 2
        if len(recoder_buffer) == 0:
            continue

        recoder_data = "".join(recoder_buffer)
        for i in xrange(M):
            recoded_packet = recoder.genPacket(recoder_data, len(recoder_buffer))
            c2.num_sent += 1
            if random_generator_2.random() >= LOSS_2:
                decoder.receivePacket(recoded_packet)
                c2.num_received += 1

                # bye bye
                if decoder.complete():
                    return decoder.buf, c1, c2

        # prepare to start a new batch
        packet = encoder.genPacket()
        assert packet == ""
        del recoder_buffer[:]  # clear the relay buffer for the next batch
Example #9
 def rng(self):
     if _is_jython:
         # A JVM run cannot determine or change its pid, so use a dummy value.
         cur_pid = 1
     else:
         cur_pid = _os.getpid()
     
     if cur_pid != getattr(self, '_rng_pid', None):
         self._rng = _Random()
         self._rng_pid = cur_pid
     return self._rng
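This rng property pattern recurs throughout the page: cache one generator per process id so that a forked child does not replay the parent's random stream. A minimal self-contained sketch (the class name is invented):

import os as _os
from random import Random as _Random

class _PerProcessRng(object):
    @property
    def rng(self):
        # Recreate the generator whenever the pid changes, so a forked
        # child gets fresh state instead of the parent's sequence.
        cur_pid = _os.getpid()
        if cur_pid != getattr(self, '_rng_pid', None):
            self._rng = _Random()
            self._rng_pid = cur_pid
        return self._rng

src = _PerProcessRng()
assert src.rng is src.rng  # cached within a single process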
Example #10
 def rng(self):
     if _os.sys.platform.startswith("java"):
         # A JVM run cannot determine or change its pid, so use a dummy value.
         cur_pid = 1
     else:
         cur_pid = _os.getpid()
     
     if cur_pid != getattr(self, '_rng_pid', None):
         self._rng = _Random()
         self._rng_pid = cur_pid
     return self._rng
Example #11
 def __init__(self,
              characters="abcdefghijklmnopqrstuvwxyz0123456789_",
              length=8,
              rng=None):
     if rng is None:
         rng = _Random()
     if hasattr(_os, "fork"):
         # prevent same state after fork
         _os.register_at_fork(after_in_child=rng.seed)
     self.rng = rng
     self.characters = characters
     self.length = length
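os.register_at_fork(after_in_child=rng.seed) reseeds the child's generator right after a fork, achieving the same goal as the pid-checking property above. A small demonstration sketch (Python 3.7+, POSIX only since it calls os.fork):

import os
from random import Random

rng = Random()
if hasattr(os, "fork"):
    # Reseed in the child so parent and child streams diverge.
    os.register_at_fork(after_in_child=rng.seed)
    pid = os.fork()
    if pid == 0:
        print("child:", rng.random())
        os._exit(0)
    os.waitpid(pid, 0)
    print("parent:", rng.random())  # differs from the child's value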
Example #12
def _RandomNameSequence():
    """Generate an endless sequence of unpredictable strings which
    can safely be incorporated into file names.  Each string is 8
    characters long.  Multiple threads and forked processes can
    safely use the same instance at the same time."""

    characters = "abcdefghijklmnopqrstuvwxyz0123456789_"
    rng_pid = None
    while True:
        cur_pid = _os.getpid()
        if cur_pid != rng_pid:
            choose = _Random().choice
            rng_pid = cur_pid
        letters = [choose(characters) for dummy in range(8)]
        yield ''.join(letters)
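The generator only needs _os and _Random in scope; given those imports, usage is plain iteration (the printed value is illustrative):

import os as _os
from random import Random as _Random

names = _RandomNameSequence()
print(next(names))  # e.g. 'k3v90_ab': 8 chars from the alphabet above
print(next(names))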
Example #13
    def validate_output(self):
        # Use validator to validate the data in at-least-once mode
        # save sink data to a file
        if self.config['validation_cmd']:
            # TODO: move to validations.py
            # TODO: This forces _every_ test to save its validated file to artifacts.
            # remove it once the bug with validation failing but passing on the artifact
            # is resolved.
            #out_file = os.path.join(cluster.res_dir, 'received.txt')
            base_path = self.cluster.res_dir
            makedirs_if_not_exists(base_path)
            chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
            rng = _Random()
            random_str = ''.join([rng.choice(chars) for _ in range(8)])
            out_name = 'received_{}.txt'.format(random_str)
            out_file = os.path.join(base_path, out_name)
            logging.info("Saving validation output to {}".format(out_file))
            out_files = self.cluster.sinks[0].save(out_file)

            with open(os.path.join(self.cluster.res_dir, "ops.log"),
                      "wt") as f:
                for op in self.cluster.ops:
                    f.write("{}\n".format(op))

            # Validate captured output
            logging.info("Validating output")
            cmd_validate = self.config['validation_cmd'].format(
                out_file=self.cluster.res_dir)
            res = run_shell_cmd(cmd_validate)
            try:
                assert res.success
                logging.info("Validation successful")
                for out_file in out_files:
                    try:
                        os.remove(out_file)
                        logging.info(
                            "Removed validation file: {}".format(out_file))
                    except OSError:
                        # Catch only filesystem errors; a bare except would
                        # also swallow KeyboardInterrupt.
                        logging.info(
                            "Failed to remove file: {}".format(out_file))
            except AssertionError:
                err = AssertionError('Validation failed with the following '
                                     'error:\n{}'.format(res.output))
                logging.exception(err)
                raise AssertionError("Validation failed")
Example #14
 def rng(self):
     cur_pid = _os.getpid()
     if cur_pid != getattr(self, "_rng_pid", None):
         self._rng = _Random()
         self._rng_pid = cur_pid
     return self._rng
Example #15
 def __init__(self):
     self.mutex = _allocate_lock()
     self.rng = _Random()
     self.normcase = _os.path.normcase
Example #16
 def __init__(self):
     self.rng = _Random()
Example #17
 def rng(self) -> _Random:
     cur_pid = _os.getpid()
     if cur_pid != getattr(self, '_rng_pid', None):
         self._rng = _Random()
         self._rng_pid = cur_pid
     return self._rng
Example #18
def _run(persistent_data,
         res_ops,
         command,
         ops=[],
         initial=None,
         source_type='tcp',
         source_name='Detector',
         source_number=1,
         partitions=40,
         validation_cmd=False,
         sender_mps=1000,
         sender_interval=0.01):
    # set global flag _OUTPUT_TYPE based on application command
    # [TODO] make this less coupled and brittle
    global _OUTPUT_TYPE
    _OUTPUT_TYPE = "int" if "window_detector" in command else "array"

    host = '127.0.0.1'
    sinks = 1
    sink_mode = 'framed'
    split_streams = True
    batch_size = int(sender_mps * sender_interval)
    logging.debug("batch_size is {batch_size}".format(batch_size=batch_size))

    # If validation_cmd is False, it remains False and no validation is run
    logging.debug("Validation command is: {validation_cmd}".format(
        validation_cmd=validation_cmd))
    logging.debug("Source_type: {source_type}".format(source_type=source_type))
    logging.debug("source_name: {source_name}".format(source_name=source_name))
    logging.debug(
        "source_number: {source_number}".format(source_number=source_number))
    logging.debug("partitions: {partitions}".format(partitions=partitions))

    if not isinstance(ops, (list, tuple)):
        raise TypeError("ops must be a list or tuple of operations")

    # determine whether there is log rotation in the ops
    log_rotation = any(isinstance(op, Rotate) for op in ops)
    logging.info("log_rotation={}".format(log_rotation))

    # If no initial workers value is given, determine the minimum number
    # required at the start so that the cluster never goes below 1 worker.
    # If a number is given, then verify it is sufficient.
    if ops:
        if isinstance(ops[0], Recover):
            raise ValueError("The first operation cannot be Recover")
        lowest = lowest_point(ops)
        if lowest < 1:
            min_workers = abs(lowest) + 1
        else:
            min_workers = 1
        if isinstance(initial, int):
            logging.debug('initial: {}'.format(initial))
            logging.debug('min: {}'.format(min_workers))
            assert (initial >= min_workers)
            workers = initial
        else:
            workers = min_workers
    else:  # Test is only for setup using initial workers
        assert (initial > 0)
        workers = initial

    logging.info("Initial cluster size: {}".format(workers))

    parts = get_parts(partitions, source_number)
    sources = []
    if source_type == 'gensource':
        # noop
        command += " --source gensource"
    elif source_type == 'tcp':
        command += " --source tcp"
        # for each part, create a MultiSequenceGenerator with the right base_index
        for part in parts:
            sources.append(
                MultiSequenceGenerator(base_index=min(part),
                                       initial_partitions=len(part)))
    elif source_type == 'alo':
        command += " --source alo"
        # for each number in each part, create an ALOSequenceGenerator
        # and group them in groups matching the parts
        for part in parts:
            sources.append([
                ALOSequenceGenerator("key_{key}".format(key=key), 10000)
                for key in part
            ])
    else:
        raise ValueError(
            "source_type must be one of ['gensource', 'tcp', 'alo']")

    # Start cluster
    logging.debug("Creating cluster")
    with Cluster(command=command,
                 host=host,
                 sources=[source_name] if source_type != 'gensource' else [],
                 workers=workers,
                 sinks=sinks,
                 sink_mode=sink_mode,
                 split_streams=split_streams,
                 log_rotation=log_rotation,
                 persistent_data=persistent_data) as cluster:

        # start senders
        if source_type == 'tcp':
            # All tcp sources connect to initializer, because they don't
            # support shrinking
            for source_gen in sources:
                sender = Sender(cluster.source_addrs[0][source_name],
                                Reader(source_gen),
                                batch_size=batch_size,
                                interval=sender_interval,
                                reconnect=True)
                cluster.add_sender(sender, start=True)
        elif source_type == 'alo':
            for (idx, source_gens) in enumerate(sources):
                sender = ALOSender(
                    source_gens, VERSION, COOKIE, command,
                    "instance_{idx}".format(idx=idx),
                    (cluster.source_addrs[idx %
                                          len(cluster.workers)][source_name]))
                cluster.add_sender(sender, start=True)

        # let the senders send some data first
        time.sleep(1)

        # loop over ops, keeping the result and passing it to the next op
        res = None
        assert (not cluster.get_crashed_workers())
        for op in ops:
            res_ops.append(op)
            cluster.log_op(op)
            logging.info("Executing: {}".format(op))
            res = op.apply(cluster, res)
            assert (not cluster.get_crashed_workers())

        # Wait a full second for things to calm down
        time.sleep(1)

        # If using external senders, wait for them to stop cleanly
        if cluster.senders:
            # Tell the multi-sequence-sender to stop
            cluster.stop_senders()

            # wait for senders to reach the end of their readers and stop
            for s in cluster.senders:
                cluster.wait_for_sender(s)

            # Create await_values for the sink based on the stop values from
            # the multi sequence generator
            await_values = get_await_values(
                [sender.last_sent() for sender in cluster.senders])
            cluster.sink_await(values=await_values, func=json_keyval_extract)

        logging.info("Completion condition achieved. Shutting down cluster.")

        # Use validator to validate the data in at-least-once mode
        # save sink data to a file
        if validation_cmd:
            # TODO: move to validations.py
            # TODO: This forces _every_ test to save its validated file to artifacts.
            # remove it once the bug with validation failing but passing on the artifact
            # is resolved.
            #out_file = os.path.join(cluster.res_dir, 'received.txt')
            base_path = cluster.res_dir
            makedirs_if_not_exists(base_path)
            chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
            rng = _Random()
            random_str = ''.join([rng.choice(chars) for _ in range(8)])
            out_name = 'received_{}.txt'.format(random_str)
            out_file = os.path.join(base_path, out_name)
            logging.info("Saving validation output to {}".format(out_file))
            out_files = cluster.sinks[0].save(out_file)

            with open(os.path.join(cluster.res_dir, "ops.log"), "wt") as f:
                for op in cluster.ops:
                    f.write("{}\n".format(op))

            # Validate captured output
            logging.info("Validating output")
            cmd_validate = validation_cmd.format(out_file=cluster.res_dir)
            res = run_shell_cmd(cmd_validate)
            try:
                assert res.success
                logging.info("Validation successful")
                for out_file in out_files:
                    try:
                        os.remove(out_file)
                        logging.info(
                            "Removed validation file: {}".format(out_file))
                    except OSError:
                        # Catch only filesystem errors; a bare except would
                        # also swallow KeyboardInterrupt.
                        logging.info(
                            "Failed to remove file: {}".format(out_file))
            except AssertionError:
                err = AssertionError('Validation failed with the following '
                                     'error:\n{}'.format(res.output))
                logging.exception(err)
                raise AssertionError("Validation failed")

        # Validate worker actually underwent recovery
        if cluster.restarted_workers:
            # TODO: move to validations.py
            logging.info("Validating recovery")
            pattern = r"RESILIENCE\: Replayed \d+ entries from recovery log file\."
            for r in cluster.restarted_workers:
                stdout = r.get_output()
                try:
                    assert (re.search(pattern, stdout) is not None)
                    logging.info("{} recovered successfully".format(r.name))
                except AssertionError:
                    raise AssertionError(
                        'Worker {} does not appear to have performed '
                        'recovery as expected.'.format(r.name))
Example #19
 def rng(self):
     cur_pid = _os.getpid()
     if cur_pid != getattr(self, '_rng_pid', None):
         self._rng = _Random()
         self._rng_pid = cur_pid
     return self._rng
Example #20
 def __init__(self, prob, prices, seed=123):
     self.prob = prob
     self.prices = prices
     self._rand = _Random(seed)
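Passing a seed makes the simulation reproducible: two generators built with the same seed emit identical streams.

from random import Random as _Random

a = _Random(123)
b = _Random(123)
assert [a.random() for _ in range(3)] == [b.random() for _ in range(3)]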
Example #21
 def __init__(self):
     self.mutex = _allocate_lock()
     self.rng = _Random()
     self.normcase = _os.path.normcase
 def reset(self):
     """Reset this object back to the initial state"""
     self._state = _STATE(self._initial_price, _Random(self._seed), 0)