Example #1
 def test_count(self):
     self.assertEqual(0, iter_utils.count([]))
     self.assertEqual(1, iter_utils.count(['a']))
     self.assertEqual(10, iter_utils.count(compat_range(0, 10)))
     self.assertEqual(1000, iter_utils.count(compat_range(0, 1000)))
     self.assertEqual(0, iter_utils.count(compat_range(0)))
     self.assertEqual(0, iter_utils.count(compat_range(-1)))
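For reference, compat_range in these snippets appears to be the six-style lazy range (from six.moves import range), and iter_utils.count presumably just counts the items an iterable yields. A minimal sketch of such a counter, assuming nothing beyond the assertions above:

    def count(it):
        # Count items by exhausting the iterable; works equally for lists,
        # generators and range objects.
        return sum(1 for _ in it)

    assert count([]) == 0
    assert count(range(0, 10)) == 10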
Example #2
    def execute(self, image_config, mandelbrot_config, chunk):
        """Returns the number of iterations before the computation "escapes".

        Given the real and imaginary parts of a complex number, determine if it
        is a candidate for membership in the mandelbrot set given a fixed
        number of iterations.
        """

        # Parts borrowed from the notebook below (credit to Mark Harris
        # and Benoît Mandelbrot).
        #
        # http://nbviewer.ipython.org/gist/harrism/f5707335f40af9463c43
        def mandelbrot(x, y, max_iters):
            c = complex(x, y)
            z = 0.0j
            for i in compat_range(max_iters):
                z = z * z + c
                if (z.real * z.real + z.imag * z.imag) >= 4:
                    return i
            return max_iters

        min_x, max_x, min_y, max_y, max_iters = mandelbrot_config
        height, width = image_config['size']
        pixel_size_x = (max_x - min_x) / width
        pixel_size_y = (max_y - min_y) / height
        block = []
        for y in compat_range(chunk[0], chunk[1]):
            row = []
            imag = min_y + y * pixel_size_y
            for x in compat_range(0, width):
                real = min_x + x * pixel_size_x
                row.append(mandelbrot(real, imag, max_iters))
            block.append(row)
        return block
Example #3
def main():
    if six.PY3:
        # TODO(harlowja): Hack to make eventlet work right, remove when the
        # following is fixed: https://github.com/eventlet/eventlet/issues/230
        from taskflow.utils import eventlet_utils as _eu  # noqa
        try:
            import eventlet as _eventlet  # noqa
        except ImportError:
            pass
    with contextlib.closing(fake_client.FakeClient()) as c:
        created = []
        for i in compat_range(0, PRODUCERS):
            p = threading_utils.daemon_thread(producer, i + 1, c)
            created.append(p)
            p.start()
        consumed = collections.deque()
        for i in compat_range(0, WORKERS):
            w = threading_utils.daemon_thread(worker, i + 1, c, consumed)
            created.append(w)
            w.start()
        while created:
            t = created.pop()
            t.join()
        # At the end there should be nothing left over; let's verify that.
        board = backends.fetch('verifier', SHARED_CONF.copy(), client=c)
        board.connect()
        with contextlib.closing(board):
            if board.job_count != 0 or len(consumed) != EXPECTED_UNITS:
                return 1
            return 0
Example #4
def main():
    if six.PY3:
        # TODO(harlowja): Hack to make eventlet work right, remove when the
        # following is fixed: https://github.com/eventlet/eventlet/issues/230
        from taskflow.utils import eventlet_utils as _eu  # noqa
        try:
            import eventlet as _eventlet  # noqa
        except ImportError:
            pass
    with contextlib.closing(fake_client.FakeClient()) as c:
        created = []
        for i in compat_range(0, PRODUCERS):
            p = threading_utils.daemon_thread(producer, i + 1, c)
            created.append(p)
            p.start()
        consumed = collections.deque()
        for i in compat_range(0, WORKERS):
            w = threading_utils.daemon_thread(worker, i + 1, c, consumed)
            created.append(w)
            w.start()
        while created:
            t = created.pop()
            t.join()
        # At the end there should be nothing left over; let's verify that.
        board = backends.fetch('verifier', SHARED_CONF.copy(), client=c)
        board.connect()
        with contextlib.closing(board):
            if board.job_count != 0 or len(consumed) != EXPECTED_UNITS:
                return 1
            return 0
Example #5
    def execute(self, image_config, mandelbrot_config, chunk):
        """Returns the number of iterations before the computation "escapes".

        Given the real and imaginary parts of a complex number, determine if it
        is a candidate for membership in the mandelbrot set given a fixed
        number of iterations.
        """

        # Parts borrowed from the notebook below (credit to Mark Harris
        # and Benoît Mandelbrot).
        #
        # http://nbviewer.ipython.org/gist/harrism/f5707335f40af9463c43
        def mandelbrot(x, y, max_iters):
            c = complex(x, y)
            z = 0.0j
            for i in compat_range(max_iters):
                z = z * z + c
                if (z.real * z.real + z.imag * z.imag) >= 4:
                    return i
            return max_iters

        min_x, max_x, min_y, max_y, max_iters = mandelbrot_config
        height, width = image_config['size']
        pixel_size_x = (max_x - min_x) / width
        pixel_size_y = (max_y - min_y) / height
        block = []
        for y in compat_range(chunk[0], chunk[1]):
            row = []
            imag = min_y + y * pixel_size_y
            for x in compat_range(0, width):
                real = min_x + x * pixel_size_x
                row.append(mandelbrot(real, imag, max_iters))
            block.append(row)
        return block
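Pulled out of the task, the inner escape-time function is self-contained and easy to try on its own; a minimal sketch using the plain built-in range (no six compatibility needed):

    def mandelbrot(x, y, max_iters):
        # Iterate z -> z*z + c until |z|^2 >= 4 (i.e. |z| >= 2) or we give up.
        c = complex(x, y)
        z = 0.0j
        for i in range(max_iters):
            z = z * z + c
            if (z.real * z.real + z.imag * z.imag) >= 4:
                return i
        return max_iters

    print(mandelbrot(0.0, 0.0, 100))  # 100 -- the origin never escapes
    print(mandelbrot(2.0, 2.0, 100))  # 0 -- far-away points escape at once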
Example #6
 def test_count(self):
     self.assertEqual(0, iter_utils.count([]))
     self.assertEqual(1, iter_utils.count(['a']))
     self.assertEqual(10, iter_utils.count(compat_range(0, 10)))
     self.assertEqual(1000, iter_utils.count(compat_range(0, 1000)))
     self.assertEqual(0, iter_utils.count(compat_range(0)))
     self.assertEqual(0, iter_utils.count(compat_range(-1)))
Example #7
def main():
    if len(sys.argv) == 2:
        tbl = []
        # Note: csv.reader expects a text-mode file on Python 3.
        with open(sys.argv[1], 'r') as fh:
            reader = csv.reader(fh)
            for row in reader:
                tbl.append([float(r) if r else 0.0 for r in row])
    else:
        # Make some random table out of thin air...
        tbl = []
        cols = random.randint(1, 100)
        rows = random.randint(1, 100)
        for _i in compat_range(0, rows):
            row = []
            for _j in compat_range(0, cols):
                row.append(random.random())
            tbl.append(row)

    # Generate the work to be done.
    f = make_flow(tbl)

    # Now run it (using the specified executor)...
    try:
        executor = futurist.GreenThreadPoolExecutor(max_workers=5)
    except RuntimeError:
        # No eventlet currently active, use real threads instead.
        executor = futurist.ThreadPoolExecutor(max_workers=5)
    try:
        e = engines.load(f, engine='parallel', executor=executor)
        for st in e.run_iter():
            print(st)
    finally:
        executor.shutdown()

    # Find the old rows and put them into place...
    #
    # TODO(harlowja): probably easier just to sort instead of search...
    computed_tbl = []
    for i in compat_range(0, len(tbl)):
        for t in f:
            if t.index == i:
                computed_tbl.append(e.storage.get(t.name))

    # Do some basic validation (which causes the return code of this process
    # to be different if things were not as expected...)
    if len(computed_tbl) != len(tbl):
        return 1
    else:
        return 0
Example #8
 def mandelbrot(x, y, max_iters):
     c = complex(x, y)
     z = 0.0j
     for i in compat_range(max_iters):
         z = z * z + c
         if (z.real * z.real + z.imag * z.imag) >= 4:
             return i
     return max_iters
Example #9
 def mandelbrot(x, y, max_iters):
     c = complex(x, y)
     z = 0.0j
     for i in compat_range(max_iters):
         z = z * z + c
         if (z.real * z.real + z.imag * z.imag) >= 4:
             return i
     return max_iters
Example #10
def calculate(engine_conf):
    # Subdivide the work into X pieces, then request each worker to calculate
    # one of those chunks and then later we will write these chunks out to
    # an image bitmap file.

    # An unordered flow is used here since the mandelbrot calculation is an
    # example of an embarrassingly parallel computation that we can scatter
    # across as many workers as possible.
    flow = uf.Flow("mandelbrot")

    # These symbols will be automatically given to tasks as input to their
    # execute method, in this case these are constants used in the mandelbrot
    # calculation.
    store = {
        'mandelbrot_config': [-2.0, 1.0, -1.0, 1.0, MAX_ITERATIONS],
        'image_config': {
            'size': IMAGE_SIZE,
        }
    }

    # We need the task names to be in the right order so that we can extract
    # the final results in the right order (we don't care about the order when
    # executing).
    task_names = []

    # Compose our workflow.
    height, _width = IMAGE_SIZE
    chunk_size = int(math.ceil(height / float(CHUNK_COUNT)))
    for i in compat_range(0, CHUNK_COUNT):
        chunk_name = 'chunk_%s' % i
        task_name = "calculation_%s" % i
        # Break the calculation up into chunk size pieces.
        rows = [i * chunk_size, i * chunk_size + chunk_size]
        flow.add(
            MandelCalculator(
                task_name,
                # This ensures the storage symbol with name
                # 'chunk_name' is sent into the task's local
                # symbol 'chunk'. This is how we give each
                # calculator its own correct sequence of rows
                # to work on.
                rebind={'chunk': chunk_name}))
        store[chunk_name] = rows
        task_names.append(task_name)

    # Now execute it.
    eng = engines.load(flow, store=store, engine_conf=engine_conf)
    eng.run()

    # Gather all the results and order them for further processing.
    gather = []
    for name in task_names:
        gather.extend(eng.storage.get(name))
    points = []
    for y, row in enumerate(gather):
        for x, color in enumerate(row):
            points.append(((x, y), color))
    return points
Example #11
def calculate(engine_conf):
    # Subdivide the work into X pieces, then request each worker to calculate
    # one of those chunks and then later we will write these chunks out to
    # an image bitmap file.

    # An unordered flow is used here since the mandelbrot calculation is an
    # example of an embarrassingly parallel computation that we can scatter
    # across as many workers as possible.
    flow = uf.Flow("mandelbrot")

    # These symbols will be automatically given to tasks as input to their
    # execute method, in this case these are constants used in the mandelbrot
    # calculation.
    store = {
        'mandelbrot_config': [-2.0, 1.0, -1.0, 1.0, MAX_ITERATIONS],
        'image_config': {
            'size': IMAGE_SIZE,
        }
    }

    # We need the task names to be in the right order so that we can extract
    # the final results in the right order (we don't care about the order when
    # executing).
    task_names = []

    # Compose our workflow.
    height, _width = IMAGE_SIZE
    chunk_size = int(math.ceil(height / float(CHUNK_COUNT)))
    for i in compat_range(0, CHUNK_COUNT):
        chunk_name = 'chunk_%s' % i
        task_name = "calculation_%s" % i
        # Break the calculation up into chunk size pieces.
        rows = [i * chunk_size, i * chunk_size + chunk_size]
        flow.add(
            MandelCalculator(task_name,
                             # This ensures the storage symbol with name
                             # 'chunk_name' is sent into the task's local
                             # symbol 'chunk'. This is how we give each
                             # calculator its own correct sequence of rows
                             # to work on.
                             rebind={'chunk': chunk_name}))
        store[chunk_name] = rows
        task_names.append(task_name)

    # Now execute it.
    eng = engines.load(flow, store=store, engine_conf=engine_conf)
    eng.run()

    # Gather all the results and order them for further processing.
    gather = []
    for name in task_names:
        gather.extend(eng.storage.get(name))
    points = []
    for y, row in enumerate(gather):
        for x, color in enumerate(row):
            points.append(((x, y), color))
    return points
Example #12
def iter_forever(limit):
    """Yields values from iterator until a limit is reached.

    If limit is negative, we iterate forever.
    """
    if limit < 0:
        i = itertools.count()
        while True:
            yield next(i)
    else:
        for i in compat_range(0, limit):
            yield i
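A quick usage sketch: with a negative limit the generator never ends, so it is typically consumed with something like itertools.islice:

    import itertools

    print(list(iter_forever(3)))                        # [0, 1, 2]
    print(list(itertools.islice(iter_forever(-1), 5)))  # [0, 1, 2, 3, 4]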
Example #13
 def wrapper(in_self, timeout=None, prefetch_size=1):
     incomings = []
     watch = timeutils.StopWatch(duration=timeout)
     with watch:
         for __ in compat_range(prefetch_size):
             msg = func(in_self, timeout=watch.leftover(return_none=True))
             if msg is not None:
                 incomings.append(msg)
             else:
                 # timeout reached or listener stopped
                 break
     return incomings
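A self-contained mimic of the prefetch loop above, using a plain monotonic-clock deadline instead of oslo.utils' timeutils.StopWatch, and assuming a finite timeout (both assumptions made for brevity; only the batching logic is taken from the wrapper):

    import time

    def drain(poll, timeout, prefetch_size=1):
        # Collect up to prefetch_size messages, stopping early when the
        # shared deadline expires or the poll function returns None.
        incomings = []
        deadline = time.monotonic() + timeout
        for _ in range(prefetch_size):
            msg = poll(timeout=max(0.0, deadline - time.monotonic()))
            if msg is None:
                break
            incomings.append(msg)
        return incomings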
Example #14
def iter_forever(limit):
    """Yields values from iterator until a limit is reached.

    If limit is negative, we iterate forever.
    """
    if limit < 0:
        i = itertools.count()
        while True:
            yield next(i)
    else:
        for i in compat_range(0, limit):
            yield i
Example #15
def create_fractal():
    logging.basicConfig(level=logging.ERROR)

    # Set up our transport configuration and merge it into the worker and
    # engine configuration so that both of those use it correctly.
    shared_conf = dict(BASE_SHARED_CONF)
    shared_conf.update({
        'transport': 'memory',
        'transport_options': {
            'polling_interval': 0.1,
        },
    })

    if len(sys.argv) >= 2:
        output_filename = sys.argv[1]
    else:
        output_filename = None

    worker_conf = dict(WORKER_CONF)
    worker_conf.update(shared_conf)
    engine_conf = dict(ENGINE_CONF)
    engine_conf.update(shared_conf)
    workers = []
    worker_topics = []

    print('Calculating your mandelbrot fractal of size %sx%s.' % IMAGE_SIZE)
    try:
        # Create a set of workers to simulate actual remote workers.
        print('Running %s workers.' % (WORKERS))
        for i in compat_range(0, WORKERS):
            worker_conf['topic'] = 'calculator_%s' % (i + 1)
            worker_topics.append(worker_conf['topic'])
            w = worker.Worker(**worker_conf)
            runner = threading.Thread(target=w.run)
            runner.daemon = True
            runner.start()
            w.wait()
            workers.append((runner, w.stop))

        # Now use those workers to do something.
        engine_conf['topics'] = worker_topics
        results = calculate(engine_conf)
        print('Execution finished.')
    finally:
        # And cleanup.
        print('Stopping workers.')
        while workers:
            r, stopper = workers.pop()
            stopper()
            r.join()
    print("Writing image...")
    write_image(results, output_filename=output_filename)
Example #16
def create_fractal():
    logging.basicConfig(level=logging.ERROR)

    # Set up our transport configuration and merge it into the worker and
    # engine configuration so that both of those use it correctly.
    shared_conf = dict(BASE_SHARED_CONF)
    shared_conf.update({
        'transport': 'memory',
        'transport_options': {
            'polling_interval': 0.1,
        },
    })

    if len(sys.argv) >= 2:
        output_filename = sys.argv[1]
    else:
        output_filename = None

    worker_conf = dict(WORKER_CONF)
    worker_conf.update(shared_conf)
    engine_conf = dict(ENGINE_CONF)
    engine_conf.update(shared_conf)
    workers = []
    worker_topics = []

    print('Calculating your mandelbrot fractal of size %sx%s.' % IMAGE_SIZE)
    try:
        # Create a set of workers to simulate actual remote workers.
        print('Running %s workers.' % (WORKERS))
        for i in compat_range(0, WORKERS):
            worker_conf['topic'] = 'calculator_%s' % (i + 1)
            worker_topics.append(worker_conf['topic'])
            w = worker.Worker(**worker_conf)
            runner = threading.Thread(target=w.run)
            runner.daemon = True
            runner.start()
            w.wait()
            workers.append((runner, w.stop))

        # Now use those workers to do something.
        engine_conf['topics'] = worker_topics
        results = calculate(engine_conf)
        print('Execution finished.')
    finally:
        # And cleanup.
        print('Stopping workers.')
        while workers:
            r, stopper = workers.pop()
            stopper()
            r.join()
    print("Writing image...")
    write_image(results, output_filename=output_filename)
Example #17
def main():
    with contextlib.closing(fake_client.FakeClient()) as c:
        created = []
        for i in compat_range(0, PRODUCERS):
            p = threading_utils.daemon_thread(producer, i + 1, c)
            created.append(p)
            p.start()
        consumed = collections.deque()
        for i in compat_range(0, WORKERS):
            w = threading_utils.daemon_thread(worker, i + 1, c, consumed)
            created.append(w)
            w.start()
        while created:
            t = created.pop()
            t.join()
        # At the end there should be nothing left over; let's verify that.
        board = backends.fetch('verifier', SHARED_CONF.copy(), client=c)
        board.connect()
        with contextlib.closing(board):
            if board.job_count != 0 or len(consumed) != EXPECTED_UNITS:
                return 1
            return 0
Example #18
def main():
    with contextlib.closing(fake_client.FakeClient()) as c:
        created = []
        for i in compat_range(0, PRODUCERS):
            p = threading_utils.daemon_thread(producer, i + 1, c)
            created.append(p)
            p.start()
        consumed = collections.deque()
        for i in compat_range(0, WORKERS):
            w = threading_utils.daemon_thread(worker, i + 1, c, consumed)
            created.append(w)
            w.start()
        while created:
            t = created.pop()
            t.join()
        # At the end there should be nothing left over; let's verify that.
        board = backends.fetch('verifier', SHARED_CONF.copy(), client=c)
        board.connect()
        with contextlib.closing(board):
            if board.job_count != 0 or len(consumed) != EXPECTED_UNITS:
                return 1
            return 0
Example #19
    def join(self, key_piece, *more_key_pieces):
        """Create and return a namespaced key from many segments.

        NOTE(harlowja): all pieces that are text/unicode are converted into
        their binary equivalent (if they are already binary no conversion
        takes place) before being joined (as redis expects binary keys and not
        unicode/text ones).
        """
        namespace_pieces = []
        if self._namespace is not None:
            namespace_pieces = [self._namespace, self.NAMESPACE_SEP]
        else:
            namespace_pieces = []
        key_pieces = [key_piece]
        if more_key_pieces:
            key_pieces.extend(more_key_pieces)
        for i in compat_range(0, len(namespace_pieces)):
            namespace_pieces[i] = misc.binary_encode(namespace_pieces[i])
        for i in compat_range(0, len(key_pieces)):
            key_pieces[i] = misc.binary_encode(key_pieces[i])
        namespace = b"".join(namespace_pieces)
        key = self.KEY_PIECE_SEP.join(key_pieces)
        return namespace + key
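A hedged usage sketch: assuming a container whose _namespace is b'ns', with NAMESPACE_SEP = b':' and KEY_PIECE_SEP = b'.' (the attribute names come from the method body; their values here are assumptions), a standalone mimic of the joining logic looks like:

    def join(pieces, namespace=b"ns", namespace_sep=b":", piece_sep=b"."):
        # Encode text pieces to bytes (binary pieces pass through), then join.
        encoded = [p.encode("utf-8") if isinstance(p, str) else p
                   for p in pieces]
        return namespace + namespace_sep + piece_sep.join(encoded)

    print(join(["a", "b", "c"]))  # b'ns:a.b.c'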
Example #20
    def join(self, key_piece, *more_key_pieces):
        """Create and return a namespaced key from many segments.

        NOTE(harlowja): all pieces that are text/unicode are converted into
        their binary equivalent (if they are already binary no conversion
        takes place) before being joined (as redis expects binary keys and not
        unicode/text ones).
        """
        namespace_pieces = []
        if self._namespace is not None:
            namespace_pieces = [self._namespace, self.NAMESPACE_SEP]
        else:
            namespace_pieces = []
        key_pieces = [key_piece]
        if more_key_pieces:
            key_pieces.extend(more_key_pieces)
        for i in compat_range(0, len(namespace_pieces)):
            namespace_pieces[i] = misc.binary_encode(namespace_pieces[i])
        for i in compat_range(0, len(key_pieces)):
            key_pieces[i] = misc.binary_encode(key_pieces[i])
        namespace = b"".join(namespace_pieces)
        key = self.KEY_PIECE_SEP.join(key_pieces)
        return namespace + key
Example #21
def producer(ident, client):
    # Create a personal board (using the same client so that it works in
    # the same process) and start posting jobs on the board that we want
    # some entity to perform.
    name = "P-%s" % (ident)
    safe_print(name, "started")
    with backends.backend(name, SHARED_CONF.copy(), client=client) as board:
        for i in compat_range(0, PRODUCER_UNITS):
            job_name = "%s-%s" % (name, i)
            details = {
                'color': random.choice(['red', 'blue']),
            }
            job = board.post(job_name, book=None, details=details)
            safe_print(name, "'%s' [posted]" % (job))
            time.sleep(PRODUCER_DELAY)
    safe_print(name, "finished", prefix=">>>")
Example #22
def producer(ident, client):
    # Create a personal board (using the same client so that it works in
    # the same process) and start posting jobs on the board that we want
    # some entity to perform.
    name = "P-%s" % (ident)
    safe_print(name, "started")
    with backends.backend(name, SHARED_CONF.copy(), client=client) as board:
        for i in compat_range(0, PRODUCER_UNITS):
            job_name = "%s-%s" % (name, i)
            details = {
                'color': random.choice(['red', 'blue']),
            }
            job = board.post(job_name, book=None, details=details)
            safe_print(name, "'%s' [posted]" % (job))
            time.sleep(PRODUCER_DELAY)
    safe_print(name, "finished", prefix=">>>")
Example #23
File: async.py  Project: jzako/anvil
 def run(self, funcs):
     if self._workers:
         raise RuntimeError("Can not start another `run` with %s" " existing workers" % (len(self._workers)))
     self._queue = compat_queue.Queue()
     self._death.clear()
     futs = []
     for i in compat_range(0, self._max_workers):
         w = threading.Thread(target=_chained_worker,
                              args=(i + 1, self._death, self._queue, futs))
         w.daemon = True
         w.start()
         self._workers.append(w)
     for func in funcs:
         fut = futures.Future()
         futs.append(fut)
         self._queue.put((func, fut))
     return futs
Example #24
def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--profile',
                        "-p",
                        dest='profile',
                        action='store_true',
                        default=False,
                        help='profile instead of gather timing'
                        ' (default: False)')
    parser.add_argument('--dummies',
                        "-d",
                        dest='dummies',
                        action='store',
                        type=int,
                        default=100,
                        metavar="<number>",
                        help='how many dummy/no-op tasks to inject'
                        ' (default: 100)')
    parser.add_argument('--limit',
                        '-l',
                        dest='limit',
                        action='store',
                        type=float,
                        default=100.0,
                        metavar="<number>",
                        help='percentage of profiling output to show'
                        ' (default: 100%%)')
    args = parser.parse_args()
    if args.profile:
        ctx_manager = ProfileIt
    else:
        ctx_manager = TimeIt
    dummy_am = max(0, args.dummies)
    with ctx_manager("Building linear flow with %s tasks" % dummy_am, args):
        f = lf.Flow("root")
        for i in compat_range(0, dummy_am):
            f.add(DummyTask(name="dummy_%s" % i))
    with ctx_manager("Loading", args):
        e = engines.load(f)
    with ctx_manager("Compiling", args):
        e.compile()
    with ctx_manager("Preparing", args):
        e.prepare()
    with ctx_manager("Validating", args):
        e.validate()
    with ctx_manager("Running", args):
        e.run()
Example #25
 def run(self, funcs):
     if self._workers:
         raise RuntimeError("Can not start another `run` with %s"
                            " existing workers" % (len(self._workers)))
     self._queue = compat_queue.Queue()
     self._death.clear()
     futs = []
     for i in compat_range(0, self._max_workers):
         w = threading.Thread(target=_chained_worker,
                              args=(i + 1, self._death, self._queue, futs))
         w.daemon = True
         w.start()
         self._workers.append(w)
     for func in funcs:
         fut = futures.Future()
         futs.append(fut)
         self._queue.put((func, fut))
     return futs
Example #26
    def __call__(self, value):
        value = str(value)
        num = "0|-?[1-9][0-9]*"
        m = re.match("^(%s)(?:-(%s))?$" % (num, num), value)
        if not m:
            raise ValueError('Invalid Range: %s' % value)
        left = int(m.group(1))
        right = int(left if m.group(2) is None else m.group(2))

        if left < right:
            left = Integer(min=self.min)(left)
            right = Integer(max=self.max)(right)
            step = 1
        else:
            left = Integer(max=self.max)(left)
            right = Integer(min=self.min)(right)
            step = -1
        if self.inclusive:
            right += step
        return compat_range(left, right, step)
Example #27
    def __call__(self, value):
        value = str(value)
        num = "0|-?[1-9][0-9]*"
        m = re.match("^(%s)(?:-(%s))?$" % (num, num), value)
        if not m:
            raise ValueError('Invalid Range: %s' % value)
        left = int(m.group(1))
        right = int(left if m.group(2) is None else m.group(2))

        if left < right:
            left = Integer(min=self.min)(left)
            right = Integer(max=self.max)(right)
            step = 1
        else:
            left = Integer(max=self.max)(left)
            right = Integer(min=self.min)(right)
            step = -1
        if self.inclusive:
            right += step
        return compat_range(left, right, step)
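Stripped of the Integer bound checks (which are not shown here), the parsing behaviour can be demonstrated standalone; parse_range below is a hypothetical reduction of the __call__ above:

    import re

    def parse_range(value, inclusive=True):
        num = "0|-?[1-9][0-9]*"
        m = re.match("^(%s)(?:-(%s))?$" % (num, num), str(value))
        if not m:
            raise ValueError('Invalid Range: %s' % value)
        left = int(m.group(1))
        right = int(left if m.group(2) is None else m.group(2))
        # Count upward or downward depending on which bound is larger.
        step = 1 if left < right else -1
        if inclusive:
            right += step
        return range(left, right, step)

    print(list(parse_range("3-7")))  # [3, 4, 5, 6, 7]
    print(list(parse_range("7-3")))  # [7, 6, 5, 4, 3]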
Example #28
def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--profile', "-p",
                        dest='profile', action='store_true',
                        default=False,
                        help='profile instead of gather timing'
                             ' (default: False)')
    parser.add_argument('--dummies', "-d",
                        dest='dummies', action='store', type=int,
                        default=100, metavar="<number>",
                        help='how many dummy/no-op tasks to inject'
                             ' (default: 100)')
    parser.add_argument('--limit', '-l',
                        dest='limit', action='store', type=float,
                        default=100.0, metavar="<number>",
                        help='percentage of profiling output to show'
                             ' (default: 100%%)')
    args = parser.parse_args()
    if args.profile:
        ctx_manager = ProfileIt
    else:
        ctx_manager = TimeIt
    dummy_am = max(0, args.dummies)
    with ctx_manager("Building linear flow with %s tasks" % dummy_am, args):
        f = lf.Flow("root")
        for i in compat_range(0, dummy_am):
            f.add(DummyTask(name="dummy_%s" % i))
    with ctx_manager("Loading", args):
        e = engines.load(f)
    with ctx_manager("Compiling", args):
        e.compile()
    with ctx_manager("Preparing", args):
        e.prepare()
    with ctx_manager("Validating", args):
        e.validate()
    with ctx_manager("Running", args):
        e.run()
Example #29
 def __iter__(self):
     if self.count <= 0:
         # PEP 479: raising StopIteration inside a generator becomes a
         # RuntimeError on modern Python; a bare return ends iteration.
         return
     for i in compat_range(0, self.count):
         yield min(self.exponent ** i, self.max_backoff)
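For context, a hedged sketch of the class this __iter__ likely belongs to (the attributes count, exponent and max_backoff come from the body above; the class name and the default values are assumptions):

    class ExponentialBackoff(object):
        def __init__(self, count, exponent=2, max_backoff=3600):
            self.count = count
            self.exponent = exponent
            self.max_backoff = max_backoff

        def __iter__(self):
            if self.count <= 0:
                return
            for i in range(0, self.count):
                # Grow exponentially, but never beyond max_backoff.
                yield min(self.exponent ** i, self.max_backoff)

    print(list(ExponentialBackoff(5)))  # [1, 2, 4, 8, 16]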
Example #30
 def test_fill_many_empty(self):
     result = list(iter_utils.fill(compat_range(0, 50), 500))
     self.assertEqual(450, sum(1 for x in result if x is None))
     self.assertEqual(50, sum(1 for x in result if x is not None))
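iter_utils.fill itself is not shown here; judging purely by the assertions, it pads an iterable out to a fixed length with a filler value. A minimal equivalent sketch:

    import itertools

    def fill(it, count, filler=None):
        # Yield up to count items, padding with filler once it runs dry.
        return itertools.islice(
            itertools.chain(it, itertools.repeat(filler)), count)

    result = list(fill(range(0, 50), 500))
    assert sum(1 for x in result if x is None) == 450
    assert sum(1 for x in result if x is not None) == 50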
Example #31
 def assertRange(self, s, r1, r2, step=1):
     self.assertEqual(list(compat_range(r1, r2, step)),
                      list(self.type_instance(s)))
Example #32
def _build_handler_from_jenkins(jenkins_client,
                                job_name,
                                restricted_ldap_groups=None,
                                description=None,
                                cmd_suffix='',
                                cmd_prefix=''):
    job = jenkins_client.get_job(job_name)
    if job is None:
        return None, None, None
    handles_what = {
        'args': {},
        'message_matcher':
        matchers.match_or(matchers.match_slack("message"),
                          matchers.match_telnet("message")),
        'followers': [
            ConsoleFollower,
            AbortFollower,
        ],
        'authorizer':
        auth.user_in_ldap_groups('admins_cloud'),
        'channel_matcher':
        matchers.match_channel(c.TARGETED),
    }
    cleaned_job_name = job_name.replace("-", " ").replace("_", " ")

    trigger_text = cleaned_job_name.lower()
    if cmd_suffix:
        trigger_text += " " + cmd_suffix
    if cmd_prefix:
        trigger_text = cmd_prefix + " " + trigger_text

    raw_param_defs = list(job.get_params())
    param_defs = collections.OrderedDict()
    for param in raw_param_defs:
        param_name = param['name']
        if param_name in param_defs:
            continue
        param_def = {}
        param_type = param['type']
        param_extra_description = ''
        if param_type in (
                'StringParameterDefinition',
                # TODO(harlowja): can we do validation?
                'ValidatingStringParameterDefinition'):
            param_def['type'] = str
        elif param_type == 'BooleanParameterDefinition':
            param_def['type'] = bool
            param_def['converter'] = hu.strict_bool_from_string
        elif param_type == 'ChoiceParameterDefinition':
            param_def['type'] = str
            choices = list(p.strip() for p in param['choices'] if p.strip())
            choices.sort()
            param_def['converter'] = functools.partial(utils.only_one_of,
                                                       choices)
            param_extra_description = "one of [%s]" % (", ".join(choices))
        else:
            raise RuntimeError("Unknown how to translate jenkins job '%s'"
                               " param '%s' type '%s' into a"
                               " python type: %s" %
                               (job_name, param_name, param_type, param))
        if 'defaultParameterValue' in param:
            param_def['default'] = param['defaultParameterValue']['value']
        if 'description' in param:
            param_description = param['description']
            if param_extra_description:
                # Do some cleanup on the existing description before
                # we mess with it (so that it formats nicer).
                param_description = param_description.strip()
                param_description = param_description.rstrip(".")
                param_description += " " + param_extra_description
            param_def['help'] = param_description
        elif param_extra_description:
            param_def['help'] = param_extra_description
        param_defs[param_name] = param_def

    args_converters = {}
    args_order = []
    args_defaults = {}
    args_help = {}
    for param_name, param_def in param_defs.items():
        args_order.append(param_name)
        if 'converter' in param_def:
            args_converters[param_name] = param_def['converter']
        if 'default' in param_def:
            args_defaults[param_name] = param_def['default']
        if 'help' in param_def:
            args_help[param_name] = param_def['help']

    handles_what['triggers'] = [
        trigger.Trigger(trigger_text, takes_args=bool(args_order)),
    ]

    handles_what['args']['help'] = args_help
    handles_what['args']['defaults'] = args_defaults
    handles_what['args']['converters'] = args_converters
    handles_what['args']['order'] = args_order

    if not description:
        description = "Initiates a %s build." % job_name

    job_cls_dct = {
        'handles_what': handles_what,
        'job_name': job_name,
        '__doc__': description,
        '__module__': __name__,
    }
    job_type_name = job_name
    job_type_name = job_type_name.replace("-", "_")
    job_type_name = job_type_name.replace(" ", "_")
    job_type_name = job_type_name.replace("\t", "_")
    job_type_name_pieces = job_type_name.split("_")
    for i in compat_range(0, len(job_type_name_pieces)):
        p = job_type_name_pieces[i]
        p = p.strip()
        if p:
            job_type_name_pieces[i] = p.title()
        else:
            job_type_name_pieces[i] = ''
    job_type_name = "%sJobHandler" % ("".join(job_type_name_pieces))
    job_type_name = str(job_type_name)
    job_cls = type(job_type_name, (JobHandler, ), job_cls_dct)
    return (job_type_name, job_cls, job_cls_dct)
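The class-name mangling at the end is worth seeing in isolation; a condensed equivalent of that loop (the sample job name is hypothetical):

    name = "deploy-web_tier"
    pieces = name.replace("-", "_").replace(" ", "_").replace("\t", "_")
    cls_name = "%sJobHandler" % "".join(
        p.strip().title() for p in pieces.split("_") if p.strip())
    print(cls_name)  # DeployWebTierJobHandler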
Example #33
    engine_options = dict(BASE_SHARED_CONF)
    workers = []

    # These topics will be used to request worker information; those
    # workers will respond with their capabilities, which the executing
    # engine will use to match pending tasks to a suitable worker. This
    # causes each task to be sent for execution, and the engine will wait
    # until it is finished (a response is received); then the engine will
    # either continue with other tasks, do some retry/failure resolution
    # logic, or stop (and potentially re-raise the remote worker's failure)...
    worker_topics = []

    try:
        # Create a set of worker threads to simulate actual remote workers...
        print('Running %s workers.' % (MEMORY_WORKERS))
        for i in compat_range(0, MEMORY_WORKERS):
            # Give each one its own unique topic name so that they can
            # correctly communicate with the engine (they will all share the
            # same exchange).
            worker_conf['topic'] = 'worker-%s' % (i + 1)
            worker_topics.append(worker_conf['topic'])
            w = worker.Worker(**worker_conf)
            runner = threading_utils.daemon_thread(w.run)
            runner.start()
            w.wait()
            workers.append((runner, w.stop))

        # Now use those workers to do something.
        print('Executing some work.')
        engine_options['topics'] = worker_topics
        result = run(engine_options)
Example #34
 def test_fill_many_empty(self):
     result = list(iter_utils.fill(compat_range(0, 50), 500))
     self.assertEqual(450, sum(1 for x in result if x is None))
     self.assertEqual(50, sum(1 for x in result if x is not None))
Example #35
 def assertRange(self, s, r1, r2, step=1):
     self.assertEqual(list(compat_range(r1, r2, step)),
                      list(self.type_instance(s)))
Example #36
 def __iter__(self):
     if self.count <= 0:
         # PEP 479: raising StopIteration inside a generator becomes a
         # RuntimeError on modern Python; a bare return ends iteration.
         return
     for i in compat_range(0, self.count):
         yield min(self.exponent**i, self.max_backoff)