Example #1
0
def main():
    """Resume every unfinished flow detail found in the backend's logbooks."""
    with example_utils.get_backend() as backend:
        connection = backend.get_connection()
        for logbook_entry in list(connection.get_logbooks()):
            for flow_detail in logbook_entry:
                # Skip flows that already ran to a terminal state.
                if flow_detail.state in FINISHED_STATES:
                    continue
                resume(flow_detail, backend)
Example #2
0
def main():
    """Walk all stored logbooks and resume any flow that has not finished."""
    with example_utils.get_backend() as backend:
        books = list(backend.get_connection().get_logbooks())
        # Lazy generator keeps the check-then-resume ordering of the
        # original nested loops intact.
        pending = (fd for book in books for fd in book
                   if fd.state not in FINISHED_STATES)
        for fd in pending:
            resume(fd, backend)
Example #3
0
def create_job(args):
    """Post a new job (with its backing logbook) onto the jobboard.

    The job's details come from ``args.details`` (a JSON string); the
    persistence backend is sqlite when sqlalchemy is available, otherwise
    a directory-based backend under the system temp dir.
    """
    store = json.loads(args.details)
    book = logbook.LogBook(args.job_name)
    tmp_dir = tempfile.gettempdir()
    if example_utils.SQLALCHEMY_AVAILABLE:
        backend_uri = "sqlite:///%s" % (os.path.join(tmp_dir, "persisting.db"))
    else:
        backend_uri = "file:///%s" % (os.path.join(tmp_dir, "persisting"))
    with example_utils.get_backend(backend_uri) as backend:
        # Save the logbook first so the posted job has persistent state
        # to reference.
        backend.get_connection().save_logbook(book)
        with jobboard(args.board_name, conf, persistence=backend) as jb:
            jb.post(args.job_name, book, details=store)
Example #4
0
def create_job(args):
    """Save a logbook, then post a job referencing it onto the jobboard."""
    job_details = json.loads(args.details)
    book = logbook.LogBook(args.job_name)
    tmpdir = tempfile.gettempdir()
    # Pick the persistence backend: sqlite file when sqlalchemy is
    # installed, plain directory backend otherwise.
    if example_utils.SQLALCHEMY_AVAILABLE:
        persist_path = os.path.join(tmpdir, "persisting.db")
        backend_uri = "sqlite:///%s" % (persist_path)
    else:
        persist_path = os.path.join(tmpdir, "persisting")
        backend_uri = "file:///%s" % (persist_path)
    with example_utils.get_backend(backend_uri) as backend:
        conn = backend.get_connection()
        conn.save_logbook(book)
        with jobboard(args.board_name, conf, persistence=backend) as jb:
            jb.post(args.job_name, book, details=job_details)
Example #5
0
class TestTask(task.Task):
    """Trivial example task: announces its execution and reports success."""

    def execute(self):
        notice = 'executing %s' % self
        print(notice)
        return 'ok'


def flow_factory():
    """Build the linear demo flow: a task, an interrupter, another task."""
    stages = (TestTask(name='first'),
              InterruptTask(name='boom'),
              TestTask(name='second'))
    return lf.Flow('resume from backend example').add(*stages)


# INITIALIZE PERSISTENCE ####################################

# NOTE(review): this example appears truncated and spliced together -- the
# ``zag.engines.load(`` call below is cut off mid-argument-list, and the
# indented lines following it look like they belong to a different (VM-boot)
# example.  Code is kept byte-identical; comments only added.
with eu.get_backend() as backend:

    # Create a place where the persistence information will be stored.
    book = models.LogBook("example")
    flow_detail = models.FlowDetail("resume from backend example",
                                    uuid=uuidutils.generate_uuid())
    book.add(flow_detail)
    # Persist the (book, flow-detail) pair so the flow can later be resumed.
    with contextlib.closing(backend.get_connection()) as conn:
        conn.save_logbook(book)

    # CREATE AND RUN THE FLOW: FIRST ATTEMPT ####################

    flow = flow_factory()
    # NOTE(review): incomplete call -- remaining keyword arguments (and the
    # closing parenthesis) are missing from this chunk.
    engine = zag.engines.load(flow,
                              flow_detail=flow_detail,
                              book=book,
            gf.Flow("volume-maker").add(
                AllocateVolumes("allocate_my_volumes", provides='volumes'),
                FormatVolumes("volume_formatter"),
            ),
            # Finally boot it all.
            BootVM("boot-it"),
        ),
        # Ya it worked!
        PrintText("Finished vm create.", no_slow=True),
        PrintText("Instance is running!", no_slow=True))
    return flow

eu.print_wrapped("Initializing")

# Setup the persistence & resumption layer.
with eu.get_backend() as backend:
    # An optional "tracking id" of the form "<book-uuid>+<flow-uuid>" may be
    # supplied as the second CLI argument; anything missing or malformed
    # degrades gracefully to None.
    try:
        book_id, flow_id = sys.argv[2].split("+", 1)
        if not uuidutils.is_uuid_like(book_id):
            book_id = None
        if not uuidutils.is_uuid_like(flow_id):
            flow_id = None
    except (IndexError, ValueError):
        # IndexError: no second argument; ValueError: no "+" separator so
        # the two-target unpack failed.
        book_id = None
        flow_id = None

    # Set up how we want our engine to run, serial, parallel...
    executor = None
    if eventlet_utils.EVENTLET_AVAILABLE:
        # Prefer a green-thread pool (5 workers) when eventlet is installed.
        executor = futures.GreenThreadPoolExecutor(5)
    # NOTE(review): the rest of this with-block is not visible in this chunk.
# Persist the flow and task state here, if the file/dir exists already blowup
# if not don't blowup, this allows a user to see both the modes and to see
# what is stored in each case.
if example_utils.SQLALCHEMY_AVAILABLE:
    persist_path = os.path.join(tempfile.gettempdir(), "persisting.db")
    backend_uri = "sqlite:///%s" % (persist_path)
else:
    # Fall back to a directory-based backend when sqlalchemy is unavailable.
    persist_path = os.path.join(tempfile.gettempdir(), "persisting")
    backend_uri = "file:///%s" % (persist_path)

# First run: nothing persisted yet -> blow up (exercise revert/undo).
# Later runs: state exists -> run to completion (exercise resume/redo).
if os.path.exists(persist_path):
    blowup = False
else:
    blowup = True

with example_utils.get_backend(backend_uri) as backend:
    # Now we can run.
    # NOTE(review): 'engine_conf' looks like an older-style engine
    # configuration keyword -- confirm it matches the engines.load()
    # signature in use.
    engine_config = {
        'backend': backend,
        'engine_conf': 'serial',
        'book': logbook.LogBook("my-test"),
    }

    # Make a flow that will blowup if the file doesn't exist previously, if it
    # did exist, assume we won't blowup (and therefore this shows the undo
    # and redo that a flow will go through).
    flow = make_flow(blowup=blowup)
    print_wrapped("Running")
    try:
        eng = engines.load(flow, **engine_config)
        eng.run()
        # NOTE(review): the matching except/finally clause is missing -- this
        # chunk is truncated here.
Example #8
0
# Persist the flow and task state here, if the file/dir exists already blow up
# if not don't blow up, this allows a user to see both the modes and to see
# what is stored in each case.
if eu.SQLALCHEMY_AVAILABLE:
    persist_path = os.path.join(tempfile.gettempdir(), "persisting.db")
    backend_uri = "sqlite:///%s" % (persist_path)
else:
    # Directory-based persistence when sqlalchemy is not installed.
    persist_path = os.path.join(tempfile.gettempdir(), "persisting")
    backend_uri = "file:///%s" % (persist_path)

# Existing state -> expect a clean (resumed) run; fresh state -> force a
# failure so the revert path can be observed.
if os.path.exists(persist_path):
    blowup = False
else:
    blowup = True

with eu.get_backend(backend_uri) as backend:
    # Make a flow that will blow up if the file didn't exist previously, if it
    # did exist, assume we won't blow up (and therefore this shows the undo
    # and redo that a flow will go through).
    book = models.LogBook("my-test")
    flow = make_flow(blowup=blowup)
    eu.print_wrapped("Running")
    try:
        eng = engines.load(flow, engine='serial', backend=backend, book=book)
        eng.run()
        if not blowup:
            # A successful run removes the persisted state so the next
            # invocation starts from scratch (and blows up again).
            eu.rm_path(persist_path)
    except Exception:
        # NOTE(harlowja): don't exit with non-zero status code, so that we can
        # print the book contents, as well as avoiding exiting also makes the
        # unit tests (which also runs these examples) pass.
        # NOTE(review): the except body is truncated here -- the actual
        # handling statement(s) are missing from this chunk.
class TestTask(task.Task):
    """Demo task: announces its execution on stdout, then reports success."""

    def execute(self):
        print('executing %s' % self)
        result = 'ok'
        return result


def flow_factory():
    """Create the linear example flow used by the resume demonstration."""
    flow = lf.Flow('resume from backend example')
    # A normal task, a deliberate interrupter, then another normal task.
    return flow.add(TestTask(name='first'),
                    InterruptTask(name='boom'),
                    TestTask(name='second'))


### INITIALIZE PERSISTENCE ####################################

with example_utils.get_backend() as backend:
    # NOTE(review): this local name presumably shadows a ``logbook`` module
    # imported at file top -- confirm against the full file.
    logbook = p_utils.temporary_log_book(backend)

    ### CREATE AND RUN THE FLOW: FIRST ATTEMPT ####################

    flow = flow_factory()
    flowdetail = p_utils.create_flow_detail(flow, logbook, backend)
    engine = taskflow.engines.load(flow, flow_detail=flowdetail,
                                   backend=backend)

    # Show the recorded task states before and after running so the reader
    # can see what gets persisted into the flow detail.
    print_task_states(flowdetail, "At the beginning, there is no state")
    print_wrapped("Running")
    engine.run()
    print_task_states(flowdetail, "After running")

    ### RE-CREATE, RESUME, RUN ####################################
    # NOTE(review): the resume portion of this example is not visible in
    # this chunk.
Example #10
0
                # NOTE(review): these first two lines belong to a task
                # definition whose beginning is outside this chunk.
                print("Taking a well deserved break.")
            print("Your drive %s has been certified." % (v))


# Setup the set of things to do (mini-cinder).
flow = lf.Flow("root").add(
    PrintText("Starting volume create", no_slow=True),
    gf.Flow('maker').add(
        CreateSpecForVolumes("volume_specs", provides='volume_specs'),
        PrintText("I need a nap, it took me a while to build those specs."),
        PrepareVolumes(),
    ),
    PrintText("Finished volume create", no_slow=True))

# Setup the persistence & resumption layer.
with example_utils.get_backend() as backend:
    # An optional "tracking id" ("<book-uuid>+<flow-uuid>") may be supplied
    # as the second CLI argument.
    try:
        book_id, flow_id = sys.argv[2].split("+", 1)
    except (IndexError, ValueError):
        book_id = None
        flow_id = None

    if not all([book_id, flow_id]):
        # If no 'tracking id' (think a fedex or ups tracking id) is provided
        # then we create one by creating a logbook (where flow details are
        # stored) and creating a flow detail (where flow and task state is
        # stored). The combination of these 2 objects unique ids (uuids) allows
        # the users of zag to reassociate the workflows that were
        # potentially running (and which may have partially completed) back
        # with zag so that those workflows can be resumed (or reverted)
        # after a process/thread/engine has failed in someway.
        # NOTE(review): the body of this branch is truncated here.
Example #11
0
# Persist the flow and task state here, if the file/dir exists already blowup
# if not don't blowup, this allows a user to see both the modes and to see
# what is stored in each case.
if example_utils.SQLALCHEMY_AVAILABLE:
    persist_path = os.path.join(tempfile.gettempdir(), "persisting.db")
    backend_uri = "sqlite:///%s" % (persist_path)
else:
    # Directory-based backend when sqlalchemy is unavailable.
    persist_path = os.path.join(tempfile.gettempdir(), "persisting")
    backend_uri = "file:///%s" % (persist_path)

# Fresh state -> force a failure (show revert); existing state -> run clean.
if os.path.exists(persist_path):
    blowup = False
else:
    blowup = True

with example_utils.get_backend(backend_uri) as backend:
    # Now we can run.
    # NOTE(review): 'engine_conf' looks like an older-style engine
    # configuration keyword -- confirm against the engines.load() in use.
    engine_config = {
        'backend': backend,
        'engine_conf': 'serial',
        'book': logbook.LogBook("my-test"),
    }

    # Make a flow that will blowup if the file doesn't exist previously, if it
    # did exist, assume we won't blowup (and therefore this shows the undo
    # and redo that a flow will go through).
    flow = make_flow(blowup=blowup)
    print_wrapped("Running")
    try:
        eng = engines.load(flow, **engine_config)
        eng.run()
        # NOTE(review): the matching except/finally clause is missing -- this
        # chunk is truncated here.
Example #12
0
# Persist the flow and task state here, if the file/dir exists already blow up
# if not don't blow up, this allows a user to see both the modes and to see
# what is stored in each case.
if eu.SQLALCHEMY_AVAILABLE:
    persist_path = os.path.join(tempfile.gettempdir(), "persisting.db")
    backend_uri = "sqlite:///%s" % (persist_path)
else:
    # Directory-based persistence when sqlalchemy is not installed.
    persist_path = os.path.join(tempfile.gettempdir(), "persisting")
    backend_uri = "file:///%s" % (persist_path)

# Existing state -> clean (resumed) run; fresh state -> forced failure so
# the revert path can be observed.
if os.path.exists(persist_path):
    blowup = False
else:
    blowup = True

with eu.get_backend(backend_uri) as backend:
    # Make a flow that will blow up if the file didn't exist previously, if it
    # did exist, assume we won't blow up (and therefore this shows the undo
    # and redo that a flow will go through).
    book = logbook.LogBook("my-test")
    flow = make_flow(blowup=blowup)
    eu.print_wrapped("Running")
    try:
        eng = engines.load(flow, engine='serial',
                           backend=backend, book=book)
        eng.run()
        if not blowup:
            # Successful run: remove persisted state so the next invocation
            # starts over (and blows up again).
            eu.rm_path(persist_path)
    except Exception:
        # NOTE(harlowja): don't exit with non-zero status code, so that we can
        # print the book contents, as well as avoiding exiting also makes the
        # NOTE(review): this chunk is truncated mid-comment; the remainder of
        # the except body is not visible here.