from azureml.pipeline.core import PublishedPipeline, Schedule


def disable_pipeline(pipeline_name="", dry_run=True):
    """Disable the schedules attached to published 'prednet' pipelines,
    then disable the pipelines themselves. An empty pipeline_name
    matches every 'prednet' pipeline in the workspace."""

    if dry_run:
        print("Dry run: only printing what would be done")
    else:
        print("Disabling pipelines")

    ws = get_workspace()

    # Get all published pipeline objects in the workspace
    all_pub_pipelines = PublishedPipeline.get_all(ws)

    # Iterate over the published pipelines and process each one whose
    # name matches the request (or every 'prednet' pipeline when no
    # name was given):
    print("Published pipelines found in the workspace:")
    for pub_pipeline in all_pub_pipelines:
        # Parentheses matter here: without them, `and` binds tighter than
        # `or`, so an empty pipeline_name would match (and disable)
        # non-prednet pipelines as well.
        if pub_pipeline.name.startswith('prednet') and (
                pub_pipeline.name == pipeline_name or pipeline_name == ""):
            print("Found pipeline:", pub_pipeline.name, pub_pipeline.id)
            pub_pipeline_id = pub_pipeline.id
            schedules = Schedule.get_all(ws, pipeline_id=pub_pipeline_id)

            # Iterate over the schedules attached to this pipeline and
            # disable each one (unless this is a dry run):
            print("Found these schedules for the pipeline id {}:".format(
                pub_pipeline_id))
            for schedule in schedules:
                print(schedule.name, schedule.id)
                if not dry_run:
                    # Disable the schedule, then re-fetch it to confirm
                    # the new status:
                    print("Disabling schedule with id: {}".format(
                        schedule.id))
                    schedule.disable(wait_for_provisioning=True)
                    fetched_schedule = Schedule.get(ws, schedule.id)
                    print("Disabled schedule {}. New status is: {}".format(
                        fetched_schedule.id, fetched_schedule.status))

            if not dry_run:
                print("Disabling pipeline")
                pub_pipeline.disable()
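
A minimal sketch of how this helper might be exercised. get_workspace() is defined elsewhere in the repo; the stand-in below and the pipeline name are assumptions, not the repo's actual code:

from azureml.core import Workspace

def get_workspace():
    # Hypothetical stand-in: read the workspace from a config.json on disk.
    return Workspace.from_config()

# Preview what would be disabled, then actually disable it:
disable_pipeline(pipeline_name="prednet_UCSDped1", dry_run=True)
disable_pipeline(pipeline_name="prednet_UCSDped1", dry_run=False)
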
def find_pipeline(ws, name):
    """Return the published pipeline with the given name, or None if no
    pipeline in the workspace matches."""
    all_pub_pipelines = PublishedPipeline.get_all(ws)
    for p in all_pub_pipelines:
        if p.name == name:
            return p
    return None
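
A quick usage sketch; ws is assumed to be an already-authenticated Workspace and the pipeline name is hypothetical:

pipeline = find_pipeline(ws, "prednet_UCSDped1")
if pipeline is None:
    print("No published pipeline with that name")
else:
    print("Found:", pipeline.name, pipeline.id, pipeline.endpoint)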
Example #3
print("Blobstore's name: {}".format(def_blob_store.name))

# create a list of datasets stored in blob
print("Checking for new datasets")
blob_service = BlockBlobService(def_blob_store.account_name, def_blob_store.account_key)
generator = blob_service.list_blobs(def_blob_store.container_name, prefix="prednet/data/video")
datasets = []
for blob in generator:
    # Blob names look like prednet/data/video/<dataset>/..., so the
    # dataset name is the fourth path segment:
    dataset = blob.name.split('/')[3]
    if dataset not in datasets and dataset.startswith("UCSD") and not dataset.endswith("txt"):
        datasets.append(dataset)
        print("Found dataset:", dataset)

# Get all published pipeline objects in the workspace
all_pub_pipelines = PublishedPipeline.get_all(ws)

# Create a list of datasets for which we have (old) and don't have (new) a published pipeline
old_datasets = []
new_datasets = []
for dataset in datasets:
    for pub_pipeline in all_pub_pipelines:
        if pub_pipeline.name.endswith(dataset):
            old_datasets.append(dataset)
            break  # one matching pipeline is enough
    if dataset not in old_datasets:
        new_datasets.append(dataset)
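
Equivalently, the new-dataset filter can be written as a single comprehension over the same lists:

new_datasets = [
    d for d in datasets
    if not any(p.name.endswith(d) for p in all_pub_pipelines)
]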

for dataset in new_datasets:
    print("Creating pipeline for dataset", dataset)
    build_pipeline(dataset, ws, config)