Example 1
def test__get_jobs__get_job_summary(client):
    # Verify no jobs are returned before jobs are created.
    res = client.get_job_summary("")
    assert len(res.summaries) == 0

    res = client.get_jobs("")
    assert len(res.configs) == 0

    # Create two jobs under same role.
    test_dc_labrat_key = start_job_update(client, "test_dc_labrat_read.yaml",
                                          "start job update test/dc/labrat")
    test_dc_labrat_0_key = start_job_update(
        client, "test_dc_labrat0.yaml", "start job update test/dc/labrat0")

    # Different role should not show up.
    start_job_update(client, "test2_dc2_labrat2.yaml",
                     "start job update test2/dc2/labrat2")

    # Add some wait time for the Lucene index to build
    time.sleep(10)

    # Reduce instance count by 1 for test/dc/labrat0
    client.kill_tasks(
        test_dc_labrat_0_key,
        {0},
        "killing instance 0 for task test/dc/labrat0",
    )
    wait_for_killed(client, test_dc_labrat_0_key, {0})

    # Ensure get_job_summary returns both jobs under role=test.
    res = client.get_job_summary(test_dc_labrat_key.role)
    assert len(res.summaries) == 2, "{jobs}".format(
        jobs=[s.job.key for s in res.summaries])

    assert_keys_equal(
        [s.job.key for s in res.summaries],
        [test_dc_labrat_key, test_dc_labrat_0_key],
    )

    for s in res.summaries:
        if s.job.key == test_dc_labrat_0_key:
            assert s.stats.activeTaskCount == 1
        else:
            assert s.stats.activeTaskCount == 2
        assert s.job.instanceCount == 2

    # Ensure get_jobs returns both jobs under role=test.
    res = client.get_jobs(test_dc_labrat_key.role)
    assert len(res.configs) == 2

    assert_keys_equal(
        [c.taskConfig.job for c in res.configs],
        [test_dc_labrat_key, test_dc_labrat_0_key],
    )

    for c in res.configs:
        if c.key == test_dc_labrat_0_key:
            assert c.instanceCount == 1
        else:
            assert c.instanceCount == 2
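
The snippets above and below call a few shared helpers that are not shown. A minimal sketch of assert_keys_equal, assuming a JobKey is uniquely identified by its (role, environment, name) triple (the real helper in the test suite may differ):

def assert_keys_equal(actual, expected, message=""):
    # Order-insensitive comparison of two collections of JobKeys, keyed
    # on the (role, environment, name) triple.
    def key_tuple(k):
        return (k.role, k.environment, k.name)

    assert sorted(map(key_tuple, actual)) == \
        sorted(map(key_tuple, expected)), message
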
Example 2
def test__auto_rollback_with_pinned_instances__stopped_instances(client):
    """
    1. Create a job (v1).
    2. Start update  on the first subset of instances (v2).
    3. Start update on second subset of instances (v3).
    4. Stop some instances.
    5. Start a bad update on a subset consisting of at least
       one instance in each of v1, v2, v3 and stopped
    6. The instances should rollback to their respective previous good versions.
       The stopped instances in the bad update should transit to running.
    """
    all_instances = set(xrange(10))
    # Create a job
    res = client.start_job_update(
        get_job_update_request("test_dc_labrat_large_job.yaml"),
        "start job update test/dc/labrat_large_job",
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job

    res = client.get_tasks_without_configs(
        api.TaskQuery(jobKeys={job_key},
                      statuses={api.ScheduleStatus.RUNNING}))
    assert len(res.tasks) == 10
    for t in res.tasks:
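        # taskId is assumed to take the form "<prefix>-<instance_id>-<run_id>";
        # the trailing run_id counts per-instance (re)starts.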
        _, _, run_id = t.assignedTask.taskId.rsplit("-", 2)
        assert run_id == "1"
        assert len(t.assignedTask.task.metadata) == 2
        for m in t.assignedTask.task.metadata:
            if m.key == "test_key_1":
                assert m.value == "test_value_1"
            elif m.key == "test_key_2":
                assert m.value == "test_value_2"
            else:
                assert False, "unexpected metadata %s" % m

    # start an update on the first subset of instances
    update_instances_1 = set([4, 5, 6, 7])
    pinned_req = get_job_update_request(
        "test_dc_labrat_large_job_diff_labels.yaml")
    pinned_req.settings.updateOnlyTheseInstances = set(
        [api.Range(first=i, last=i) for i in update_instances_1])

    res = client.start_job_update(
        pinned_req,
        "start job update test/dc/labrat_large_job with pinned instances",
    )
    wait_for_rolled_forward(client, res.key)

    # Start another update on the second subset of instances
    update_instances_2 = set([8, 9])
    pinned_req = get_job_update_request(
        "test_dc_labrat_large_job_new_config.yaml")
    pinned_req.settings.updateOnlyTheseInstances = set(
        [api.Range(first=i, last=i) for i in update_instances_2])

    res = client.start_job_update(
        pinned_req,
        "start another job update test/dc/labrat_large_job with pinned instances",
    )
    wait_for_rolled_forward(client, res.key)

    # Stop some instances
    stop_instances = set([5, 8])
    client.kill_tasks(
        job_key,
        stop_instances,
        "killing instance 5, 8 for job test/dc/labrat_large_job",
    )
    wait_for_killed(client, job_key, stop_instances)

    # Start a bad update
    bad_update_instances = set([0, 5, 6, 9])
    pinned_req = get_job_update_request(
        "test_dc_labrat_large_job_bad_config.yaml")
    pinned_req.settings.updateOnlyTheseInstances = set(
        [api.Range(first=i, last=i) for i in bad_update_instances])
    pinned_req.settings.maxFailedInstances = 1
    pinned_req.settings.maxPerInstanceFailures = 1
    pinned_req.settings.updateGroupSize = 2

    res = client.start_job_update(
        pinned_req,
        "start a bad update test/dc/labrat_large_job with pinned instances",
    )
    wait_for_rolled_back(client, res.key)

    res = client.get_tasks_without_configs(
        api.TaskQuery(jobKeys={job_key},
                      statuses={api.ScheduleStatus.RUNNING}))
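    # Instance 5 was stopped but is part of the bad update, so the rollback
    # brings it back up; instance 8 stays stopped. Expected running tasks:
    # (10 - 2) + 1 = 9.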
    assert len(res.tasks) == (
        len(all_instances - stop_instances) +
        len(bad_update_instances & stop_instances))

    for t in res.tasks:
        if t.assignedTask.instanceId in update_instances_1:
            assert len(t.assignedTask.task.metadata) == 2
            for m in t.assignedTask.task.metadata:
                if m.key == "test_key_11":
                    assert m.value == "test_value_11"
                elif m.key == "test_key_22":
                    assert m.value == "test_value_22"
                else:
                    assert False, "unexpected metadata %s" % m
        elif t.assignedTask.instanceId in update_instances_2:
            assert len(t.assignedTask.task.metadata) == 1
            for m in t.assignedTask.task.metadata:
                if m.key == "test_key_12":
                    assert m.value == "test_value_12"
                else:
                    assert False, "unexpected metadata %s" % m
        else:
            assert len(t.assignedTask.task.metadata) == 2
            for m in t.assignedTask.task.metadata:
                if m.key == "test_key_1":
                    assert m.value == "test_value_1"
                elif m.key == "test_key_2":
                    assert m.value == "test_value_2"
                else:
                    assert False, "unexpected metadata %s" % m

        if t.assignedTask.instanceId in (stop_instances -
                                         bad_update_instances):
            assert False, "unexpected start of stopped instance"
Example 3
def test__update_with_pinned_instances__deploy_stopped_instances_mixed(client):
    """
    test pinned instance deployment with mixed version and instance state
    1. start a regular update (version 1) on all instances
    2. stop subset of instances
    3. start a new update (version 2) targeting subset of instances
       (some of stopped instances included), expect targeted instances
       to be either brought up with newer version or updated with new
       version
    4. start regular update (version 1) again on another set of instances
       (some of previously stopped instances included, some of instances
       updated in previous step included), expect only stopped and
       instances affected previous step to be either brought up or updated
    """
    all_instances = set(xrange(10))

    # start a regular update
    res = client.start_job_update(
        get_job_update_request("test_dc_labrat_large_job.yaml"),
        "start job update test/dc/labrat_large_job",
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job

    res = client.get_tasks_without_configs(
        api.TaskQuery(jobKeys={job_key},
                      statuses={api.ScheduleStatus.RUNNING}))
    assert len(res.tasks) == len(all_instances)
    for t in res.tasks:
        _, _, run_id = t.assignedTask.taskId.rsplit("-", 2)
        assert run_id == "1"
        assert len(t.assignedTask.task.metadata) == 2
        for m in t.assignedTask.task.metadata:
            if m.key == "test_key_1":
                assert m.value == "test_value_1"
            elif m.key == "test_key_2":
                assert m.value == "test_value_2"
            else:
                assert False, "unexpected metadata %s" % m

    # stop subset of instances
    stop_instances = set([2, 8])
    client.kill_tasks(
        job_key,
        stop_instances,
        "killing instance 2, 8 for job test/dc/labrat_large_job",
    )
    wait_for_killed(client, job_key, stop_instances)
    res = client.get_tasks_without_configs(
        api.TaskQuery(jobKeys={job_key},
                      statuses={api.ScheduleStatus.RUNNING}))
    assert len(res.tasks) == len(all_instances - stop_instances)
    for t in res.tasks:
        assert t.assignedTask.instanceId in (all_instances - stop_instances)

    # start an update with the updateOnlyTheseInstances parameter;
    # expect only the instances targeted by updateOnlyTheseInstances
    # to be updated, and the stopped ones among them to be started.
    update_instances = set([3, 5, 8])
    pinned_req = get_job_update_request(
        "test_dc_labrat_large_job_diff_labels.yaml")
    pinned_req.settings.updateOnlyTheseInstances = set(
        [api.Range(first=i, last=i) for i in update_instances])

    res = client.start_job_update(
        pinned_req,
        "start second job update test/dc/labrat_large_job with pinned instances and label diff",
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job

    res = client.get_job_update_details(None, api.JobUpdateQuery(key=res.key))
    assert len(res.detailsList) == 1
    assert len(res.detailsList[0].instanceEvents) > 0
    for ie in res.detailsList[0].instanceEvents:
        assert ie.instanceId in update_instances

    res = client.get_tasks_without_configs(
        api.TaskQuery(jobKeys={job_key},
                      statuses={api.ScheduleStatus.RUNNING}))
    assert len(res.tasks) == len((all_instances - stop_instances)
                                 | update_instances)
    for t in res.tasks:
        _, _, run_id = t.assignedTask.taskId.rsplit("-", 2)
        assert len(t.assignedTask.task.metadata) == 2
        if t.assignedTask.instanceId in update_instances:
            assert run_id == "2"
            for m in t.assignedTask.task.metadata:
                if m.key == "test_key_11":
                    assert m.value == "test_value_11"
                elif m.key == "test_key_22":
                    assert m.value == "test_value_22"
                else:
                    assert False, (
                        "unexpected metadata %s for affected instances" % m)
        elif t.assignedTask.instanceId in (all_instances - stop_instances -
                                           update_instances):
            assert run_id == "1"
            for m in t.assignedTask.task.metadata:
                if m.key == "test_key_1":
                    assert m.value == "test_value_1"
                elif m.key == "test_key_2":
                    assert m.value == "test_value_2"
                else:
                    assert False, (
                        "unexpected metadata %s for affected instances" % m)
        else:
            assert False, ("unexpected instance id %s: should be stopped" %
                           t.assignedTask.instanceId)

    # start the regular update again, the same as the first one, targeting
    # a subset of instances.
    # expect an instance to be started / updated iff it has a different
    # config or is stopped.
    update_2_instances = set([2, 3, 8, 9])
    pinned_req_2 = get_job_update_request(
        "test_dc_labrat_large_job_diff_executor.yaml")
    pinned_req_2.settings.updateOnlyTheseInstances = set(
        [api.Range(first=i, last=i) for i in update_2_instances])

    res = client.start_job_update(
        pinned_req_2, "start third job update test/dc/labrat_large_job")
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job

    res = client.get_job_update_details(None, api.JobUpdateQuery(key=res.key))
    assert len(res.detailsList) == 1
    assert len(res.detailsList[0].instanceEvents) > 0
    for ie in res.detailsList[0].instanceEvents:
        # exclude instances that were previously running and are still on
        # the first update
        assert ie.instanceId in (
            update_2_instances -
            (all_instances - update_instances - stop_instances))

    # Expected instances for each corresponding state:
    #
    #   v1s  - instances on original job config (v1) and stopped
    #   v1r1 - instances on original job config (v1) and running with run id 1
    #   v1r2 - instances on original job config (v1) and running with run id 2
    #   v1r3 - instances on original job config (v1) and running with run id 3
    #   v2r2 - instances on updated job config (v2) and running with run id 2
    #
    # How did we calculate the instance ids?
    #
    # Let T1, T2, T3, T4 be the four operations:
    #   T1 - start the original update (v1 job config) on all instances (let it be A)
    #   T2 - stop a subset of instances (let it be S)
    #   T3 - start a new update (v2 job config) on a subset of instances (let it be U1)
    #   T4 - start the original update again (v1 job config) on a subset of instances (let it be U2)
    #
    # At T1:
    #   v1r1 = A
    #
    # At T2:
    #   v1s = S
    #   v1r1' = v1r1 - S = A - S
    #
    # At T3:
    #   v1s' = v1s - U1 = S - U1
    #   v2r1 = (empty set)
    #   v2r2 = U1
    #   v1r1'' = A - v2r2 - v1s' = A - U1 - (S - U1)
    #
    # At T4:
    #   v1s'' = v1s' - U2 = S - U1 - U2
    #   v1r2 = U2 & v1s' = U2 & (S - U1)
    #   v1r3 = U1 & U2
    #   v2r2' = v2r2 - U2 = U1 - U2
    #   v1r1''' = A - v1s'' - v1r2 - v1r3 - v2r2'
    v1s = stop_instances - update_instances - update_2_instances
    v1r2 = update_2_instances & (stop_instances - update_instances)
    v1r3 = update_instances & update_2_instances
    v2r2 = update_instances - update_2_instances
    v1r1 = all_instances - v1s - v1r2 - v1r3 - v2r2
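    # Concretely, with A = {0..9}, S = {2, 8}, U1 = {3, 5, 8} and
    # U2 = {2, 3, 8, 9}:
    #   v1s  = {2, 8} - {3, 5, 8} - {2, 3, 8, 9}  = {}
    #   v1r2 = {2, 3, 8, 9} & {2}                 = {2}
    #   v1r3 = {3, 5, 8} & {2, 3, 8, 9}           = {3, 8}
    #   v2r2 = {3, 5, 8} - {2, 3, 8, 9}           = {5}
    #   v1r1 = {0..9} - {2} - {3, 8} - {5}        = {0, 1, 4, 6, 7, 9}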

    assert not v1s, "no instances should remain stopped"
    assert v1r1, "expect instances to be in version 1 run id 1"
    assert v1r2, "expect instances to be in version 1 run id 2"
    assert v1r3, "expect instances to be in version 1 run id 3"
    assert v2r2, "expect instances to be in version 2 run id 2"

    res = client.get_tasks_without_configs(
        api.TaskQuery(jobKeys={job_key},
                      statuses={api.ScheduleStatus.RUNNING}))
    assert len(res.tasks) == len(all_instances)
    for t in res.tasks:
        _, _, run_id = t.assignedTask.taskId.rsplit("-", 2)
        assert len(t.assignedTask.task.metadata) == 2

        if t.assignedTask.instanceId in v1r1:
            # version 1, run 1
            assert run_id == "1"
            for m in t.assignedTask.task.metadata:
                if m.key == "test_key_1":
                    assert m.value == "test_value_1"
                elif m.key == "test_key_2":
                    assert m.value == "test_value_2"
                else:
                    assert False, (
                        "unexpected metadata %s for affected instances" % m)

        elif t.assignedTask.instanceId in v1r2:
            # version 1, run 2
            assert run_id == "2"
            for m in t.assignedTask.task.metadata:
                if m.key == "test_key_1":
                    assert m.value == "test_value_1"
                elif m.key == "test_key_2":
                    assert m.value == "test_value_2"
                else:
                    assert False, (
                        "unexpected metadata %s for affected instances" % m)

        elif t.assignedTask.instanceId in v1r3:
            # version 1, run 3
            assert run_id == "3"
            for m in t.assignedTask.task.metadata:
                if m.key == "test_key_1":
                    assert m.value == "test_value_1"
                elif m.key == "test_key_2":
                    assert m.value == "test_value_2"
                else:
                    assert False, (
                        "unexpected metadata %s for affected instances" % m)

        elif t.assignedTask.instanceId in v2r2:
            # version 2, run 2
            assert run_id == "2"
            for m in t.assignedTask.task.metadata:
                if m.key == "test_key_11":
                    assert m.value == "test_value_11"
                elif m.key == "test_key_22":
                    assert m.value == "test_value_22"
                else:
                    assert False, (
                        "unexpected metadata %s for affected instances" % m)

        else:
            assert False, ("unexpected instance id %s" %
                           t.assignedTask.instanceId)
Example 4
def test__update_with_pinned_instances__deploy_stopped_instances(client):
    """
    test pinned instance deployment with stop / deploy instances:
    1. start a regular update (version 1) on all instances
    2. stop subset of instances
    3. start a new update (version 2) targeting subset of instances
       (stopped instances included), expect stopped instances to be
       brought up with new version and other targeted instances to
       be updated
    4. start regular update (version 1) again on all instances, expect
       only instances affected by previous step to be updated
    """
    all_instances = set(xrange(10))

    # start a regular update
    res = client.start_job_update(
        get_job_update_request("test_dc_labrat_large_job.yaml"),
        "start job update test/dc/labrat_large_job",
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job

    res = client.get_tasks_without_configs(
        api.TaskQuery(jobKeys={job_key},
                      statuses={api.ScheduleStatus.RUNNING}))
    assert len(res.tasks) == len(all_instances)
    for t in res.tasks:
        _, _, run_id = t.assignedTask.taskId.rsplit("-", 2)
        assert run_id == "1"
        assert len(t.assignedTask.task.metadata) == 2
        for m in t.assignedTask.task.metadata:
            if m.key == "test_key_1":
                assert m.value == "test_value_1"
            elif m.key == "test_key_2":
                assert m.value == "test_value_2"
            else:
                assert False, "unexpected metadata %s" % m

    # stop subset of instances
    stop_instances = set([2, 8])
    client.kill_tasks(
        job_key,
        stop_instances,
        "killing instance 2, 8 for job test/dc/labrat_large_job",
    )
    wait_for_killed(client, job_key, stop_instances)
    res = client.get_tasks_without_configs(
        api.TaskQuery(jobKeys={job_key},
                      statuses={api.ScheduleStatus.RUNNING}))
    assert len(res.tasks) == len(all_instances - stop_instances)
    for t in res.tasks:
        assert t.assignedTask.instanceId in (all_instances - stop_instances)

    # start an update with the updateOnlyTheseInstances parameter;
    # expect the stopped instances to be started
    update_instances = set([2, 3, 5, 8])
    pinned_req = get_job_update_request(
        "test_dc_labrat_large_job_diff_labels.yaml")
    pinned_req.settings.updateOnlyTheseInstances = set(
        [api.Range(first=i, last=i) for i in update_instances])

    res = client.start_job_update(
        pinned_req,
        "start second job update test/dc/labrat_large_job with pinned instances and label diff",
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job

    res = client.get_job_update_details(None, api.JobUpdateQuery(key=res.key))
    assert len(res.detailsList) == 1
    assert len(res.detailsList[0].instanceEvents) > 0
    for ie in res.detailsList[0].instanceEvents:
        assert ie.instanceId in update_instances

    res = client.get_tasks_without_configs(
        api.TaskQuery(jobKeys={job_key},
                      statuses={api.ScheduleStatus.RUNNING}))
    assert len(res.tasks) == len(all_instances)
    for t in res.tasks:
        _, _, run_id = t.assignedTask.taskId.rsplit("-", 2)
        assert len(t.assignedTask.task.metadata) == 2
        if t.assignedTask.instanceId in update_instances:
            assert run_id == "2"
            for m in t.assignedTask.task.metadata:
                if m.key == "test_key_11":
                    assert m.value == "test_value_11"
                elif m.key == "test_key_22":
                    assert m.value == "test_value_22"
                else:
                    assert False, (
                        "unexpected metadata %s for affected instances" % m)
        elif t.assignedTask.instanceId in (all_instances - update_instances):
            assert run_id == "1"
            for m in t.assignedTask.task.metadata:
                if m.key == "test_key_1":
                    assert m.value == "test_value_1"
                elif m.key == "test_key_2":
                    assert m.value == "test_value_2"
                else:
                    assert False, (
                        "unexpected metadata %s for affected instances" % m)
        else:
            assert False, ("unexpected instance id %s" %
                           t.assignedTask.instanceId)

    # start the regular update again, the same as the first one;
    # expect changes only for the instances updated by the previous update
    res = client.start_job_update(
        get_job_update_request("test_dc_labrat_large_job_diff_executor.yaml"),
        "start third job update test/dc/labrat_large_job",
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job

    res = client.get_job_update_details(None, api.JobUpdateQuery(key=res.key))
    assert len(res.detailsList) == 1
    assert len(res.detailsList[0].instanceEvents) > 0
    for ie in res.detailsList[0].instanceEvents:
        assert ie.instanceId in update_instances

    res = client.get_tasks_without_configs(
        api.TaskQuery(jobKeys={job_key},
                      statuses={api.ScheduleStatus.RUNNING}))
    assert len(res.tasks) == len(all_instances)
    for t in res.tasks:
        _, _, run_id = t.assignedTask.taskId.rsplit("-", 2)
        assert len(t.assignedTask.task.metadata) == 2
        if t.assignedTask.instanceId in update_instances:
            assert run_id == "3"
        elif t.assignedTask.instanceId in (all_instances - update_instances):
            assert run_id == "1"
        else:
            assert False, ("unexpected instance id %s" %
                           t.assignedTask.instanceId)

        for m in t.assignedTask.task.metadata:
            if m.key == "test_key_1":
                assert m.value == "test_value_1"
            elif m.key == "test_key_2":
                assert m.value == "test_value_2"
            else:
                assert False, (
                    "unexpected metadata %s for affected instances" % m)
Example 5
def test__update_with_pinned_instances__start_stopped_instances_all(client):
    """
    test pinned instance deployment with stop / start all instances:
    1. start a regular update (version 1) on all instances
    2. stop all instances
    3. start the same update (version 1) on all instances (stopped
       instances included), expect all instances to be updated and
       start running
    4. start regular update (version 1) again on all instances, expect
       no change on all instances
    """
    all_instances = set(xrange(10))

    # start a regular update
    res = client.start_job_update(
        get_job_update_request("test_dc_labrat_large_job.yaml"),
        "start job update test/dc/labrat_large_job",
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job

    res = client.get_tasks_without_configs(
        api.TaskQuery(jobKeys={job_key},
                      statuses={api.ScheduleStatus.RUNNING}))
    assert len(res.tasks) == len(all_instances)
    for t in res.tasks:
        _, _, run_id = t.assignedTask.taskId.rsplit("-", 2)
        assert run_id == "1"
        assert len(t.assignedTask.task.metadata) == 2
        for m in t.assignedTask.task.metadata:
            if m.key == "test_key_1":
                assert m.value == "test_value_1"
            elif m.key == "test_key_2":
                assert m.value == "test_value_2"
            else:
                assert False, "unexpected metadata %s" % m

    # stop all instances
    stop_instances = set(xrange(10))
    client.kill_tasks(
        job_key,
        stop_instances,
        "killing all instances for job test/dc/labrat_large_job",
    )
    wait_for_killed(client, job_key, stop_instances)
    res = client.get_tasks_without_configs(
        api.TaskQuery(jobKeys={job_key},
                      statuses={api.ScheduleStatus.RUNNING}))
    assert len(res.tasks) == 0

    # start an update without the updateOnlyTheseInstances parameter;
    # expect all instances to be started
    update_instances = set(xrange(10))

    res = client.start_job_update(
        get_job_update_request("test_dc_labrat_large_job_diff_executor.yaml"),
        "start second job update test/dc/labrat_large_job",
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job

    res = client.get_job_update_details(None, api.JobUpdateQuery(key=res.key))
    assert len(res.detailsList) == 1
    assert len(res.detailsList[0].instanceEvents) > 0
    for ie in res.detailsList[0].instanceEvents:
        assert ie.instanceId in (update_instances & stop_instances)

    res = client.get_tasks_without_configs(
        api.TaskQuery(jobKeys={job_key},
                      statuses={api.ScheduleStatus.RUNNING}))
    assert len(res.tasks) == len(all_instances)
    for t in res.tasks:
        _, _, run_id = t.assignedTask.taskId.rsplit("-", 2)
        assert run_id == "2"

        assert len(t.assignedTask.task.metadata) == 2
        for m in t.assignedTask.task.metadata:
            if m.key == "test_key_1":
                assert m.value == "test_value_1"
            elif m.key == "test_key_2":
                assert m.value == "test_value_2"
            else:
                assert False, (
                    "unexpected metadata %s for affected instances" % m)

    # start the regular update again, the same as the first one;
    # expect no change to any instance
    res = client.start_job_update(
        get_job_update_request("test_dc_labrat_large_job.yaml"),
        "start third job update test/dc/labrat_large_job",
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job

    res = client.get_job_update_details(None, api.JobUpdateQuery(key=res.key))
    assert len(res.detailsList) == 1
    assert res.detailsList[0].instanceEvents is None

    res = client.get_tasks_without_configs(
        api.TaskQuery(jobKeys={job_key},
                      statuses={api.ScheduleStatus.RUNNING}))
    assert len(res.tasks) == len(all_instances)
    for t in res.tasks:
        _, _, run_id = t.assignedTask.taskId.rsplit("-", 2)
        if t.assignedTask.instanceId in stop_instances:
            assert run_id == "2"
        elif t.assignedTask.instanceId in (all_instances - stop_instances):
            assert run_id == "1"
        else:
            assert False, ("unexpected instance id %s" %
                           t.assignedTask.instanceId)

        assert len(t.assignedTask.task.metadata) == 2
        for m in t.assignedTask.task.metadata:
            if m.key == "test_key_1":
                assert m.value == "test_value_1"
            elif m.key == "test_key_2":
                assert m.value == "test_value_2"
            else:
                assert False, (
                    "unexpected metadata %s for affected instances" % m)
Example 6
def test__update_with_pinned_instances__stopped_instances(client):
    """
    test pinned instance deployment with stopped instances:
    1. start a regular update (version 1) on all instances
    2. stop subset of instances
    3. start another update (version 2) targeting subset of instances
       (stopped instances not included), expect only targeted instances
       to be updated and stopped instances remain stopped
    """
    all_instances = set(xrange(10))

    # start a regular update
    res = client.start_job_update(
        get_job_update_request("test_dc_labrat_large_job.yaml"),
        "start job update test/dc/labrat_large_job",
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job

    res = client.get_tasks_without_configs(
        api.TaskQuery(jobKeys={job_key},
                      statuses={api.ScheduleStatus.RUNNING}))
    assert len(res.tasks) == len(all_instances)
    for t in res.tasks:
        _, _, run_id = t.assignedTask.taskId.rsplit("-", 2)
        assert run_id == "1"
        assert len(t.assignedTask.task.metadata) == 2
        for m in t.assignedTask.task.metadata:
            if m.key == "test_key_1":
                assert m.value == "test_value_1"
            elif m.key == "test_key_2":
                assert m.value == "test_value_2"
            else:
                assert False, "unexpected metadata %s" % m

    # stop subset of instances
    stop_instances = set([1, 6])
    client.kill_tasks(
        job_key,
        stop_instances,
        "killing instance 1, 6 for job test/dc/labrat_large_job",
    )
    wait_for_killed(client, job_key, stop_instances)
    res = client.get_tasks_without_configs(
        api.TaskQuery(jobKeys={job_key},
                      statuses={api.ScheduleStatus.RUNNING}))
    assert len(res.tasks) == len(all_instances - stop_instances)
    for t in res.tasks:
        assert t.assignedTask.instanceId in (all_instances - stop_instances)

    # start an update with the updateOnlyTheseInstances parameter
    update_instances = set([0, 2, 3, 7, 9])
    pinned_req = get_job_update_request(
        "test_dc_labrat_large_job_diff_labels.yaml")
    pinned_req.settings.updateOnlyTheseInstances = set(
        [api.Range(first=i, last=i) for i in update_instances])

    res = client.start_job_update(
        pinned_req,
        "start job update test/dc/labrat_large_job with pinned instances",
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job

    res = client.get_job_update_details(None, api.JobUpdateQuery(key=res.key))
    assert len(res.detailsList) == 1
    assert len(res.detailsList[0].instanceEvents) > 0
    for ie in res.detailsList[0].instanceEvents:
        assert ie.instanceId in update_instances

    res = client.get_tasks_without_configs(
        api.TaskQuery(jobKeys={job_key},
                      statuses={api.ScheduleStatus.RUNNING}))
    assert len(res.tasks) == len(all_instances - stop_instances)

    # expect instances 0, 2, 3, 7, 9 to be updated to the newer version, with run id 2
    # expect instances 1, 6 to remain stopped
    # expect instances 4, 5, 8 to remain at the original version, with run id 1
    for t in res.tasks:
        _, _, run_id = t.assignedTask.taskId.rsplit("-", 2)
        if t.assignedTask.instanceId in update_instances:
            assert run_id == "2"
            assert len(t.assignedTask.task.metadata) == 2
            for m in t.assignedTask.task.metadata:
                if m.key == "test_key_11":
                    assert m.value == "test_value_11"
                elif m.key == "test_key_22":
                    assert m.value == "test_value_22"
                else:
                    assert False, (
                        "unexpected metadata %s for affected instances" % m)
        elif t.assignedTask.instanceId in (all_instances - stop_instances):
            assert run_id == "1"
            assert len(t.assignedTask.task.metadata) == 2
            for m in t.assignedTask.task.metadata:
                if m.key == "test_key_1":
                    assert m.value == "test_value_1"
                elif m.key == "test_key_2":
                    assert m.value == "test_value_2"
                else:
                    assert False, (
                        "unexpected metadata %s for unaffected instances" % m)
        else:
            assert False, ("unexpected instance id %s: should be stopped" %
                           t.assignedTask.instanceId)
Example 7
def test__get_tasks_without_configs_task_queries(client):
    # Verify no tasks are returned before any jobs are created.
    res = client.get_tasks_without_configs(api.TaskQuery())
    assert len(res.tasks) == 0

    # Create jobs.
    test_dc_labrat_key = start_job_update(client, "test_dc_labrat_read.yaml",
                                          "start job update test/dc/labrat")
    test_dc_labrat_0_key = start_job_update(
        client, "test_dc_labrat0.yaml", "start job update test/dc/labrat0")
    test_dc_0_labrat_1_key = start_job_update(
        client, "test_dc0_labrat1.yaml", "start job update test/dc0/labrat1")
    test_dc_labrat_1_key = start_job_update(
        client, "test_dc_labrat1.yaml", "start job update test/dc/labrat1")
    test2_dc2_labrat2_key = start_job_update(
        client, "test2_dc2_labrat2.yaml", "start job update test2/dc2/labrat2")

    # Add some wait time for the Lucene index to build
    time.sleep(10)

    # Kill one of the jobs.
    client.kill_tasks(test_dc_labrat_1_key, None,
                      "killing all tasks test/dc/labrat1")
    wait_for_killed(client, test_dc_labrat_1_key)

    for message, query, expected_job_keys in [
        (
            "query job keys",
            api.TaskQuery(jobKeys={
                test_dc_labrat_key,
                test_dc_labrat_0_key,
                test2_dc2_labrat2_key,
            }),
            [test_dc_labrat_key, test_dc_labrat_0_key, test2_dc2_labrat2_key],
        ),
        (
            "query role + env + name",
            api.TaskQuery(
                role=test_dc_labrat_key.role,
                environment=test_dc_labrat_key.environment,
                jobName=test_dc_labrat_key.name,
            ),
            [test_dc_labrat_key],
        ),
        (
            "query role + env",
            api.TaskQuery(
                role=test_dc_labrat_key.role,
                environment=test_dc_labrat_key.environment,
            ),
            [test_dc_labrat_key, test_dc_labrat_0_key, test_dc_labrat_1_key],
        ),
        (
            "query role",
            api.TaskQuery(role=test_dc_labrat_key.role),
            [
                test_dc_labrat_key,
                test_dc_labrat_0_key,
                test_dc_labrat_1_key,
                test_dc_0_labrat_1_key,
            ],
        ),
        (
            "query role + statuses",
            api.TaskQuery(
                role=test_dc_labrat_key.role,
                statuses={api.ScheduleStatus.RUNNING},
            ),
            [test_dc_labrat_key, test_dc_labrat_0_key, test_dc_0_labrat_1_key],
        ),
    ]:
        res = client.get_tasks_without_configs(query)
        # Expect 2 tasks per job key (each of these jobs runs 2 instances).
        assert len(res.tasks) == len(expected_job_keys) * 2, message
        assert_keys_equal(
            remove_duplicate_keys(t.assignedTask.task.job for t in res.tasks),
            expected_job_keys,
            message=message,
        )
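
The remove_duplicate_keys helper used in this example is also not shown. A minimal sketch under the same assumption that JobKeys are identified by (role, environment, name); the real helper may differ:

def remove_duplicate_keys(keys):
    # Collapse duplicate JobKeys while preserving first-seen order.
    seen = set()
    result = []
    for k in keys:
        t = (k.role, k.environment, k.name)
        if t not in seen:
            seen.add(t)
            result.append(k)
    return result
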
Example 8
def test__get_tasks_without_configs_task_queries(client):
    # Verify no tasks are returned before any jobs are created.
    res = client.get_tasks_without_configs(api.TaskQuery())
    assert len(res.tasks) == 0

    # Create jobs.
    test_dc_labrat_key = start_job_update(client, 'test_dc_labrat_read.yaml',
                                          'start job update test/dc/labrat')
    test_dc_labrat_0_key = start_job_update(
        client, 'test_dc_labrat0.yaml', 'start job update test/dc/labrat0')
    test_dc_0_labrat_1_key = start_job_update(
        client, 'test_dc0_labrat1.yaml', 'start job update test/dc0/labrat1')
    test_dc_labrat_1_key = start_job_update(
        client, 'test_dc_labrat1.yaml', 'start job update test/dc/labrat1')
    test2_dc2_labrat2_key = start_job_update(
        client, 'test2_dc2_labrat2.yaml', 'start job update test2/dc2/labrat2')

    # Kill one of the jobs.
    client.kill_tasks(test_dc_labrat_1_key, {0, 1},
                      'killing all tasks test/dc/labrat1')
    wait_for_killed(client, test_dc_labrat_1_key)

    for message, query, expected_job_keys in [
        (
            'query job keys',
            api.TaskQuery(jobKeys={
                test_dc_labrat_key,
                test_dc_labrat_0_key,
                test2_dc2_labrat2_key,
            }),
            [
                test_dc_labrat_key,
                test_dc_labrat_0_key,
                test2_dc2_labrat2_key,
            ],
        ),
        (
            'query role + env + name',
            api.TaskQuery(
                role=test_dc_labrat_key.role,
                environment=test_dc_labrat_key.environment,
                jobName=test_dc_labrat_key.name,
            ),
            [test_dc_labrat_key],
        ),
        (
            'query role + env',
            api.TaskQuery(
                role=test_dc_labrat_key.role,
                environment=test_dc_labrat_key.environment,
            ),
            [
                test_dc_labrat_key,
                test_dc_labrat_0_key,
                test_dc_labrat_1_key,
            ],
        ),
        (
            'query role',
            api.TaskQuery(role=test_dc_labrat_key.role),
            [
                test_dc_labrat_key,
                test_dc_labrat_0_key,
                test_dc_labrat_1_key,
                test_dc_0_labrat_1_key,
            ],
        ),
        (
            'query role + statuses',
            api.TaskQuery(
                role=test_dc_labrat_key.role,
                statuses={api.ScheduleStatus.RUNNING},
            ),
            [
                test_dc_labrat_key,
                test_dc_labrat_0_key,
                test_dc_0_labrat_1_key,
            ],
        )
    ]:
        res = client.get_tasks_without_configs(query)
        # Expect 2 tasks per job key (each of these jobs runs 2 instances).
        assert len(res.tasks) == len(expected_job_keys) * 2, message
        assert_keys_equal(remove_duplicate_keys(t.assignedTask.task.job
                                                for t in res.tasks),
                          expected_job_keys,
                          message=message)