def _execute_and_cancel_execution(self,
                                      force=False,
                                      kill_cancel=False,
                                      wait_for_component=True,
                                      verify_intermediate_state=True):
        # component's blueprint
        sleep_blueprint = resource('dsl/sleep_node.yaml')
        self.client.blueprints.upload(sleep_blueprint, entity_id='basic')

        main_blueprint = resource(
            'dsl/component_with_blueprint_id.yaml')
        test_id = uuid.uuid1()
        blueprint_id = 'blueprint_{0}'.format(test_id)
        deployment_id = 'deployment_{0}'.format(test_id)
        self.client.blueprints.upload(main_blueprint, blueprint_id)
        self.client.deployments.create(blueprint_id, deployment_id,
                                       skip_plugins_validation=True)
        do_retries(verify_deployment_env_created,
                   30,
                   deployment_id=deployment_id)
        execution = self.client.executions.start(deployment_id, 'install')

        if wait_for_component:
            self._wait_for_component_install('component')

        execution = self.client.executions.cancel(execution.id,
                                                  force,
                                                  kill=kill_cancel)
        self._verify_cancel_install_execution(execution,
                                              force,
                                              kill_cancel,
                                              verify_intermediate_state)

        return execution
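# A minimal sketch of what the _wait_for_component_install helper used above
# might look like -- a hypothetical polling loop, not the suite's actual
# implementation. It assumes the component deployment's 'install' execution
# just needs to appear and leave the 'pending' state.
import time

def _wait_for_component_install(self, deployment_id, timeout=60):
    deadline = time.time() + timeout
    while time.time() < deadline:
        executions = self.client.executions.list(
            deployment_id=deployment_id, workflow_id='install')
        # 'pending' matches the lowercase Execution status constants
        if executions and executions[0].status != 'pending':
            return executions[0]
        time.sleep(1)
    raise RuntimeError('component install did not start '
                       'within {0}s'.format(timeout))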
 def _local_task_fail_impl(self, wf_name):
     if self.do_get:
         self.deploy_and_execute_workflow(
             resource('dsl/workflow_api.yaml'), wf_name,
             parameters={'do_get': self.do_get})
     else:
         self.assertRaises(RuntimeError,
                           self.deploy_and_execute_workflow,
                           resource('dsl/workflow_api.yaml'),
                           wf_name,
                           parameters={'do_get': self.do_get})
 def test_component_creation_with_blueprint_id(self):
     basic_blueprint_path = resource('dsl/basic.yaml')
     self.client.blueprints.upload(basic_blueprint_path,
                                   entity_id='basic')
     deployment_id = 'd{0}'.format(uuid.uuid4())
     dsl_path = resource('dsl/component_with_blueprint_id.yaml')
     self.deploy_application(dsl_path, deployment_id=deployment_id)
     self.assertTrue(self.client.deployments.get(self.component_name))
     self.undeploy_application(deployment_id, is_delete_deployment=True)
     self.assertRaises(CloudifyClientError,
                       self.client.deployments.get,
                       self.component_name)
     self.assertRaises(CloudifyClientError,
                       self.client.deployments.get,
                       deployment_id)
    def test_delete_botched_deployment(self):
        dsl_path = resource('dsl/basic.yaml')
        _id = uuid.uuid1()
        blueprint_id = 'blueprint_{0}'.format(_id)
        deployment_id = 'deployment_{0}'.format(_id)

        data = {deployment_id: {'raise_exception_on_delete': True}}
        agent_json_path = os.path.join(PLUGIN_STORAGE_DIR, 'agent.json')
        self.write_data_to_file_on_manager(
            data,
            agent_json_path,
            to_json=True,
            owner=CLOUDIFY_USER
        )

        self.client.blueprints.upload(dsl_path, blueprint_id)
        self.client.deployments.create(blueprint_id, deployment_id,
                                       skip_plugins_validation=True)
        execution = self.client.executions.list(deployment_id=deployment_id)[0]
        self.wait_for_execution_to_end(execution)

        self.client.deployments.delete(deployment_id)
        wait_for_deployment_deletion_to_complete(deployment_id)
        try:
            self.client.deployments.get(deployment_id)
            self.fail("Expected deployment to be deleted")
        except CloudifyClientError as e:
            self.assertEqual(404, e.status_code)
    def test_schedule_execution_while_execution_running_under_same_dep(self):
        """
        Start an execution and while it is running schedule an execution
        for the future, under the same deployment.

        """
        # The token in the container is invalid; create a new valid one
        create_api_token()
        dsl_path = resource('dsl/sleep_workflows.yaml')
        dep = self.deploy(dsl_path, wait=False, client=self.client)
        dep_id = dep.id
        do_retries(verify_deployment_env_created, 30, deployment_id=dep_id)
        execution1 = self.client.executions.start(deployment_id=dep_id,
                                                  workflow_id='install')
        self._wait_for_exec_to_end_and_modify_status(execution1,
                                                     Execution.STARTED)

        scheduled_time = generate_scheduled_for_date()
        execution2 = self.client.executions.start(deployment_id=dep_id,
                                                  workflow_id='install',
                                                  schedule=scheduled_time)
        self._assert_execution_status(execution2.id, Execution.SCHEDULED)

        self.client.executions.update(execution1.id, Execution.TERMINATED)

        time.sleep(62)  # Wait for exec to 'wake up'
        self.wait_for_execution_to_end(execution2)
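# Hypothetical sketch of the generate_scheduled_for_date() helper used by the
# scheduling tests, assuming it returns an ISO-8601 timestamp about a minute
# ahead -- consistent with the time.sleep(62) 'wake up' waits above. Not
# necessarily the suite's real implementation.
from datetime import datetime, timedelta

def generate_scheduled_for_date():
    # one minute from now, UTC (assumed format)
    return (datetime.utcnow() + timedelta(minutes=1)).isoformat()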
    def test_deployment_logs(self):
        message = 'TEST MESSAGE'
        inputs = {'message': message}

        dsl_path = resource("dsl/deployment_logs.yaml")
        deployment, _ = self.deploy_application(dsl_path, inputs=inputs)

        deployment_log_path = ('/var/log/cloudify/mgmtworker/logs/{0}.log'
                               .format(deployment.id))

        def read_deployment_logs():
            return self.read_manager_file(deployment_log_path, no_strip=True)

        def verify_logs_exist_with_content():
            deployment_logs = read_deployment_logs()
            self.assertIn(message, deployment_logs)
            return len(deployment_logs)

        log_file_size = verify_logs_exist_with_content()

        self.undeploy_application(deployment.id, is_delete_deployment=True)

        # Verify the log file is truncated on deployment delete
        self._assert_log_file_truncated(read_deployment_logs, log_file_size)

        deployment, _ = self.deploy_application(
                dsl_path, inputs=inputs,
                deployment_id=deployment.id)

        # Verify new deployment with the same deployment id
        # can write to the previous location.
        verify_logs_exist_with_content()
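# A hedged sketch of the _assert_log_file_truncated helper called above,
# assuming it simply re-reads the deployment log and asserts the file shrank
# after the deployment was deleted; the real helper may retry with a timeout.
def _assert_log_file_truncated(self, read_logs_func, previous_size):
    self.assertLess(len(read_logs_func()), previous_size)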
 def setUp(self):
     super(BaseExistingVMTest, self).setUp()
     self.setup_deployment_id = 'd{0}'.format(uuid.uuid4())
     self.setup_node_id = 'setup_host'
     dsl_path = resource("dsl/agent_tests/existing-vm-setup.yaml")
     self.deploy_application(dsl_path,
                             deployment_id=self.setup_deployment_id)
 def test_post_source_started_location_target(self):
     dsl_path = resource(
         "dsl/relationship_interface_post_source_location_target.yaml")
     deployment, _ = self.deploy_application(dsl_path)
     self.verify_assertions(deployment.id,
                            hook='post-init',
                            runs_on_source=False)
    def _upload_v_2_0_plugin(self):
        source_dir = resource('plugins/{0}'.format(self.plugin_name))
        target_dir = os.path.join(self.workdir, self.plugin_name)
        shutil.copytree(source_dir, target_dir)

        self._replace_version(target_dir)
        self.upload_mock_plugin(self.plugin_name, plugin_path=target_dir)
    def test_given_deployment_name_with_auto_inc_suffix_option(self):
        basic_blueprint_path = resource('dsl/basic.yaml')
        self.client.blueprints.upload(basic_blueprint_path,
                                      entity_id='basic')
        deployment_id = 'd{0}'.format(uuid.uuid4())
        main_blueprint = """
tosca_definitions_version: cloudify_dsl_1_3

imports:
  - cloudify/types/types.yaml

node_templates:

  component_node:
    type: cloudify.nodes.Component
    properties:
      resource_config:
        blueprint:
          external_resource: true
          id: basic
        deployment:
          id: component
          auto_inc_suffix: true
    capabilities:
        scalable:
            properties:
                default_instances: 2
"""
        blueprint_path = self.make_yaml_file(main_blueprint)
        self.deploy_application(blueprint_path, deployment_id=deployment_id)
        deployments = self.client.deployments.list(_include=['id'])
        self.assertEqual(len(deployments), 3)
        self.undeploy_application(deployment_id, is_delete_deployment=True)
        deployments = self.client.deployments.list(_include=['id'])
        self.assertEqual(len(deployments), 0)
    def test_simple(self):
        parameters = {
            'do_get': self.do_get,
            'key': 'key1',
            'value': 'value1'
        }
        result_dict = {
            'key1': 'value1'
        }
        deployment, _ = self.deploy_and_execute_workflow(
                resource('dsl/workflow_api.yaml'),
                self._testMethodName,
                parameters=parameters)

        # testing workflow remote task
        invocation = self.get_plugin_data(
            plugin_name='testmockoperations',
            deployment_id=deployment.id
        )['mock_operation_invocation'][0]
        self.assertDictEqual(result_dict, invocation)

        # testing workflow local task
        instance = self.client.node_instances.list(
            deployment_id=deployment.id)[0]
        self.assertEqual('test_state', instance.state)
    def test_queue_system_execution_while_execution_is_running(self):

        # Create deployment and start 'install' execution
        dsl_path = resource("dsl/basic.yaml")
        deployment = self.deploy(dsl_path)
        execution = self.execute_workflow(workflow_name='install',
                                          deployment_id=deployment.id,
                                          wait_for_execution=False,
                                          queue=True)
        # Make sure the install execution stays 'started'
        execution = self._wait_for_exec_to_end_and_modify_status(
            execution, Execution.STARTED)

        # Create a system execution and make sure it's being queued
        second_snap = self.client.snapshots.create('snapshot_2',
                                                   include_metrics=True,
                                                   include_credentials=True,
                                                   include_logs=True,
                                                   include_events=True,
                                                   queue=True)
        self._assert_execution_status(second_snap.id, Execution.QUEUED)

        # Update the install execution's state to 'terminated' so the queued
        # snapshot will start.
        self._update_to_terminated_and_assert_propper_dequeue(
            execution.id, second_snap.id)
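# Hypothetical sketch of the _assert_execution_status helper used throughout
# these queueing tests: a single fetch-and-compare on the execution's status.
def _assert_execution_status(self, execution_id, expected_status):
    execution = self.client.executions.get(execution_id)
    self.assertEqual(expected_status, execution.status)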
 def test_illegal_non_graph_to_graph_mode(self):
     if not self.do_get:
         # no need to run twice
         return
     self.assertRaises(RuntimeError, self.deploy_and_execute_workflow,
                       resource('dsl/workflow_api.yaml'),
                       self._testMethodName)
    def test_queue_execution_while_execution_is_running_under_same_dep(self):

        # Create deployment
        dsl_path = resource("dsl/basic.yaml")
        deployment = self.deploy(dsl_path)

        # Start the 'install' execution
        execution_1 = self.execute_workflow(workflow_name='install',
                                            deployment_id=deployment.id,
                                            wait_for_execution=False,
                                            queue=True)

        # Make sure the install execution stays 'started'
        execution_1 = self._wait_for_exec_to_end_and_modify_status(
            execution_1, Execution.STARTED)

        # Start a second execution ('uninstall') under the same deployment
        # and assert it's being queued
        execution_2 = self.execute_workflow(workflow_name='uninstall',
                                            deployment_id=deployment.id,
                                            wait_for_execution=False,
                                            queue=True)
        self._assert_execution_status(execution_2.id, Execution.QUEUED)

        # Update the first execution's state to 'terminated' so the second
        # execution will start.
        self._update_to_terminated_and_assert_propper_dequeue(
            execution_1.id, execution_2.id)
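# Hypothetical sketch of _update_to_terminated_and_assert_propper_dequeue,
# assuming it follows the same pattern spelled out in
# test_deployment_statuses_after_queued_execution_finish below: terminate the
# running execution, then wait for the queued one to be dequeued and finish.
def _update_to_terminated_and_assert_propper_dequeue(self, running_id,
                                                     queued_id):
    self.client.executions.update(running_id, Execution.TERMINATED)
    queued_execution = self.client.executions.get(queued_id)
    self.wait_for_execution_to_end(queued_execution)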
 def test_cancel_on_wait_for_task_termination(self):
     _, eid = self.deploy_and_execute_workflow(
         resource('dsl/workflow_api.yaml'), self._testMethodName,
         parameters={'do_get': self.do_get}, wait_for_execution=False)
     self.wait_for_execution_status(eid, status=Execution.STARTED)
     self.client.executions.cancel(eid)
     self.wait_for_execution_status(eid, status=Execution.CANCELLED)
    def test_update_execution_status(self):
        dsl_path = resource("dsl/basic.yaml")
        _, execution_id = self.deploy_application(dsl_path,
                                                  wait_for_execution=True)
        execution = self.client.executions.get(execution_id)
        self.assertEqual(Execution.TERMINATED, execution.status)

        # Manually updating the status, because the client checks for
        # correct transitions
        postgresql.run_query(
            "UPDATE executions SET status='started' "
            "WHERE id='{0}'".format(execution_id)
        )
        execution = self.client.executions.get(execution_id)
        self.assertEqual(Execution.STARTED, execution.status)
        execution = self.client.executions.update(execution_id,
                                                  'pending',
                                                  'some-error')
        self.assertEqual(Execution.PENDING, execution.status)
        self.assertEqual('some-error', execution.error)
        # verifying that updating only the status field also resets the
        # error field to an empty string
        execution = self.client.executions.update(execution_id,
                                                  Execution.TERMINATED)
        self.assertEqual(Execution.TERMINATED, execution.status)
        self.assertEqual('', execution.error)
    def test_two_scheduled_execution_same_tenant(self):
        """
        Schedule two executions, on two deployments, for the same time.
        """
        # The token in the container is invalid; create a new valid one
        create_api_token()
        dsl_path = resource('dsl/basic.yaml')
        dep1 = self.deploy(dsl_path, wait=False, client=self.client)
        dep2 = self.deploy(dsl_path, wait=False, client=self.client)
        dep1_id = dep1.id
        dep2_id = dep2.id
        do_retries(verify_deployment_env_created, 30, deployment_id=dep1_id)
        do_retries(verify_deployment_env_created, 30, deployment_id=dep2_id)
        scheduled_time = generate_scheduled_for_date()
        execution1 = self.client.executions.start(deployment_id=dep1_id,
                                                  workflow_id='install',
                                                  schedule=scheduled_time)
        execution2 = self.client.executions.start(deployment_id=dep2_id,
                                                  workflow_id='install',
                                                  schedule=scheduled_time)
        self._assert_execution_status(execution1.id, Execution.SCHEDULED)
        self._assert_execution_status(execution2.id, Execution.SCHEDULED)

        time.sleep(62)  # Wait for exec to 'wake up'
        self.wait_for_execution_to_end(execution1)
        self.wait_for_execution_to_end(execution2)
    def test_schedule_execution_and_create_snapshot_same_tenant(self):
        """
        Schedule an execution, then create snapshot.
        Execution 'wakes up' while the snapshot is still running, so it
        becomes 'queued' and starts when the snapshot terminates.
        """
        # The token in the container is invalid; create a new valid one
        create_api_token()
        dsl_path = resource('dsl/sleep_workflows.yaml')
        dep = self.deploy(dsl_path, wait=False, client=self.client)
        dep_id = dep.id
        do_retries(verify_deployment_env_created, 30, deployment_id=dep_id)

        scheduled_time = generate_scheduled_for_date()
        execution = self.client.executions.start(deployment_id=dep_id,
                                                 workflow_id='install',
                                                 schedule=scheduled_time)
        self._assert_execution_status(execution.id, Execution.SCHEDULED)

        # Create a snapshot and keep its status 'started'
        snapshot = self._create_snapshot_and_modify_execution_status(
            Execution.STARTED)

        time.sleep(62)  # Wait for exec to 'wake up'
        self._assert_execution_status(execution.id, Execution.QUEUED)
        self.client.executions.update(snapshot.id, Execution.TERMINATED)
        self.wait_for_execution_to_end(execution)
 def test_component_creation_with_not_existing_blueprint_id(self):
     deployment_id = 'd{0}'.format(uuid.uuid4())
     dsl_path = resource('dsl/component_with_blueprint_id.yaml')
     self.assertRaises(RuntimeError,
                       self.deploy_application,
                       dsl_path,
                       deployment_id=deployment_id)
    def _test_deploy_with_agent_worker(self,
                                       blueprint,
                                       install_events,
                                       uninstall_events):
        deployment_id = 'd{0}'.format(uuid.uuid4())
        dsl_path = resource(blueprint)
        _, execution_id = self.deploy_application(dsl_path,
                                                  deployment_id=deployment_id)

        events = self.client.events.list(execution_id=execution_id,
                                         sort='timestamp')
        filtered_events = [event['message'] for event in events if
                           event['message'] in install_events]

        # Make sure the install events were emitted (in the correct order)
        self.assertListEqual(install_events, filtered_events)

        execution_id = self.undeploy_application(deployment_id)

        events = self.client.events.list(execution_id=execution_id,
                                         sort='timestamp')
        filtered_events = [event['message'] for event in events if
                           event['message'] in uninstall_events]

        # Make sure the uninstall events were emitted (in the correct order)
        self.assertListEqual(uninstall_events, filtered_events)
 def test_cancel_on_task_retry_interval(self):
     self.configure(retries=2, interval=1000000)
     _, eid = self.deploy_and_execute_workflow(
         resource('dsl/workflow_api.yaml'), self._testMethodName,
         parameters={'do_get': self.do_get}, wait_for_execution=False)
     self.wait_for_execution_status(eid, status=Execution.STARTED)
     self.client.executions.cancel(eid)
     self.wait_for_execution_status(eid, status=Execution.CANCELLED)
 def test_node_instances_pagination(self):
     deployment = self.deploy(
             resource('dsl/pagination-node-instances.yaml'))
     partial_obj = partial(
         self.client.node_instances.list,
         deployment_id=deployment.id)
     num_of_nodes_instances = 9
     self._test_pagination(partial_obj, total=num_of_nodes_instances)
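# A minimal sketch of what the _test_pagination helper might do, assuming the
# list call accepts the REST pagination arguments _offset/_size (as
# cloudify-rest-client list endpoints generally do); the page size here is an
# arbitrary illustrative choice.
def _test_pagination(self, list_func, total, page_size=3):
    seen = set()
    for offset in range(0, total, page_size):
        page = list_func(_offset=offset, _size=page_size)
        seen.update(item.id for item in page)
    # every item was returned exactly once across the pages
    self.assertEqual(total, len(seen))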
    def test_fail_to_create_deployment_while_creating_snapshot(self):
        # Create a snapshot and make sure its state remains 'started'
        self._create_snapshot_and_modify_execution_status(Execution.STARTED)

        dsl_path = resource('dsl/sleep_workflows.yaml')
        self._execute_unpermitted_operation_and_catch_exception(
            self.deploy, dsl_path
        )
 def test_deployment_modifications_sort(self):
     deployment = self.deploy(resource('dsl/sort.yaml'))
     for i in range(2, 12):
         modification = self.client.deployment_modifications.start(
             deployment_id=deployment.id,
             nodes={'node': {'instances': i}})
         self.client.deployment_modifications.finish(modification.id)
     self._test_sort('deployment_modifications', 'id')
 def test_executions_pagination(self):
     deployment = self.deploy(resource('dsl/pagination.yaml'))
     for i in range(5):
         self.execute_workflow('install', deployment.id)
         self.execute_workflow('uninstall', deployment.id)
     # create_deployment_environment + 5 installs + 5 uninstalls
     total_executions = 11
     self._test_pagination(partial(self.client.executions.list,
                                   deployment_id=deployment.id),
                           total=total_executions)
 def _test_custom_workflow(self, workflow, error_expected=False):
     deployment = self.deploy(resource("dsl/basic_task_not_exist.yaml"))
     try:
         self.execute_workflow(workflow, deployment.id)
         if error_expected:
             self.fail('RuntimeError expected')
     except RuntimeError as e:
         if not error_expected:
             self.fail('Success expected. error message: {0}'.format(e))
         self.assertIn(self.AGENT_ALIVE_FAIL, str(e))
 def test_secret_ssh_key_in_existing_vm(self):
     ssh_key_content = self._get_ssh_key_content()
     self.client.secrets.create('agent_key', ssh_key_content)
     dsl_path = resource(
         'dsl/agent_tests/secret-ssh-key-in-existing-vm.yaml'
     )
     inputs = {'ip': self._get_host_ip()}
     deployment, _ = self.deploy_application(dsl_path, inputs=inputs)
     plugin_data = self.get_plugin_data('testmockoperations', deployment.id)
     self.assertEqual(1, len(plugin_data['mock_operation_invocation']))
 def test_workflow_deployment_scaling_groups(self):
     deployment, _ = self.deploy_and_execute_workflow(
         resource('dsl/store-scaling-groups.yaml'),
         workflow_name='workflow')
     instance = self.client.node_instances.list(
         deployment_id=deployment.id
     )[0]
     self.assertEqual(
         ['node'],
         instance.runtime_properties['scaling_groups']['group1']['members'])
 def test_operation_mapping_override(self):
     dsl_path = resource("dsl/operation_mapping.yaml")
     deployment, _ = self.deploy_and_execute_workflow(dsl_path, 'workflow2')
     invocations = self.get_plugin_data(
         plugin_name='testmockoperations',
         deployment_id=deployment.id
     )['mock_operation_invocation']
     self.assertEqual(3, len(invocations))
     for invocation in invocations:
         self.assertEqual(1, len(invocation))
         self.assertEqual(invocation['test_key'], 'overridden_test_value')
 def _test_host_plugin_requires_old_package(self, blueprint_path):
     dsl_path = resource(blueprint_path)
     inputs = {
         'server_ip': self._get_host_ip(),
         'agent_private_key_path': self.get_host_key_path(
             node_id=self.setup_node_id,
             deployment_id=self.setup_deployment_id),
         'agent_user': '******'
     }
     deployment, _ = self.deploy_application(dsl_path, inputs=inputs)
     self.undeploy_application(deployment.id)
    def test_amqp_queues_list(self):
        """There's no additional queues after uninstalling the agent.

        We've seen queue leaks in the past, where queues or exchanges
        were not deleted. Check that uninstalling the agent, also removes
        its AMQP resources.
        """
        vhost = 'rabbitmq_vhost_default_tenant'
        deployment_id = 'd{0}'.format(uuid.uuid4())

        main_queues = self._get_queues()
        main_exchanges = self._get_exchanges()
        tenant_queues = self._get_queues(vhost)
        tenant_exchanges = self._get_exchanges(vhost)

        self.deploy_application(resource('dsl/agent_tests/with_agent.yaml'),
                                deployment_id=deployment_id)
        # installing the agent does nothing for the / vhost
        assert self._get_queues() == main_queues
        assert self._get_exchanges() == main_exchanges

        # after installing the agent, there are 2 new queues and at least
        # 1 new exchange
        agent_queues = self._get_queues(vhost) - tenant_queues
        agent_exchanges = self._get_exchanges(vhost) - tenant_exchanges
        assert len(agent_queues) == 2
        assert any(queue.endswith('_service') for queue in agent_queues)
        assert any(queue.endswith('_operation') for queue in agent_queues)
        assert any(exc.startswith('agent_host') for exc in agent_exchanges)
        # we already checked that there's an agent exchange, but a logs
        # exchange and an events exchange might also exist, depending on
        # whether any events or logs were sent
        assert len(agent_exchanges) in (1, 2, 3)

        self.undeploy_application(deployment_id)

        main_queues = self._get_queues()
        main_exchanges = self._get_exchanges()
        tenant_queues = self._get_queues(vhost)
        agent_exchanges = self._get_exchanges(vhost) - tenant_exchanges
        # after uninstalling the agent, there are still no new queues on
        # the / vhost
        assert self._get_queues() == main_queues
        assert self._get_exchanges() == main_exchanges
        # there are no queues left over
        assert self._get_queues(vhost) == tenant_queues
        # the logs and events exchanges will still exist, but the agent
        # exchange must have been deleted
        assert not any(exc.startswith('agent_host') for exc in agent_exchanges)
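# Hypothetical sketch of the _get_queues helper (with _get_exchanges assumed
# analogous, via 'rabbitmqctl list_exchanges'). It assumes the helper shells
# out to rabbitmqctl on the manager -- execute_on_manager is used the same way
# elsewhere in this suite -- and returns the names as a set, so the assertions
# above can use set difference and equality.
def _get_queues(self, vhost='/'):
    output = self.execute_on_manager(
        'rabbitmqctl list_queues -q -p {0} name'.format(vhost))
    return {line.strip() for line in output.splitlines() if line.strip()}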
    def test_deployment_statuses_after_queued_execution_finish(self):
        dsl_path = resource("dsl/basic.yaml")
        deployment = self.deploy(dsl_path)
        exe1, exe2 = self._force_deployment_to_be_queued(deployment.id)

        self.client.executions.update(exe1.id, Execution.TERMINATED)
        queued_execution = self.client.executions.get(exe2.id)
        self.wait_for_execution_to_end(queued_execution)

        deployment = self.client.deployments.get(deployment.id)
        self.assertEqual(deployment.latest_execution_status,
                         DeploymentState.COMPLETED)
        self.assertEqual(deployment.installation_status,
                         DeploymentState.ACTIVE)
        self.assertEqual(deployment.deployment_status, DeploymentState.GOOD)
 def _test_client(self, client_version, url_version_postfix):
     shell_script_path = resource('scripts/test_old_rest_client.sh')
     python_script_path = resource('scripts/test_old_rest_client.py')
     result_path = os.path.join(self.workdir, 'result.json')
     env = os.environ.copy()
     env.update({
         'python_script_path': python_script_path,
         'client_version': client_version,
         'manager_ip': self.get_manager_ip(),
         'manager_user': utils.get_manager_username(),
         'manager_password': utils.get_manager_password(),
         'manager_tenant': utils.get_manager_tenant(),
         'url_version_postfix': url_version_postfix,
         'result_path': result_path
     })
     subprocess.check_call(shell_script_path,
                           shell=True,
                           cwd=self.workdir,
                           env=env)
     with open(result_path) as f:
         result = json.load(f)
     if result['failed']:
         self.fail('Failed to get manager status from old client. '
                   '[error={0}]'.format(result['details']))
 def test_deployment_inputs(self):
     blueprint_id = str(uuid.uuid4())
     blueprint = self.client.blueprints.upload(resource("dsl/basic.yaml"),
                                               blueprint_id)
     inputs = blueprint.plan['inputs']
     self.assertEqual(1, len(inputs))
     self.assertTrue('install_agent' in inputs)
     self.assertFalse(inputs['install_agent']['default'])
     self.assertTrue(len(inputs['install_agent']['description']) > 0)
     deployment_id = str(uuid.uuid4())
     deployment = self.client.deployments.create(blueprint.id,
                                                 deployment_id)
     self.assertEqual(1, len(deployment.inputs))
     self.assertTrue('install_agent' in deployment.inputs)
     self.assertFalse(deployment.inputs['install_agent'])
    def test_fail_remote_task_eventual_failure(self):
        deployment_id = 'd{0}'.format(uuid.uuid4())
        self.assertRaises(RuntimeError,
                          self.deploy_and_execute_workflow,
                          resource('dsl/workflow_api.yaml'),
                          self._testMethodName,
                          deployment_id=deployment_id,
                          parameters={'do_get': self.do_get})

        # testing workflow remote task
        invocations = self.get_runtime_property(deployment_id,
                                                'failure_invocation')[0]
        self.assertEqual(3, len(invocations))
        for i in range(len(invocations) - 1):
            self.assertLessEqual(1, invocations[i + 1] - invocations[i])
 def test_resources_available(self):
     container_ip = self.get_manager_ip()
     blueprint_id = 'b{0}'.format(uuid.uuid4())
     blueprint_name = 'empty_blueprint.yaml'
     blueprint_path = resource('dsl/{0}'.format(blueprint_name))
     self.client.blueprints.upload(blueprint_path, entity_id=blueprint_id)
     invalid_resource_url = 'https://{0}:{1}/resources/blueprints/{2}/{3}' \
         .format(container_ip, 53229, blueprint_id, blueprint_name)
     try:
         result = requests.head(invalid_resource_url)
         self.assertEqual(result.status_code,
                          requests.status_codes.codes.not_found,
                          "Resources are available through port 53229.")
     except ConnectionError:
         pass
 def _test_host_plugin_requires_old_package(self, blueprint_path):
     dsl_path = resource(blueprint_path)
     inputs = {
         'server_ip':
         self._get_host_ip(),
         'agent_private_key_path':
         self.get_host_key_path(node_id=self.setup_node_id,
                                deployment_id=self.setup_deployment_id),
         'agent_user':
         '******'
     }
     deployment, _ = self.deploy_application(dsl_path,
                                             inputs=inputs,
                                             timeout_seconds=90)
     self.undeploy_application(deployment.id)
 def setUp(self):
     super(DownloadBlueprintTest, self).setUp()
     self.blueprint_id = 'b{0}'.format(uuid.uuid4())
     self.blueprint_file = '{0}.tar.gz'.format(self.blueprint_id)
     self.downloaded_archive_path = str(self.workdir / self.blueprint_file)
     self.downloaded_extracted_dir = str(self.workdir / 'extracted')
     self.test_blueprint_dir = str(self.workdir / 'blueprint')
     os.mkdir(self.test_blueprint_dir)
     self.large_file_location = os.path.join(self.test_blueprint_dir,
                                             'just_a_large_file.img')
     blueprint_src = resource('dsl/empty_blueprint.yaml')
     self.original_blueprint_file = os.path.join(self.test_blueprint_dir,
                                                 'blueprint.yaml')
     shutil.copy(blueprint_src, self.original_blueprint_file)
     self._create_file('50M', self.large_file_location)
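# Hedged sketch of the _create_file helper used in setUp above, assuming it
# just preallocates a sparse file of the requested size (e.g. '50M') so the
# blueprint archive is large enough to exercise the download path.
import subprocess

def _create_file(self, size, path):
    subprocess.check_call(['truncate', '--size', size, path])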
 def test_deployment_workflows(self):
     dsl_path = resource("dsl/custom_workflow_mapping.yaml")
     deployment, _ = self.deploy_application(dsl_path)
     deployment_id = deployment.id
     workflows = self.client.deployments.get(deployment_id).workflows
     self.assertEqual(8, len(workflows))
     wf_ids = [x.name for x in workflows]
     self.assertIn('uninstall', wf_ids)
     self.assertIn('install', wf_ids)
     self.assertIn('execute_operation', wf_ids)
     self.assertIn('custom', wf_ids)
     self.assertIn('scale', wf_ids)
     self.assertIn('heal', wf_ids)
     self.assertIn('install_new_agents', wf_ids)
     self.assertIn('update', wf_ids)
 def test_group_deployment_modification(self):
     # this test specifically tests elasticsearch's implementation
     # of update_deployment. other features are tested elsewhere.
     deployment = self.deploy(
         resource('dsl/deployment_modification_groups.yaml'))
     modification = self.client.deployment_modifications.start(
         deployment_id=deployment.id,
         nodes={'compute_and_ip': {
             'instances': 2
         }})
     self.client.deployment_modifications.finish(modification.id)
     deployment = self.client.deployments.get(deployment.id)
     scaling_group = deployment['scaling_groups']['compute_and_ip']
     self.assertEqual(2, scaling_group['properties']['planned_instances'])
     self.assertEqual(2, scaling_group['properties']['current_instances'])
    def test_get_secret_intrinsic_function(self):
        dsl_path = resource("dsl/basic_get_secret.yaml")

        # Fails to create deployment because the secret is missing
        error_msg = "^400: Required secrets .*? don't exist in this tenant$"
        self.assertRaisesRegex(
            UnknownDeploymentSecretError,
            error_msg,
            self.deploy_application,
            dsl_path
        )

        # Manage to create deployment after creating the secret
        self.client.secrets.create('port', '8080')
        deployment, _ = self.deploy_application(dsl_path)
 def test_stop_monitor_node_operation(self):
     dsl_path = resource("dsl/hardcoded_operation_properties.yaml")
     deployment, _ = self.deploy_application(dsl_path)
     deployment_id = deployment.id
     self.undeploy_application(deployment_id)
     # test monitor invocations
     node_id = self.client.node_instances.list(
         deployment_id=deployment_id)[0].id
     node_instance = self.client.node_instances.get(node_id)
     invocations = node_instance.runtime_properties[
         'monitoring_operations_invocation']
     self.assertEqual(2, len(invocations))
     self.assertTrue('single_node' in invocations[0]['id'])
     self.assertEqual('start_monitor', invocations[0]['operation'])
     self.assertTrue('single_node' in invocations[1]['id'])
     self.assertEqual('stop_monitor', invocations[1]['operation'])
    def test_update_runtime_properties(self):
        dsl_path = resource("dsl/set_property.yaml")

        # testing set property
        deployment, _ = self.deploy_application(dsl_path)
        node_id = self.client.node_instances.list(
            deployment_id=deployment.id)[0].id
        node_runtime_props = self.client.node_instances.get(
            node_id).runtime_properties
        self.assertEqual('property_value', node_runtime_props['property_name'])

        # testing delete property
        self.undeploy_application(deployment.id)
        node_runtime_props = self.client.node_instances.get(
            node_id).runtime_properties
        self.assertNotIn('property_name', node_runtime_props)
 def test_subgraph_retries_provider_config_config(self):
     context = {'cloudify': {'workflows': {
         'task_retries': 0,
         'task_retry_interval': 0,
         'subgraph_retries': 2
     }}}
     deployment_id = 'd{0}'.format(uuid.uuid4())
     self.client.manager.create_context(self._testMethodName, context)
     self.deploy_application(
         resource('dsl/workflow_subgraph_retries.yaml'),
         deployment_id=deployment_id)
     invocations = self.get_plugin_data(
         plugin_name='testmockoperations',
         deployment_id=deployment_id
     )['failure_invocation']
     self.assertEqual(len(invocations), 3)
 def test_deployment_inputs(self):
     blueprint_id = 'b{0}'.format(uuid.uuid4())
     self.client.blueprints.upload(resource("dsl/basic.yaml"), blueprint_id)
     wait_for_blueprint_upload(blueprint_id, self.client)
     blueprint = self.client.blueprints.get(blueprint_id)
     inputs = blueprint.plan['inputs']
     self.assertEqual(1, len(inputs))
     self.assertTrue('install_agent' in inputs)
     self.assertFalse(inputs['install_agent']['default'])
     self.assertTrue(len(inputs['install_agent']['description']) > 0)
     deployment_id = 'd{0}'.format(uuid.uuid4())
     deployment = self.client.deployments.create(
         blueprint.id, deployment_id, skip_plugins_validation=True)
     self.assertEqual(1, len(deployment.inputs))
     self.assertTrue('install_agent' in deployment.inputs)
     self.assertFalse(deployment.inputs['install_agent'])
    def _update_inputs_with_dev_resource_urls(inputs):
        """Update the inputs to be sent to the manager with the resource URLs
        from dev_resource_urls.yaml

        To be used during development, when the integration test needs updated
        code during bootstrap (as opposed to the mount docl does after
        bootstrap)

        :param inputs: inputs dict
        :return: The updated inputs dict
        """
        inputs = inputs or {}
        dev_resources_path = resource('dev_resource_urls.yaml')
        with open(dev_resources_path, 'r') as f:
            inputs.update(yaml.safe_load(f))
        return inputs
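# A minimal usage sketch for the helper above (called here as a plain
# function for illustration), assuming dev_resource_urls.yaml maps input
# names to resource URLs; after the call, 'inputs' contains the caller's
# values plus every key from that file.
inputs = _update_inputs_with_dev_resource_urls({'some_input': 'some_value'})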
    def test_deployment_statuses_during_cancelling_without_install_nodes(self):
        # Create deployment environment + execute "sleep_with_cancel_support"
        dsl_path = resource("dsl/sleep_workflows.yaml")
        deployment = self.deploy(dsl_path)
        execution = self.execute_workflow(workflow_name='simple_sleep',
                                          deployment_id=deployment.id,
                                          wait_for_execution=False)

        self.client.executions.cancel(execution.id)
        deployment = self.client.deployments.get(deployment.id)
        self.assertEqual(deployment.latest_execution_status,
                         DeploymentState.IN_PROGRESS)
        self.assertEqual(deployment.installation_status,
                         DeploymentState.INACTIVE)
        self.assertEqual(deployment.deployment_status,
                         DeploymentState.IN_PROGRESS)
    def test_fail_remote_task_eventual_failure(self):
        deployment_id = str(uuid.uuid4())
        self.assertRaises(RuntimeError,
                          self.deploy_and_execute_workflow,
                          resource('dsl/workflow_api.yaml'),
                          self._testMethodName,
                          deployment_id=deployment_id,
                          parameters={'do_get': self.do_get})

        # testing workflow remote task
        invocations = self.get_plugin_data(
            plugin_name='testmockoperations',
            deployment_id=deployment_id)['failure_invocation']
        self.assertEqual(3, len(invocations))
        for i in range(len(invocations) - 1):
            self.assertLessEqual(1, invocations[i + 1] - invocations[i])
    def test_deploy_with_agent_worker_windows_3_2(self):
        dsl_path = resource('dsl/with_agent_worker_windows_3_2.yaml')
        deployment, _ = self.deploy_application(dsl_path, timeout_seconds=500)
        deployment_nodes = self.client.node_instances.list(
            deployment_id=deployment.id)

        webserver_nodes = [node for node in deployment_nodes
                           if 'host' not in node.node_id]
        self.assertEqual(1, len(webserver_nodes))
        webserver_node = webserver_nodes[0]
        invocations = self.get_plugin_data(
            plugin_name='mock_agent_plugin',
            deployment_id=deployment.id)[webserver_node.id]

        agent_installer_data = self.get_plugin_data(
            plugin_name='windows_agent_installer', deployment_id=deployment.id)

        self.assertEqual(
            agent_installer_data[webserver_node.host_id]['states'],
            ['created', 'configured', 'started'])

        plugin_installer_data = self.get_plugin_data(
            plugin_name='windows_plugin_installer',
            deployment_id=deployment.id)

        self.assertEqual(
            plugin_installer_data[webserver_node.host_id]['mock_agent_plugin'],
            ['installed'])

        expected_invocations = ['create', 'start']
        self.assertListEqual(invocations, expected_invocations)

        self.undeploy_application(deployment_id=deployment.id)
        invocations = self.get_plugin_data(
            plugin_name='mock_agent_plugin',
            deployment_id=deployment.id)[webserver_node.id]

        expected_invocations = ['create', 'start', 'stop', 'delete']
        self.assertListEqual(invocations, expected_invocations)

        # the agent on the host should also have been
        # stopped and uninstalled
        agent_installer_data = self.get_plugin_data(
            plugin_name='windows_agent_installer', deployment_id=deployment.id)
        self.assertEqual(
            agent_installer_data[webserver_node.host_id]['states'],
            ['created', 'configured', 'started', 'stopped', 'deleted'])
    def test_uninstall_application_single_node_no_host(self):
        dsl_path = resource("dsl/single_node_no_host.yaml")
        deployment, _ = self.deploy_application(dsl_path)
        deployment_id = deployment.id
        self.undeploy_application(deployment_id)

        node_id = self.client.node_instances.list(
            deployment_id=deployment_id)[0].id
        node_instance = self.client.node_instances.get(node_id)
        unreachable_call_order = node_instance.runtime_properties[
            'unreachable_call_order']

        unreachable_called = is_unreachable_called(node_id,
                                                   unreachable_call_order)
        self.assertTrue(unreachable_called)

        self.assertEqual('deleted', node_instance['state'])
 def test_deploy_multi_instance_many_different_hosts(self):
     dsl_path = resource('dsl/multi_instance_many_different_hosts.yaml')
     deployment, _ = self.deploy_application(dsl_path, timeout_seconds=180)
     machines = set()
     for host_ni in self.client.node_instances.list():
         machines.update(
             host_id for host_id, state
             in host_ni.runtime_properties.get('machines', {}).items()
             if state == 'running'
         )
     self.assertEqual(15, len(machines))
     self.assertEqual(
         5, len([ma for ma in machines if ma.startswith('host1')]))
     self.assertEqual(
         5, len([ma for ma in machines if ma.startswith('host2')]))
     self.assertEqual(
         5, len([ma for ma in machines if ma.startswith('host3')]))
    def test_deploy_multi_instance_application(self):
        dsl_path = resource("dsl/multi_instance.yaml")
        deployment, _ = self.deploy_application(dsl_path)

        machines = set()
        for host_ni in self.client.node_instances.list(node_id='host'):
            machines.update(
                host_id for host_id, state
                in host_ni.runtime_properties.get('machines', {}).items()
                if state == 'running'
            )
        machines_with_apps = set()
        for app_ni in self.client.node_instances.list(node_id='app_module'):
            machines_with_apps.update(
                app_ni.runtime_properties.get('capabilities', {})
            )
        assert machines == machines_with_apps
    def test_maintenance_mode(self):
        blueprint_id = 'b{0}'.format(uuid.uuid4())
        deployment_id = blueprint_id
        blueprint_path = resource('dsl/agent_tests/maintenance_mode.yaml')
        self.client.blueprints.upload(blueprint_path, entity_id=blueprint_id)
        wait_for_blueprint_upload(blueprint_id, self.client)
        self.client.deployments.create(blueprint_id=blueprint_id,
                                       deployment_id=deployment_id)
        wait_for_deployment_creation_to_complete(self.env.container_id,
                                                 deployment_id, self.client)

        # Run a non-blocking installation
        execution = self.client.executions.start(deployment_id=deployment_id,
                                                 workflow_id='install')
        self.wait_for_execution_status(execution.id, status=Execution.STARTED)

        self.logger.info(
            "checking that maintenance status is 'deactivated'")
        self._check_maintenance_status('deactivated')

        self.logger.info('activating maintenance mode')
        self.client.maintenance_mode.activate()
        self.addCleanup(self.cleanup)

        self.logger.info(
            "checking if maintenance status has changed to 'activating'")
        self.do_assertions(self._check_maintenance_status,
                           timeout=60,
                           status='activating')

        self.logger.info('cancelling installation')
        self.client.executions.cancel(execution['id'])

        self.logger.info(
            "checking if maintenance status has changed to 'activated'")
        self.do_assertions(self._check_maintenance_status,
                           timeout=60,
                           status='activated')

        self.logger.info('deactivating maintenance mode')
        self.client.maintenance_mode.deactivate()
        self.logger.info(
            "checking if maintenance status has changed to 'deactivated'")
        self.do_assertions(self._check_maintenance_status,
                           timeout=60,
                           status='deactivated')
    def test_simple(self):
        parameters = {'do_get': self.do_get, 'key': 'key1', 'value': 'value1'}
        result_dict = {'key1': 'value1'}
        deployment, _ = self.deploy_and_execute_workflow(
            resource('dsl/workflow_api.yaml'),
            self._testMethodName,
            parameters=parameters)

        # testing workflow remote task
        invocation = self.get_runtime_property(deployment.id,
                                               'mock_operation_invocation')[0]
        self.assertDictEqual(result_dict, invocation[0])

        # testing workflow local task
        instance = self.client.node_instances.list(
            deployment_id=deployment.id)[0]
        self.assertEqual('test_state', instance.state)
 def test_plugin_from_source(self):
     dsl_path = resource('dsl/agent_tests/with_agent_source_plugin.yaml')
     deployment, _ = self.deploy_application(dsl_path)
     mgmtworker_plugin_dir = os.path.join(
         '/opt/mgmtworker/env/source_plugins', 'default_tenant',
         deployment.id, 'sourceplugin', '0.0.0')
     # the plugin sets the 'ok' runtime property, let's check that it
     # did run on both the mgmtworker and the agent
     node1 = self.client.node_instances.list(deployment_id=deployment.id,
                                             node_id='node1')[0]
     node2 = self.client.node_instances.list(deployment_id=deployment.id,
                                             node_id='node2')[0]
     assert node1.runtime_properties.get('ok')
     assert node2.runtime_properties.get('ok')
     assert self.directory_exists(mgmtworker_plugin_dir)
     self.undeploy_application(deployment.id, is_delete_deployment=True)
     assert not self.directory_exists(mgmtworker_plugin_dir)
    def _execute_from_resource(self, workflow_id, workflow_params=None,
                               resource_file=None):
        dsl_path = resource(resource_file)
        _id = uuid.uuid1()
        blueprint_id = 'blueprint_{0}'.format(_id)
        deployment_id = 'deployment_{0}'.format(_id)
        self.client.blueprints.upload(dsl_path, blueprint_id)
        self.client.deployments.create(blueprint_id, deployment_id,
                                       skip_plugins_validation=True)
        do_retries(verify_deployment_env_created, 30,
                   deployment_id=deployment_id)
        execution = self.client.executions.start(
            deployment_id, workflow_id, parameters=workflow_params)
        node_inst_id = self.client.node_instances.list(
            deployment_id=deployment_id)[0].id

        return execution, node_inst_id, deployment_id
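# Hypothetical usage sketch for _execute_from_resource: start a workflow from
# a blueprint resource and get back the handles the assertions need (the
# blueprint path here is illustrative).
execution, node_inst_id, deployment_id = self._execute_from_resource(
    'install', resource_file='dsl/basic.yaml')
self.wait_for_execution_to_end(execution)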
    def _manual_deployment_update(self, deployment):
        """Sneakily update the deployment, without running deployment update.

        This is mocking out the deployment update procedure for the purposes
        of this test.

        This allows us to simply rerun a workflow without explicitly
        updating it, and to check that the new workflow run uses the
        updated values because they have changed, not because of anything
        deployment update does.
        """
        update_script = resource('scripts/update_deployment.py')
        self.copy_file_to_manager(update_script, '/tmp/update_deployment.py')

        self.execute_on_manager(
            '/opt/manager/env/bin/python /tmp/update_deployment.py '
            '--deployment-id {0}'.format(deployment.id))
    def test_scheduled_execution(self):

        # The token in the container is invalid; create a new valid one
        create_api_token()
        dsl_path = resource('dsl/basic.yaml')
        dep = self.deploy(dsl_path, wait=False, client=self.client)
        dep_id = dep.id
        do_retries(verify_deployment_env_created, 30, deployment_id=dep_id)
        scheduled_time = generate_scheduled_for_date()

        execution = self.client.executions.start(deployment_id=dep_id,
                                                 workflow_id='install',
                                                 schedule=scheduled_time)
        self.assertEqual(Execution.SCHEDULED, execution.status)

        time.sleep(62)  # Wait for exec to 'wake up'
        self.wait_for_execution_to_end(execution)
    def test_execution_parameters(self):
        dsl_path = resource('dsl/workflow_parameters.yaml')
        _id = uuid.uuid1()
        blueprint_id = 'blueprint_{0}'.format(_id)
        deployment_id = 'deployment_{0}'.format(_id)
        self.client.blueprints.upload(dsl_path, blueprint_id)
        self.client.deployments.create(blueprint_id,
                                       deployment_id,
                                       skip_plugins_validation=True)
        do_retries(verify_deployment_env_created,
                   60,
                   deployment_id=deployment_id)
        execution_parameters = {
            'operation': 'test_interface.operation',
            'properties': {
                'key': 'different-key',
                'value': 'different-value'
            },
            'custom-parameter': "doesn't matter"
        }
        execution = self.client.executions.start(
            deployment_id,
            'another_execute_operation',
            parameters=execution_parameters,
            allow_custom_parameters=True)
        self.wait_for_execution_to_end(execution)
        invocations = self.get_plugin_data(
            plugin_name='testmockoperations',
            deployment_id=deployment_id)['mock_operation_invocation']
        self.assertEqual(1, len(invocations))
        self.assertDictEqual(invocations[0],
                             {'different-key': 'different-value'})

        # check the execution parameters - the passed parameters are expected
        # to be merged with, and to override, the declared workflow parameters
        expected_params = {
            'node_id': 'test_node',
            'operation': 'test_interface.operation',
            'properties': {
                'key': 'different-key',
                'value': 'different-value'
            },
            'custom-parameter': "doesn't matter"
        }
        self.assertEqual(expected_params, execution.parameters)
    def test_run_exec_from_queue_while_system_execution_is_queued(self):
        """
        - System execution (snapshot) is running
        - Queue contains: a regular execution and another system execution
        Once the first snapshot finishes we expect the regular execution to run
        (even though snapshot_2 is in the queue) and the second snapshot
        to be queued again.

        """
        # Create deployment
        dsl_path = resource('dsl/sleep_workflows.yaml')
        deployment = self.deploy(dsl_path)

        # Create snapshot and make sure it's state remains 'started'
        # so that new executions will be queued
        snapshot = self._create_snapshot_and_modify_execution_status(
            Execution.STARTED)

        # Start 'install' execution
        execution = self.execute_workflow(workflow_name='sleep',
                                          deployment_id=deployment.id,
                                          wait_for_execution=False,
                                          queue=True)

        # Create another system execution
        snapshot_2 = self.client.snapshots.create('snapshot_2',
                                                  include_credentials=True,
                                                  include_logs=True,
                                                  include_events=True,
                                                  include_metrics=True,
                                                  queue=True)

        # Make sure execution and snapshot_2 are queued (since there's a
        # running system execution)
        self._assert_execution_status(snapshot_2.id, Execution.QUEUED)
        self._assert_execution_status(execution.id, Execution.QUEUED)

        # Update first snapshot status to terminated
        self.client.executions.update(snapshot.id, Execution.TERMINATED)

        # Make sure the execution status is started (or pending) even though
        # there's a queued system execution
        current_status = self.client.executions.get(execution.id).status
        self.assertIn(current_status, [Execution.PENDING, Execution.STARTED])
        self._assert_execution_status(snapshot_2.id, Execution.QUEUED)
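# Hypothetical sketch of _create_snapshot_and_modify_execution_status, which
# several tests above rely on. It is assumed to create a snapshot and then pin
# its system execution in the requested status via the same direct
# executions.update call the tests later use to release it.
def _create_snapshot_and_modify_execution_status(self, status):
    snapshot = self.client.snapshots.create('snapshot_1',
                                            include_metrics=True,
                                            include_credentials=True,
                                            include_logs=True,
                                            include_events=True)
    self.client.executions.update(snapshot.id, status)
    return snapshot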